author		Dave Airlie <airlied@redhat.com>	2017-09-28 08:37:02 +1000
committer	Dave Airlie <airlied@redhat.com>	2017-09-28 08:37:02 +1000
commit		754270c7c56292e97d0eff924a5d5d83f92add07 (patch)
tree		8ee52859dbc5e1712b22a0bcb73cadf01d9d0688 /drivers/gpu
parent		9afafdbfbf5e8fca4dabd817939b61f1e766e64c (diff)
parent		6f87a895709eecc1542fe947e349364ad061ac00 (diff)
Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next
First feature pull for 4.15. Highlights:

- Per VM BO support
- Lots of powerplay cleanups
- Powerplay support for CI
- pasid mgr for kfd
- interrupt infrastructure for recoverable page faults
- SR-IOV fixes
- initial GPU reset for vega10
- prime mmap support
- ttm page table debugging improvements
- lots of bug fixes

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux: (232 commits)
  drm/amdgpu: clarify license in amdgpu_trace_points.c
  drm/amdgpu: Add gem_prime_mmap support
  drm/amd/powerplay: delete dead code in smumgr
  drm/amd/powerplay: delete SMUM_FIELD_MASK
  drm/amd/powerplay: delete SMUM_WAIT_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_READ_FIELD
  drm/amd/powerplay: delete SMUM_SET_FIELD
  drm/amd/powerplay: delete SMUM_READ_VFPF_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_WRITE_VFPF_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_WRITE_FIELD
  drm/amd/powerplay: delete SMU_WRITE_INDIRECT_FIELD
  drm/amd/powerplay: move macros to hwmgr.h
  drm/amd/powerplay: move PHM_WAIT_VFPF_INDIRECT_FIELD to hwmgr.h
  drm/amd/powerplay: move SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL to hwmgr.h
  drm/amd/powerplay: move SMUM_WAIT_INDIRECT_FIELD_UNEQUAL to hwmgr.h
  drm/amd/powerplay: add new helper functions in hwmgr.h
  drm/amd/powerplay: use SMU_IND_INDEX/DATA_11 pair
  drm/amd/powerplay: refine powerplay code.
  drm/amd/powerplay: delete dead code in hwmgr.h
  drm/amd/powerplay: refine interface in struct pp_smumgr_func
  ...
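Among the highlights, "prime mmap support" exports amdgpu_gem_prime_mmap() (its prototype appears in the amdgpu.h hunk below). As a hedged sketch — the amdgpu_drv.c hunk that wires it up is not part of this excerpt, so the field assignment here is illustrative of the usual drm_driver layout rather than a quoted change — DRM core forwards a userspace mmap() on an exported dma-buf to this hook:

    /* Illustrative wiring only; other drm_driver fields omitted. */
    static struct drm_driver kms_driver = {
            .driver_features = DRIVER_GEM | DRIVER_PRIME,
            .gem_prime_mmap  = amdgpu_gem_prime_mmap,
    };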
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 130
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 187
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 165
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 121
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h (renamed from drivers/gpu/drm/amd/powerplay/eventmgr/psm.h) | 42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 246
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 37
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 58
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 273
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 70
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 598
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 53
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 108
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_dpm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 154
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 358
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dma.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 77
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 94
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 50
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 6
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 66
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 19
-rw-r--r--  drivers/gpu/drm/amd/include/cgs_common.h | 24
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 6
-rw-r--r--  drivers/gpu/drm/amd/include/linux/chash.h | 366
-rw-r--r--  drivers/gpu/drm/amd/include/v9_structs.h | 12
-rw-r--r--  drivers/gpu/drm/amd/include/vi_structs.h | 259
-rw-r--r--  drivers/gpu/drm/amd/lib/Kconfig | 27
-rw-r--r--  drivers/gpu/drm/amd/lib/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/lib/chash.c | 638
-rw-r--r--  drivers/gpu/drm/amd/powerplay/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 269
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/Makefile | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c | 291
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h | 62
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c | 195
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c | 215
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h | 59
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | 104
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c | 410
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h | 100
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c | 445
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h | 89
-rw-r--r--  drivers/gpu/drm/amd/powerplay/eventmgr/psm.c | 119
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c | 46
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c | 559
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c | 161
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 151
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 418
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 246
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h (renamed from drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h) | 24
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 88
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 157
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c | 378
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | 66
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 436
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 110
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c | 183
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 441
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 125
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 277
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h | 13
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h | 143
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/eventmanager.h | 109
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/eventmgr.h | 124
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 194
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_instance.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 206
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c | 2753
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h | 52
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 86
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h | 78
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 308
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c | 120
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 130
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c | 120
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 76
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c | 152
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 126
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c | 130
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h | 8
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 215
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 36
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 261
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c | 144
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 64
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 194
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h | 16
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kfd.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 33
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 34
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 43
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 42
171 files changed, 9748 insertions, 8172 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index c9f09fc298bb..4d9f21831741 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -184,6 +184,7 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select BACKLIGHT_LCD_SUPPORT
select INTERVAL_TREE
+ select CHASH
help
Choose this option if you have a recent AMD Radeon graphics card.
@@ -191,6 +192,8 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
+source "drivers/gpu/drm/amd/lib/Kconfig"
+
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 0ee184f56a60..a3fdc5a68dff 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_DRM_ARM) += arm/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
+obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 658bac0cdc5e..25a95c95df14 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -133,5 +133,3 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
amdgpu-y += $(AMD_POWERPLAY_FILES)
obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
-
-CFLAGS_amdgpu_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 103635ab784c..ebfc267467ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -65,6 +65,7 @@
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
+#include "amdgpu_mn.h"
#include "gpu_scheduler.h"
#include "amdgpu_virt.h"
@@ -91,7 +92,7 @@ extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
-extern unsigned amdgpu_ip_block_mask;
+extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
@@ -104,14 +105,14 @@ extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
-extern unsigned amdgpu_pcie_gen_cap;
-extern unsigned amdgpu_pcie_lane_cap;
-extern unsigned amdgpu_cg_mask;
-extern unsigned amdgpu_pg_mask;
-extern unsigned amdgpu_sdma_phase_quantum;
+extern uint amdgpu_pcie_gen_cap;
+extern uint amdgpu_pcie_lane_cap;
+extern uint amdgpu_cg_mask;
+extern uint amdgpu_pg_mask;
+extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
-extern unsigned amdgpu_pp_feature_mask;
+extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
@@ -178,6 +179,7 @@ struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
+struct amdgpu_bo_va_mapping;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -292,14 +294,25 @@ struct amdgpu_buffer_funcs {
/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
+ /* number of dw to reserve per operation */
+ unsigned copy_pte_num_dw;
+
/* copy pte entries from GART */
void (*copy_pte)(struct amdgpu_ib *ib,
uint64_t pe, uint64_t src,
unsigned count);
+
/* write pte one entry at a time with addr mapping */
void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
uint64_t value, unsigned count,
uint32_t incr);
+
+ /* maximum nums of PTEs/PDEs in a single operation */
+ uint32_t set_max_nums_pte_pde;
+
+ /* number of dw to reserve per operation */
+ unsigned set_pte_pde_num_dw;
+
/* for linear pte/pde updates without addr mapping */
void (*set_pte_pde)(struct amdgpu_ib *ib,
uint64_t pe,
@@ -332,6 +345,7 @@ struct amdgpu_gart_funcs {
struct amdgpu_ih_funcs {
/* ring read/write ptr handling, called from interrupt context */
u32 (*get_wptr)(struct amdgpu_device *adev);
+ bool (*prescreen_iv)(struct amdgpu_device *adev);
void (*decode_iv)(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry);
void (*set_rptr)(struct amdgpu_device *adev);
@@ -399,6 +413,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
/* sub-allocation manager, it has to be protected by another lock.
@@ -455,9 +470,10 @@ struct amdgpu_sa_bo {
*/
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
- int alignment, u32 initial_domain,
- u64 flags, bool kernel,
- struct drm_gem_object **obj);
+ int alignment, u32 initial_domain,
+ u64 flags, bool kernel,
+ struct reservation_object *resv,
+ struct drm_gem_object **obj);
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -731,8 +747,8 @@ struct amdgpu_ctx_mgr {
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct dma_fence *fence);
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct amdgpu_ring *ring, uint64_t seq);
@@ -1014,7 +1030,6 @@ struct amdgpu_gfx {
/* reset mask */
uint32_t grbm_soft_reset;
uint32_t srbm_soft_reset;
- bool in_reset;
/* s3/s4 mask */
bool in_suspend;
/* NGG */
@@ -1056,6 +1071,7 @@ struct amdgpu_cs_parser {
/* buffer objects */
struct ww_acquire_ctx ticket;
struct amdgpu_bo_list *bo_list;
+ struct amdgpu_mn *mn;
struct amdgpu_bo_list_entry vm_pd;
struct list_head validated;
struct dma_fence *fence;
@@ -1183,6 +1199,9 @@ struct amdgpu_firmware {
/* gpu info firmware data pointer */
const struct firmware *gpu_info_fw;
+
+ void *fw_buf_ptr;
+ uint64_t fw_buf_mc;
};
/*
@@ -1197,20 +1216,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
void amdgpu_test_moves(struct amdgpu_device *adev);
/*
- * MMU Notifier
- */
-#if defined(CONFIG_MMU_NOTIFIER)
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
-void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-#else
-static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
-{
- return -ENODEV;
-}
-static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
-#endif
-
-/*
* Debugfs
*/
struct amdgpu_debugfs {
@@ -1592,6 +1597,7 @@ struct amdgpu_device {
/* record last mm index being written through WREG32*/
unsigned long last_mm_index;
+ bool in_sriov_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1759,6 +1765,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
+#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
@@ -1791,18 +1798,6 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
- uint32_t flags);
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
-struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
-bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end);
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
- int *last_invalidated);
-bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
- struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
@@ -1885,10 +1880,9 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **mapping);
#include "amdgpu_object.h"
#endif
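The new prescreen_iv hook in amdgpu_ih_funcs (and its amdgpu_ih_prescreen_iv() wrapper above) lets the ASIC-specific code reject an interrupt vector before it is decoded, which is what the recoverable-page-fault work needs to filter repeated retry faults cheaply. A rough sketch of where it sits in the IH ring walk, modeled on amdgpu_ih_process(); names outside this hunk (entry handling, dispatch) are assumptions, not quoted code:

    struct amdgpu_iv_entry entry;
    u32 wptr = amdgpu_ih_get_wptr(adev);

    while (adev->irq.ih.rptr != wptr) {
            /* New step: ASIC code may drop this entry (e.g. an already
             * handled retry fault) before any decode work happens. */
            if (!amdgpu_ih_prescreen_iv(adev)) {
                    adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
                    continue;
            }
            amdgpu_ih_decode_iv(adev, &entry);
            amdgpu_irq_dispatch(adev, &entry);
    }
    amdgpu_ih_set_rptr(adev);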
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b9dbbf9cb8b0..dc7e25cce741 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .alloc_pasid = amdgpu_vm_alloc_pasid,
+ .free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 309f2419c6d8..c678c69936a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .alloc_pasid = amdgpu_vm_alloc_pasid,
+ .free_pasid = amdgpu_vm_free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
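The two hunks above (gfx_v7 and gfx_v8 alike) hand PASID allocation over to amdgpu's VM code, so KFD and the rest of the driver draw from a single PASID space instead of keeping separate ones. A hedged usage sketch; the signatures int (*alloc_pasid)(unsigned int bits) and void (*free_pasid)(unsigned int pasid) are assumed from the kgd_kfd_interface.h change in this pull's diffstat, which is not excerpted here:

    /* Hypothetical KFD-side caller. */
    int pasid = kfd2kgd->alloc_pasid(16);   /* request a 16-bit PASID */

    if (pasid <= 0)
            return -ENOSPC;
    /* ... bind the process and run work under this pasid ... */
    kfd2kgd->free_pasid(pasid);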
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index fd435a96481c..383204e911a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -45,7 +45,6 @@ struct amdgpu_cgs_device {
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
- uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle)
{
CGS_FUNC_ADEV;
@@ -53,13 +52,6 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
int ret = 0;
uint32_t domain = 0;
struct amdgpu_bo *obj;
- struct ttm_placement placement;
- struct ttm_place place;
-
- if (min_offset > max_offset) {
- BUG_ON(1);
- return -EINVAL;
- }
/* fail if the alignment is not a power of 2 */
if (((align != 1) && (align & (align - 1)))
@@ -73,41 +65,19 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (max_offset > adev->mc.real_vram_size)
- return -EINVAL;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
break;
case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
domain = AMDGPU_GEM_DOMAIN_VRAM;
- if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
- place.fpfn =
- max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
- place.lpfn =
- min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
- TTM_PL_FLAG_VRAM;
- }
-
break;
case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
domain = AMDGPU_GEM_DOMAIN_GTT;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
break;
case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
domain = AMDGPU_GEM_DOMAIN_GTT;
- place.fpfn = min_offset >> PAGE_SHIFT;
- place.lpfn = max_offset >> PAGE_SHIFT;
- place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
- TTM_PL_FLAG_UNCACHED;
break;
default:
return -EINVAL;
@@ -116,15 +86,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
*handle = 0;
- placement.placement = &place;
- placement.num_placement = 1;
- placement.busy_placement = &place;
- placement.num_busy_placement = 1;
-
- ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
- true, domain, flags,
- NULL, &placement, NULL,
- 0, &obj);
+ ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
+ NULL, NULL, 0, &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -155,19 +118,14 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
uint64_t *mcaddr)
{
int r;
- u64 min_offset, max_offset;
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
WARN_ON_ONCE(obj->placement.num_placement > 1);
- min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
- max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
-
r = amdgpu_bo_reserve(obj, true);
if (unlikely(r != 0))
return r;
- r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
- min_offset, max_offset, mcaddr);
+ r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
amdgpu_bo_unreserve(obj);
return r;
}
@@ -675,6 +633,85 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
if (!adev->pm.fw) {
switch (adev->asic_type) {
+ case CHIP_TAHITI:
+ strcpy(fw_name, "radeon/tahiti_smc.bin");
+ break;
+ case CHIP_PITCAIRN:
+ if ((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/pitcairn_smc.bin");
+ }
+ break;
+ case CHIP_VERDE:
+ if (((adev->pdev->device == 0x6820) &&
+ ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83))) ||
+ ((adev->pdev->device == 0x6821) &&
+ ((adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87))) ||
+ ((adev->pdev->revision == 0x87) &&
+ ((adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682b)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/verde_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/verde_smc.bin");
+ }
+ break;
+ case CHIP_OLAND:
+ if (((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6600) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605) ||
+ (adev->pdev->device == 0x6610))) ||
+ ((adev->pdev->revision == 0x83) &&
+ (adev->pdev->device == 0x6610))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/oland_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/oland_smc.bin");
+ }
+ break;
+ case CHIP_HAINAN:
+ if (((adev->pdev->revision == 0x81) &&
+ (adev->pdev->device == 0x6660)) ||
+ ((adev->pdev->revision == 0x83) &&
+ ((adev->pdev->device == 0x6660) ||
+ (adev->pdev->device == 0x6663) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667)))) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/hainan_k_smc.bin");
+ } else if ((adev->pdev->revision == 0xc3) &&
+ (adev->pdev->device == 0x6665)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/banks_k_2_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/hainan_smc.bin");
+ }
+ break;
+ case CHIP_BONAIRE:
+ if ((adev->pdev->revision == 0x80) ||
+ (adev->pdev->revision == 0x81) ||
+ (adev->pdev->device == 0x665f)) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/bonaire_smc.bin");
+ }
+ break;
+ case CHIP_HAWAII:
+ if (adev->pdev->revision == 0x80) {
+ info->is_kicker = true;
+ strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+ } else {
+ strcpy(fw_name, "radeon/hawaii_smc.bin");
+ }
+ break;
case CHIP_TOPAZ:
if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
@@ -838,6 +875,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
sys_info->value = adev->pdev->subsystem_vendor;
break;
+ case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
+ sys_info->value = adev->pdev->devfn;
+ break;
default:
return -ENODEV;
}
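With the min/max-offset plumbing removed, CGS memory management collapses onto the stock BO path: allocation goes through plain amdgpu_bo_create() and mapping through plain amdgpu_bo_pin(), both visible in the hunks above. Condensed into one flow, a sketch using only calls that appear in this file's diff:

    struct amdgpu_bo *obj;
    uint64_t mcaddr;
    int r;

    r = amdgpu_bo_create(adev, size, align, true, domain, flags,
                         NULL, NULL, 0, &obj);
    if (r)
            return r;

    r = amdgpu_bo_reserve(obj, true);
    if (unlikely(r != 0))
            return r;
    r = amdgpu_bo_pin(obj, obj->preferred_domains, &mcaddr);
    amdgpu_bo_unreserve(obj);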
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8d1cf2d3e663..f51b41f094ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -346,10 +346,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- if (amdgpu_connector->edid) {
- kfree(amdgpu_connector->edid);
- amdgpu_connector->edid = NULL;
- }
+ kfree(amdgpu_connector->edid);
+ amdgpu_connector->edid = NULL;
}
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 60d8bedb694d..c6a214f1e991 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -473,11 +473,16 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
return -EPERM;
/* Check if we have user pages and nobody bound the BO already */
- if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
- size_t size = sizeof(struct page *);
-
- size *= bo->tbo.ttm->num_pages;
- memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+ lobj->user_pages) {
+ amdgpu_ttm_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
+ false);
+ if (r)
+ return r;
+ amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+ lobj->user_pages);
binding_userptr = true;
}
@@ -502,7 +507,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- bool need_mmap_lock = false;
unsigned i, tries = 10;
int r;
@@ -510,9 +514,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
if (p->bo_list) {
- need_mmap_lock = p->bo_list->first_userptr !=
- p->bo_list->num_entries;
amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+ p->mn = amdgpu_mn_get(p->adev);
}
INIT_LIST_HEAD(&duplicates);
@@ -521,9 +525,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
if (p->uf_entry.robj)
list_add(&p->uf_entry.tv.head, &p->validated);
- if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
-
while (1) {
struct list_head need_pages;
unsigned i;
@@ -543,23 +544,25 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
INIT_LIST_HEAD(&need_pages);
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo;
e = &p->bo_list->array[i];
+ bo = e->robj;
- if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
+ if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
/* We acquired a page array, but somebody
* invalidated it. Free it and try again
*/
release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages,
+ bo->tbo.ttm->num_pages,
false);
kvfree(e->user_pages);
e->user_pages = NULL;
}
- if (e->robj->tbo.ttm->state != tt_bound &&
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
!e->user_pages) {
list_del(&e->tv.head);
list_add(&e->tv.head, &need_pages);
@@ -636,9 +639,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
- fpriv->vm.last_eviction_counter =
- atomic64_read(&p->adev->num_evictions);
-
if (p->bo_list) {
struct amdgpu_bo *gds = p->bo_list->gds_obj;
struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -679,9 +679,6 @@ error_validate:
error_free_pages:
- if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
-
if (p->bo_list) {
for (i = p->bo_list->first_userptr;
i < p->bo_list->num_entries; ++i) {
@@ -728,11 +725,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
- if (!error)
- ttm_eu_fence_buffer_objects(&parser->ticket,
- &parser->validated,
- parser->fence);
- else if (backoff)
+ if (error && backoff)
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
@@ -768,10 +761,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
- if (r)
- return r;
-
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
return r;
@@ -825,7 +814,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
}
- r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
+ r = amdgpu_vm_handle_moved(adev, vm);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
+ if (r)
+ return r;
if (amdgpu_vm_debug && p->bo_list) {
/* Invalidate all BOs to test for userspace bugs */
@@ -835,7 +830,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (!bo)
continue;
- amdgpu_vm_bo_invalidate(adev, bo);
+ amdgpu_vm_bo_invalidate(adev, bo, false);
}
}
@@ -860,7 +855,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
}
if (p->job->vm) {
- p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
+ p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
r = amdgpu_bo_vm_update_pte(p);
if (r)
@@ -928,11 +923,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
uint64_t offset;
uint8_t *kptr;
- m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
- &aobj);
- if (!aobj) {
+ r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
+ &aobj, &m);
+ if (r) {
DRM_ERROR("IB va_start is invalid\n");
- return -EINVAL;
+ return r;
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
@@ -1133,14 +1128,31 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct amdgpu_job *job;
+ unsigned i;
+ uint64_t seq;
+
int r;
+ amdgpu_mn_lock(p->mn);
+ if (p->bo_list) {
+ for (i = p->bo_list->first_userptr;
+ i < p->bo_list->num_entries; ++i) {
+ struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ amdgpu_mn_unlock(p->mn);
+ return -ERESTARTSYS;
+ }
+ }
+ }
+
job = p->job;
p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
if (r) {
amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
return r;
}
@@ -1148,14 +1160,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job->fence_ctx = entity->fence_context;
p->fence = dma_fence_get(&job->base.s_fence->finished);
+ r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
+ if (r) {
+ dma_fence_put(p->fence);
+ dma_fence_put(&job->base.s_fence->finished);
+ amdgpu_job_free(job);
+ amdgpu_mn_unlock(p->mn);
+ return r;
+ }
+
amdgpu_cs_post_dependencies(p);
- cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
- job->uf_sequence = cs->out.handle;
+ cs->out.handle = seq;
+ job->uf_sequence = seq;
+
amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
amd_sched_entity_push_job(&job->base);
+
+ ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
+ amdgpu_mn_unlock(p->mn);
+
return 0;
}
@@ -1383,6 +1409,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
array[i] = fence;
} else { /* NULL, the fence has been already signaled */
r = 1;
+ first = i;
goto out;
}
}
@@ -1462,78 +1489,36 @@ err_free_fences:
* virtual memory address. Returns allocation structure when found, NULL
* otherwise.
*/
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo)
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **map)
{
+ struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
- unsigned i;
-
- if (!parser->bo_list)
- return NULL;
+ int r;
addr /= AMDGPU_GPU_PAGE_SIZE;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo_list_entry *lobj;
-
- lobj = &parser->bo_list->array[i];
- if (!lobj->bo_va)
- continue;
-
- list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
-
- *bo = lobj->bo_va->base.bo;
- return mapping;
- }
-
- list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
- if (mapping->start > addr ||
- addr > mapping->last)
- continue;
+ mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
+ if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
+ return -EINVAL;
- *bo = lobj->bo_va->base.bo;
- return mapping;
- }
- }
+ *bo = mapping->bo_va->base.bo;
+ *map = mapping;
- return NULL;
-}
+ /* Double check that the BO is reserved by this CS */
+ if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+ return -EINVAL;
-/**
- * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
- *
- * @parser: command submission parser context
- *
- * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
- */
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
-{
- unsigned i;
- int r;
+ r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
+ if (unlikely(r))
+ return r;
- if (!parser->bo_list)
+ if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
return 0;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
-
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r))
- return r;
-
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- continue;
-
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r))
- return r;
- }
-
- return 0;
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
}
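The net effect of this rework: amdgpu_cs_find_mapping() now looks the address up in the VM's mapping tree via amdgpu_vm_bo_lookup_mapping() instead of walking the whole BO list, reports failure as an error code, and absorbs the bind/validate work that the deleted amdgpu_cs_sysvm_access_required() used to do as a separate pass over every BO. Caller-side, as the IB-fill hunk above already shows, the pattern becomes:

    struct amdgpu_bo_va_mapping *m;
    struct amdgpu_bo *aobj;
    int r;

    r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj, &m);
    if (r) {
            DRM_ERROR("IB va_start is invalid\n");
            return r;       /* was a bare -EINVAL before */
    }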
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a11e44340b23..75c933b1a432 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -246,8 +246,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
return 0;
}
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
- struct dma_fence *fence)
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+ struct dma_fence *fence, uint64_t* handler)
{
struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
uint64_t seq = cring->sequence;
@@ -258,9 +258,9 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
other = cring->fences[idx];
if (other) {
signed long r;
- r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+ r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
if (r < 0)
- DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+ return r;
}
dma_fence_get(fence);
@@ -271,8 +271,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
spin_unlock(&ctx->ring_lock);
dma_fence_put(other);
+ if (handler)
+ *handler = seq;
- return seq;
+ return 0;
}
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
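This is the other half of the amdgpu_cs_submit() change earlier in this pull: the wait on the fence-ring slot is now interruptible, and instead of logging and pressing on, the error (typically -ERESTARTSYS) propagates to the caller while the sequence number travels back through the out parameter. In sketch form, with the cleanup shortened relative to the real submit path:

    uint64_t seq;
    int r;

    r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
    if (r) {
            dma_fence_put(p->fence);  /* the real code also frees the job */
            return r;
    }
    cs->out.handle = seq;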
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e630d918fefc..3e84ddf9e3b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -65,6 +65,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
static const char *amdgpu_asic_name[] = {
"TAHITI",
@@ -402,6 +403,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
*/
static int amdgpu_doorbell_init(struct amdgpu_device *adev)
{
+ /* No doorbell on SI hardware generation */
+ if (adev->asic_type < CHIP_BONAIRE) {
+ adev->doorbell.base = 0;
+ adev->doorbell.size = 0;
+ adev->doorbell.num_doorbells = 0;
+ adev->doorbell.ptr = NULL;
+ return 0;
+ }
+
/* doorbell bar mapping */
adev->doorbell.base = pci_resource_start(adev->pdev, 2);
adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -887,6 +897,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
return r;
}
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct atom_context *ctx = adev->mode_info.atom_context;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+ NULL);
+
/**
* amdgpu_atombios_fini - free the driver info and callbacks for atombios
*
@@ -906,6 +930,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev)
adev->mode_info.atom_context = NULL;
kfree(adev->mode_info.atom_card_info);
adev->mode_info.atom_card_info = NULL;
+ device_remove_file(adev->dev, &dev_attr_vbios_version);
}
/**
@@ -922,6 +947,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
{
struct card_info *atom_card_info =
kzalloc(sizeof(struct card_info), GFP_KERNEL);
+ int ret;
if (!atom_card_info)
return -ENOMEM;
@@ -958,6 +984,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_init(adev);
amdgpu_atombios_allocate_fb_scratch(adev);
}
+
+ ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+ if (ret) {
+ DRM_ERROR("Failed to create device file for VBIOS version\n");
+ return ret;
+ }
+
return 0;
}
@@ -1757,10 +1790,8 @@ static int amdgpu_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
- if (amdgpu_sriov_vf(adev)) {
- amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
+ if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, false);
- }
return 0;
}
@@ -2051,9 +2082,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- if (adev->asic_type >= CHIP_BONAIRE)
- /* doorbell bar mapping */
- amdgpu_doorbell_init(adev);
+ /* doorbell bar mapping */
+ amdgpu_doorbell_init(adev);
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
@@ -2201,6 +2231,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ r = amdgpu_debugfs_vbios_dump_init(adev);
+ if (r)
+ DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
+
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -2276,8 +2310,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
- if (adev->asic_type >= CHIP_BONAIRE)
- amdgpu_doorbell_fini(adev);
+ amdgpu_doorbell_fini(adev);
amdgpu_debugfs_regs_cleanup(adev);
}
@@ -2546,7 +2579,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
- (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
if (adev->ip_blocks[i].status.hang) {
DRM_INFO("Some block need full reset!\n");
return true;
@@ -2654,7 +2688,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
mutex_lock(&adev->virt.lock_reset);
atomic_inc(&adev->gpu_reset_counter);
- adev->gfx.in_reset = true;
+ adev->in_sriov_reset = true;
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -2765,7 +2799,7 @@ give_up_reset:
dev_info(adev->dev, "GPU reset successed!\n");
}
- adev->gfx.in_reset = false;
+ adev->in_sriov_reset = false;
mutex_unlock(&adev->virt.lock_reset);
return r;
}
@@ -3463,10 +3497,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
valuesize = sizeof(values);
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
- else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
- r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
- &valuesize);
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
else
return -EINVAL;
@@ -3754,6 +3785,28 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
{
return 0;
}
+
+static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+
+ seq_write(m, adev->bios, adev->bios_size);
+ return 0;
+}
+
+static const struct drm_info_list amdgpu_vbios_dump_list[] = {
+ {"amdgpu_vbios",
+ amdgpu_debugfs_get_vbios_dump,
+ 0, NULL},
+};
+
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
+{
+ return amdgpu_debugfs_add_files(adev,
+ amdgpu_vbios_dump_list, 1);
+}
#else
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
{
@@ -3763,5 +3816,9 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
+{
+ return 0;
+}
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
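Both new VBIOS interfaces land in standard locations: the version string as a sysfs device attribute, the raw image as a DRM debugfs file. A small userspace sketch; the card0 and dri/0 paths are assumptions about the usual DRM minor layout, not something this patch pins down:

    #include <stdio.h>

    int main(void)
    {
            /* sysfs attribute created by amdgpu_atombios_init() above */
            FILE *f = fopen("/sys/class/drm/card0/device/vbios_version", "r");
            char version[64];

            if (f && fgets(version, sizeof(version), f))
                    printf("VBIOS: %s", version);
            if (f)
                    fclose(f);
            /* Raw image (debugfs, typically root-only):
             *   /sys/kernel/debug/dri/0/amdgpu_vbios */
            return 0;
    }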
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 1cb52fd19060..e997ebbe43ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -960,8 +960,10 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
}
struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
if (idx < adev->pm.dpm.num_of_vce_states)
return &adev->pm.dpm.vce_states[idx];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 8c96a4caa715..f79f9ea58b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -241,177 +241,119 @@ enum amdgpu_pcie_gen {
AMDGPU_PCIE_GEN_INVALID = 0xffff
};
-struct amdgpu_dpm_funcs {
- int (*get_temperature)(struct amdgpu_device *adev);
- int (*pre_set_power_state)(struct amdgpu_device *adev);
- int (*set_power_state)(struct amdgpu_device *adev);
- void (*post_set_power_state)(struct amdgpu_device *adev);
- void (*display_configuration_changed)(struct amdgpu_device *adev);
- u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
- u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
- void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
- void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
- int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
- bool (*vblank_too_short)(struct amdgpu_device *adev);
- void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
- void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
- void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
- void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
- u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
- int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
- int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
- int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(struct amdgpu_device *adev);
- int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*get_mclk_od)(struct amdgpu_device *adev);
- int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
- int (*check_state_equal)(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
- bool *equal);
- int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value,
- int *size);
-
- struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
- int (*reset_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *request);
- int (*get_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *query);
- int (*set_power_profile_state)(struct amdgpu_device *adev,
- struct amd_pp_profile *request);
- int (*switch_power_profile)(struct amdgpu_device *adev,
- enum amd_pp_profile_type type);
-};
+#define amdgpu_dpm_pre_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_post_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_display_configuration_changed(adev) \
+ ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_dpm_print_power_state(adev, ps) \
+ ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
+
+#define amdgpu_dpm_vblank_too_short(adev) \
+ ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_enable_bapm(adev, e) \
+ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \
- (adev)->pm.funcs->read_sensor((adev), (idx), (value), (size)))
+ ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
#define amdgpu_dpm_get_temperature(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_temperature((adev)))
+ ((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
- (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+ ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
- (adev)->pm.funcs->get_fan_control_mode((adev)))
+ ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+ ((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
- (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+ ((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
- -EINVAL)
+ ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_sclk((adev), (l)))
+ ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->get_mclk((adev), (l)))
-
+ ((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
- (adev)->pm.funcs->force_performance_level((adev), (l)))
+ ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_powergate_uvd(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_uvd((adev), (g)))
+ ((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
#define amdgpu_dpm_powergate_vce(adev, g) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
- (adev)->pm.funcs->powergate_vce((adev), (g)))
+ ((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
#define amdgpu_dpm_get_current_power_state(adev) \
- (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+ ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
#define amdgpu_dpm_get_pp_num_states(adev, data) \
- (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+ ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
#define amdgpu_dpm_get_pp_table(adev, table) \
- (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+ ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+ ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+ ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
#define amdgpu_dpm_force_clock_level(adev, type, level) \
- (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+ ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
#define amdgpu_dpm_get_sclk_od(adev) \
- (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+ ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_sclk_od(adev, value) \
- (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+ ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+ ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+ ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
- (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \
+ ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output))
-#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
+ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
#define amdgpu_dpm_get_vce_clock_state(adev, i) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
- (adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+ ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
-#define amdgpu_dpm_get_performance_level(adev) \
- ((adev)->pp_enabled ? \
- (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
- (adev)->pm.dpm.forced_level)
+#define amdgpu_dpm_get_performance_level(adev) \
+ ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
#define amdgpu_dpm_reset_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
(adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_get_power_profile_state(adev, query) \
- ((adev)->powerplay.pp_funcs->get_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->get_power_profile_state(\
(adev)->powerplay.pp_handle, query))
#define amdgpu_dpm_set_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->set_power_profile_state(\
+ ((adev)->powerplay.pp_funcs->set_power_profile_state(\
(adev)->powerplay.pp_handle, request))
#define amdgpu_dpm_switch_power_profile(adev, type) \
- ((adev)->powerplay.pp_funcs->switch_power_profile(\
+ ((adev)->powerplay.pp_funcs->switch_power_profile(\
(adev)->powerplay.pp_handle, type))
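
These wrappers now call straight through pp_funcs, without the old
pp_enabled fallback to adev->pm.funcs, so callers are expected to
NULL-check the function pointer first. A minimal sketch of that
guarded-call pattern (hypothetical caller; "adev" assumed in scope):

	if (adev->powerplay.pp_funcs->get_sclk)
		sclk = amdgpu_dpm_get_sclk(adev, false);
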
struct amdgpu_dpm {
@@ -485,7 +427,6 @@ struct amdgpu_pm {
struct amdgpu_dpm dpm;
const struct firmware *fw; /* SMC firmware */
uint32_t fw_version;
- const struct amdgpu_dpm_funcs *funcs;
uint32_t pcie_gen_mask;
uint32_t pcie_mlw_mask;
struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
@@ -551,6 +492,6 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
u8 amdgpu_encode_pci_lane_width(u32 lanes);
struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+amdgpu_get_vce_clock_state(void *handle, u32 idx);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0f16986ec5bc..4f98960e47f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -69,9 +69,10 @@
* - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
* - 3.18.0 - Export gpu always on cu bitmap
* - 3.19.0 - Add support for UVD MJPEG decode
+ * - 3.20.0 - Add support for local BOs
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 19
+#define KMS_DRIVER_MINOR 20
#define KMS_DRIVER_PATCHLEVEL 0
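
Userspace can gate its use of local BOs on this version bump. A hedged
sketch using libdrm's drmGetVersion() (error handling elided; "fd" is
assumed to be an open amdgpu render node):

	#include <xf86drm.h>

	drmVersionPtr ver = drmGetVersion(fd);
	int has_local_bos = ver &&
		(ver->version_major > 3 ||
		 (ver->version_major == 3 && ver->version_minor >= 20));
	drmFreeVersion(ver);
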
int amdgpu_vram_limit = 0;
@@ -91,7 +92,7 @@ int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
-unsigned amdgpu_ip_block_mask = 0xffffffff;
+uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
int amdgpu_vm_size = -1;
@@ -106,14 +107,14 @@ int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
int amdgpu_no_evict = 0;
int amdgpu_direct_gma_size = 0;
-unsigned amdgpu_pcie_gen_cap = 0;
-unsigned amdgpu_pcie_lane_cap = 0;
-unsigned amdgpu_cg_mask = 0xffffffff;
-unsigned amdgpu_pg_mask = 0xffffffff;
-unsigned amdgpu_sdma_phase_quantum = 32;
+uint amdgpu_pcie_gen_cap = 0;
+uint amdgpu_pcie_lane_cap = 0;
+uint amdgpu_cg_mask = 0xffffffff;
+uint amdgpu_pg_mask = 0xffffffff;
+uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-unsigned amdgpu_pp_feature_mask = 0xffffffff;
+uint amdgpu_pp_feature_mask = 0xffffffff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
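
The unsigned -> uint switch matches the type token that the
module_param() type-checking macros expect for these parameters. A
hedged reminder of the pattern (parameter name illustrative):

	uint amdgpu_pp_feature_mask = 0xffffffff;
	module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444);
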
@@ -608,6 +609,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(dev);
drm_dev_unref(dev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
}
static void
@@ -852,6 +855,7 @@ static struct drm_driver kms_driver = {
.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
.gem_prime_vmap = amdgpu_gem_prime_vmap,
.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
+ .gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9afa9c097e1f..562930b17a6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -149,7 +149,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED,
- true, &gobj);
+ true, NULL, &gobj);
if (ret) {
pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
return -ENOMEM;
@@ -303,10 +303,10 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
if (rfb->obj) {
amdgpufb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
+ drm_framebuffer_unregister_private(&rfb->base);
+ drm_framebuffer_cleanup(&rfb->base);
}
drm_fb_helper_fini(&rfbdev->helper);
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7171968f261e..b0d45c8e6bb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
}
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
- int alignment, u32 initial_domain,
- u64 flags, bool kernel,
- struct drm_gem_object **obj)
+ int alignment, u32 initial_domain,
+ u64 flags, bool kernel,
+ struct reservation_object *resv,
+ struct drm_gem_object **obj)
{
- struct amdgpu_bo *robj;
+ struct amdgpu_bo *bo;
int r;
*obj = NULL;
@@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
retry:
r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
- flags, NULL, NULL, 0, &robj);
+ flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -71,7 +72,7 @@ retry:
}
return r;
}
- *obj = &robj->gem_base;
+ *obj = &bo->gem_base;
return 0;
}
@@ -112,7 +113,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
+ struct mm_struct *mm;
int r;
+
+ mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
+ if (mm && mm != current->mm)
+ return -EPERM;
+
+ if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+ abo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return -EPERM;
+
r = amdgpu_bo_reserve(abo, false);
if (r)
return r;
@@ -127,35 +138,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0;
}
-static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
-{
- /* if anything is swapped out don't swap it in here,
- just abort and wait for the next CS */
- if (!amdgpu_bo_gpu_accessible(bo))
- return -ERESTARTSYS;
-
- if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
- return -ERESTARTSYS;
-
- return 0;
-}
-
-static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct list_head *list)
-{
- struct ttm_validate_buffer *entry;
-
- list_for_each_entry(entry, list, head) {
- struct amdgpu_bo *bo =
- container_of(entry->bo, struct amdgpu_bo, tbo);
- if (amdgpu_gem_vm_check(NULL, bo))
- return false;
- }
-
- return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
-}
-
void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
@@ -165,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry vm_pd;
- struct list_head list;
+ struct list_head list, duplicates;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
int r;
INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
tv.shared = true;
@@ -179,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
@@ -189,7 +172,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
if (bo_va && --bo_va->ref_count == 0) {
amdgpu_vm_bo_rmv(adev, bo_va);
- if (amdgpu_gem_vm_ready(adev, vm, &list)) {
+ if (amdgpu_vm_ready(vm)) {
struct dma_fence *fence = NULL;
r = amdgpu_vm_clear_freed(adev, vm, &fence);
@@ -214,18 +197,22 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
union drm_amdgpu_gem_create *args = data;
+ uint64_t flags = args->in.domain_flags;
uint64_t size = args->in.bo_size;
+ struct reservation_object *resv = NULL;
struct drm_gem_object *gobj;
uint32_t handle;
- bool kernel = false;
int r;
/* reject invalid gem flags */
- if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_VRAM_CLEARED))
+ if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED |
+ AMDGPU_GEM_CREATE_VM_ALWAYS_VALID))
return -EINVAL;
/* reject invalid gem domains */
@@ -240,7 +227,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
- kernel = true;
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
size = size << AMDGPU_GDS_SHIFT;
else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
@@ -252,10 +239,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
}
size = roundup(size, PAGE_SIZE);
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ r = amdgpu_bo_reserve(vm->root.base.bo, false);
+ if (r)
+ return r;
+
+ resv = vm->root.base.bo->tbo.resv;
+ }
+
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
(u32)(0xffffffff & args->in.domains),
- args->in.domain_flags,
- kernel, &gobj);
+ flags, false, resv, &gobj);
+ if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+ if (!r) {
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+ abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+ }
+ amdgpu_bo_unreserve(vm->root.base.bo);
+ }
if (r)
return r;
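
A hedged userspace sketch of requesting such a per-VM BO through the
create ioctl (names from amdgpu_drm.h; "fd" assumed open, error
handling elided):

	union drm_amdgpu_gem_create args = {0};

	args.in.bo_size = 4096;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	args.in.domain_flags = AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;
	/* On success args.out.handle names the new, per-VM BO. */
	drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args);
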
@@ -297,9 +299,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
/* create a gem object to contain this object in */
- r = amdgpu_gem_object_create(adev, args->size, 0,
- AMDGPU_GEM_DOMAIN_CPU, 0,
- 0, &gobj);
+ r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+ 0, 0, NULL, &gobj);
if (r)
return r;
@@ -317,8 +318,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
-
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
@@ -333,8 +332,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(bo);
if (r)
goto free_pages;
-
- up_read(&current->mm->mmap_sem);
}
r = drm_gem_handle_create(filp, gobj, &handle);
@@ -511,10 +508,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct list_head *list,
uint32_t operation)
{
- int r = -ERESTARTSYS;
+ int r;
- if (!amdgpu_gem_vm_ready(adev, vm, list))
- goto error;
+ if (!amdgpu_vm_ready(vm))
+ return;
r = amdgpu_vm_update_directories(adev, vm);
if (r)
@@ -551,7 +548,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
struct amdgpu_bo_list_entry vm_pd;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
- struct list_head list;
+ struct list_head list, duplicates;
uint64_t va_flags;
int r = 0;
@@ -587,6 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
}
INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
!(args->flags & AMDGPU_VM_PAGE_PRT)) {
gobj = drm_gem_object_lookup(filp, args->handle);
@@ -603,7 +601,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_unref;
@@ -669,6 +667,7 @@ error_unref:
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
struct amdgpu_bo *robj;
@@ -716,6 +715,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
+ if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+ amdgpu_vm_bo_invalidate(adev, robj, true);
+
amdgpu_bo_unreserve(robj);
break;
default:
@@ -745,8 +747,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = amdgpu_gem_object_create(adev, args->size, 0,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- ttm_bo_type_device,
- &gobj);
+ false, NULL, &gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 4f6c68fc1dd9..4fcd98e65998 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -260,8 +260,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
/* create MQD for KIQ */
ring = &adev->gfx.kiq.ring;
if (!ring->mqd_obj) {
+ /* Originally the KIQ MQD was put in the GTT domain, but for SRIOV the
+ * VRAM domain is a must; otherwise the hypervisor triggers a SAVE_VF
+ * failure after the driver is unloaded, since by then the MQD has been
+ * deallocated and gart-unbound. To avoid that divergence, use the VRAM
+ * domain for the KIQ MQD on both SRIOV and bare-metal.
+ */
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+ AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 3ab4c65ecc8b..f5f27e4f0f7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -169,6 +169,12 @@ restart_ih:
while (adev->irq.ih.rptr != wptr) {
u32 ring_index = adev->irq.ih.rptr >> 2;
+ /* Prescreening of high-frequency interrupts */
+ if (!amdgpu_ih_prescreen_iv(adev)) {
+ adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
+ continue;
+ }
+
/* Before dispatching irq to IP blocks, send it to amdkfd */
amdgpu_amdkfd_interrupt(adev,
(const void *) &adev->irq.ih.ring[ring_index]);
@@ -190,3 +196,79 @@ restart_ih:
return IRQ_HANDLED;
}
+
+/**
+ * amdgpu_ih_add_fault - Add a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a retry page fault interrupt is
+ * received. If this is a new page fault, it will be added to a hash
+ * table. The return value indicates whether this is a new fault, or
+ * a fault that was already known and is already being handled.
+ *
+ * If there are too many pending page faults, this will fail. Retry
+ * interrupts should be ignored in this case until there is enough
+ * free space.
+ *
+ * Returns 0 if the fault was added, 1 if the fault was already known,
+ * -ENOSPC if there are too many pending faults.
+ */
+int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
+{
+ unsigned long flags;
+ int r = -ENOSPC;
+
+ if (WARN_ON_ONCE(!adev->irq.ih.faults))
+ /* Should be allocated in <IP>_ih_sw_init on GPUs that
+ * support retry faults and require retry filtering.
+ */
+ return r;
+
+ spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+
+ /* Only let the hash table fill up to 50% for best performance */
+ if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
+ goto unlock_out;
+
+ r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
+ if (!r)
+ adev->irq.ih.faults->count++;
+
+ /* chash_table_copy_in should never fail unless we're losing count */
+ WARN_ON_ONCE(r < 0);
+
+unlock_out:
+ spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+ return r;
+}
+
+/**
+ * amdgpu_ih_clear_fault - Remove a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a page fault has been handled. Any
+ * future interrupt with this key will be processed as a new
+ * page fault.
+ */
+void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
+{
+ unsigned long flags;
+ int r;
+
+ if (!adev->irq.ih.faults)
+ return;
+
+ spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+
+ r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
+ if (!WARN_ON_ONCE(r < 0)) {
+ adev->irq.ih.faults->count--;
+ WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
+ }
+
+ spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+}
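
A hedged sketch of how an IP interrupt handler might use this pair from
its prescreen path (the key encoding and variable names here are
illustrative, not the actual vega10 code):

	u64 key = ((u64)pasid << 48) | (addr >> PAGE_SHIFT);
	int r = amdgpu_ih_add_fault(adev, key);

	if (r == 1)		/* fault already known, drop this retry IV */
		return false;
	if (r == -ENOSPC)	/* too many pending faults, drop retries */
		return false;
	/* new fault: process it, then free the slot once it is resolved */
	amdgpu_ih_clear_fault(adev, key);
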
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 3de8e74e5b3a..ada89358e220 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
+#include <linux/chash.h>
+
struct amdgpu_device;
/*
* vega10+ IH clients
@@ -69,6 +71,13 @@ enum amdgpu_ih_clientid
#define AMDGPU_IH_CLIENTID_LEGACY 0
+#define AMDGPU_PAGEFAULT_HASH_BITS 8
+struct amdgpu_retryfault_hashtable {
+ DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+ spinlock_t lock;
+ int count;
+};
+
/*
* R6xx+ IH ring
*/
@@ -87,6 +96,7 @@ struct amdgpu_ih_ring {
bool use_doorbell;
bool use_bus_addr;
dma_addr_t rb_dma_addr; /* only used when use_bus_addr = true */
+ struct amdgpu_retryfault_hashtable *faults;
};
#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
@@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
bool use_bus_addr);
void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
int amdgpu_ih_process(struct amdgpu_device *adev);
+int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key);
+void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e16229000a98..4fd06f8d9768 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -825,7 +825,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
}
r = amdgpu_vm_init(adev, &fpriv->vm,
- AMDGPU_VM_CONTEXT_GFX);
+ AMDGPU_VM_CONTEXT_GFX, 0);
if (r) {
kfree(fpriv);
goto out_suspend;
@@ -841,8 +841,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
if (amdgpu_sriov_vf(adev)) {
r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
- if (r)
+ if (r) {
+ amdgpu_vm_fini(adev, &fpriv->vm);
+ kfree(fpriv);
goto out_suspend;
+ }
}
mutex_init(&fpriv->bo_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 3b0f2ec6eec7..bd67f4cb8e6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -50,8 +50,10 @@ struct amdgpu_mn {
struct hlist_node node;
/* objects protected by lock */
- struct mutex lock;
+ struct rw_semaphore lock;
struct rb_root_cached objects;
+ struct mutex read_lock;
+ atomic_t recursion;
};
struct amdgpu_mn_node {
@@ -74,7 +76,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
struct amdgpu_bo *bo, *next_bo;
mutex_lock(&adev->mn_lock);
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
hash_del(&rmn->node);
rbtree_postorder_for_each_entry_safe(node, next_node,
&rmn->objects.rb_root, it.rb) {
@@ -84,7 +86,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
}
kfree(node);
}
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock);
mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
kfree(rmn);
@@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
schedule_work(&rmn->work);
}
+
+/**
+ * amdgpu_mn_lock - take the write side lock for this mn
+ */
+void amdgpu_mn_lock(struct amdgpu_mn *mn)
+{
+ if (mn)
+ down_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_unlock - drop the write side lock for this mn
+ */
+void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+{
+ if (mn)
+ up_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_read_lock - take the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Take the rmn read side lock.
+ */
+static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+{
+ mutex_lock(&rmn->read_lock);
+ if (atomic_inc_return(&rmn->recursion) == 1)
+ down_read_non_owner(&rmn->lock);
+ mutex_unlock(&rmn->read_lock);
+}
+
+/**
+ * amdgpu_mn_read_unlock - drop the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Drop the rmn read side lock.
+ */
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+{
+ if (atomic_dec_return(&rmn->recursion) == 0)
+ up_read_non_owner(&rmn->lock);
+}
+
/**
* amdgpu_mn_invalidate_node - unmap all BOs of a node
*
@@ -126,23 +175,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
continue;
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- DRM_ERROR("(%ld) failed to reserve user bo\n", r);
- continue;
- }
-
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
if (r <= 0)
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (r)
- DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
- amdgpu_bo_unreserve(bo);
+ amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
}
}
@@ -168,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
- mutex_lock(&rmn->lock);
+ amdgpu_mn_read_lock(rmn);
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
@@ -179,13 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
amdgpu_mn_invalidate_node(node, start, end);
}
+}
- mutex_unlock(&rmn->lock);
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
+ */
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+
+ amdgpu_mn_read_unlock(rmn);
}
static const struct mmu_notifier_ops amdgpu_mn_ops = {
.release = amdgpu_mn_release,
.invalidate_range_start = amdgpu_mn_invalidate_range_start,
+ .invalidate_range_end = amdgpu_mn_invalidate_range_end,
};
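
With invalidate_range_start() taking the read side and
invalidate_range_end() dropping it, a writer can block invalidation for
the whole window where user pages must stay valid. A hedged sketch of
the command-submission side (other CS details elided):

	struct amdgpu_mn *mn = amdgpu_mn_get(adev);

	amdgpu_mn_lock(mn);	/* holds off range_start()..range_end() */
	/* ... validate userptr pages and push the job to the scheduler ... */
	amdgpu_mn_unlock(mn);
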
/**
@@ -195,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
*
* Creates a notifier context for current->mm.
*/
-static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
{
struct mm_struct *mm = current->mm;
struct amdgpu_mn *rmn;
@@ -220,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
rmn->adev = adev;
rmn->mm = mm;
rmn->mn.ops = &amdgpu_mn_ops;
- mutex_init(&rmn->lock);
+ init_rwsem(&rmn->lock);
rmn->objects = RB_ROOT_CACHED;
+ mutex_init(&rmn->read_lock);
+ atomic_set(&rmn->recursion, 0);
r = __mmu_notifier_register(&rmn->mn, mm);
if (r)
@@ -267,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
INIT_LIST_HEAD(&bos);
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
kfree(node);
@@ -281,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
if (!node) {
node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
if (!node) {
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
return -ENOMEM;
}
}
@@ -296,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
interval_tree_insert(&node->it, &rmn->objects);
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
return 0;
}
@@ -322,7 +381,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
return;
}
- mutex_lock(&rmn->lock);
+ down_write(&rmn->lock);
/* save the next list entry for later */
head = bo->mn_list.next;
@@ -337,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
kfree(node);
}
- mutex_unlock(&rmn->lock);
+ up_write(&rmn->lock);
mutex_unlock(&adev->mn_lock);
}
+
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
index fbdff3e02aa3..d0095a3793b8 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,20 +19,34 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
+ * Authors: Christian König
*/
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-#include "eventmanager.h"
-#include "power_state.h"
-#include "hardwaremanager.h"
+#ifndef __AMDGPU_MN_H__
+#define __AMDGPU_MN_H__
-int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id);
-
-int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id);
-
-int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id);
+/*
+ * MMU Notifier
+ */
+struct amdgpu_mn;
-int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip);
+#if defined(CONFIG_MMU_NOTIFIER)
+void amdgpu_mn_lock(struct amdgpu_mn *mn);
+void amdgpu_mn_unlock(struct amdgpu_mn *mn);
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
+int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
+void amdgpu_mn_unregister(struct amdgpu_bo *bo);
+#else
+static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
+static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
+static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+{
+ return NULL;
+}
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+{
+ return -ENODEV;
+}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+#endif
-int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 9e495da0bb03..6982baeccd14 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -64,11 +64,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
return false;
}
-static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
- struct ttm_placement *placement,
- struct ttm_place *places,
- u32 domain, u64 flags)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+ struct ttm_placement *placement = &abo->placement;
+ struct ttm_place *places = abo->placements;
+ u64 flags = abo->flags;
u32 c = 0;
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
@@ -151,27 +152,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
placement->busy_placement = places;
}
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-
- amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
- domain, abo->flags);
-}
-
-static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
- struct ttm_placement *placement)
-{
- BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
-
- memcpy(bo->placements, placement->placement,
- placement->num_placement * sizeof(struct ttm_place));
- bo->placement.num_placement = placement->num_placement;
- bo->placement.num_busy_placement = placement->num_busy_placement;
- bo->placement.placement = bo->placements;
- bo->placement.busy_placement = bo->placements;
-}
-
/**
* amdgpu_bo_create_reserved - create reserved BO for kernel use
*
@@ -303,14 +283,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
*cpu_addr = NULL;
}
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- uint64_t init_value,
- struct amdgpu_bo **bo_ptr)
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+ unsigned long size, int byte_align,
+ bool kernel, u32 domain, u64 flags,
+ struct sg_table *sg,
+ struct reservation_object *resv,
+ uint64_t init_value,
+ struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
enum ttm_bo_type type;
@@ -384,10 +363,11 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
#endif
- amdgpu_fill_placement_to_bo(bo, placement);
- /* Kernel allocation are uninterruptible */
+ bo->tbo.bdev = &adev->mman.bdev;
+ amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+ /* Kernel allocations are uninterruptible */
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
@@ -442,27 +422,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
unsigned long size, int byte_align,
struct amdgpu_bo *bo)
{
- struct ttm_placement placement = {0};
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
int r;
if (bo->shadow)
return 0;
- memset(&placements, 0, sizeof(placements));
- amdgpu_ttm_placement_init(adev, &placement, placements,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_SHADOW);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
- AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_SHADOW,
- NULL, &placement,
- bo->tbo.resv,
- 0,
- &bo->shadow);
+ r = amdgpu_bo_do_create(adev, size, byte_align, true,
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_SHADOW,
+ NULL, bo->tbo.resv, 0,
+ &bo->shadow);
if (!r) {
bo->shadow->parent = amdgpu_bo_ref(bo);
mutex_lock(&adev->shadow_list_lock);
@@ -484,18 +454,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
uint64_t init_value,
struct amdgpu_bo **bo_ptr)
{
- struct ttm_placement placement = {0};
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
int r;
- memset(&placements, 0, sizeof(placements));
- amdgpu_ttm_placement_init(adev, &placement, placements,
- domain, parent_flags);
-
- r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
- parent_flags, sg, &placement, resv,
- init_value, bo_ptr);
+ r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
+ parent_flags, sg, resv, init_value, bo_ptr);
if (r)
return r;
@@ -672,7 +635,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r, i;
- unsigned fpfn, lpfn;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
return -EPERM;
@@ -704,22 +666,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
}
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ /* force to pin into visible video ram */
+ if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
amdgpu_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
- /* force to pin into visible video ram */
- if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
- !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
- (!max_offset || max_offset >
- adev->mc.visible_vram_size)) {
- if (WARN_ON_ONCE(min_offset >
- adev->mc.visible_vram_size))
- return -EINVAL;
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- } else {
- fpfn = min_offset >> PAGE_SHIFT;
- lpfn = max_offset >> PAGE_SHIFT;
- }
+ unsigned fpfn, lpfn;
+
+ fpfn = min_offset >> PAGE_SHIFT;
+ lpfn = max_offset >> PAGE_SHIFT;
+
if (fpfn > bo->placements[i].fpfn)
bo->placements[i].fpfn = fpfn;
if (!bo->placements[i].lpfn ||
@@ -929,7 +885,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
abo = container_of(bo, struct amdgpu_bo, tbo);
- amdgpu_vm_bo_invalidate(adev, abo);
+ amdgpu_vm_bo_invalidate(adev, abo, evict);
amdgpu_bo_kunmap(abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index a288fa6d72c8..39b6bf6fb051 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -35,6 +35,7 @@
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
+ struct amdgpu_bo_va *bo_va;
struct list_head list;
struct rb_node rb;
uint64_t start;
@@ -49,12 +50,17 @@ struct amdgpu_bo_va {
struct amdgpu_vm_bo_base base;
/* protected by bo being reserved */
- struct dma_fence *last_pt_update;
unsigned ref_count;
+ /* all other members protected by the VM PD being reserved */
+ struct dma_fence *last_pt_update;
+
/* mappings for this bo_va */
struct list_head invalids;
struct list_head valids;
+
+ /* If the mappings are cleared or filled */
+ bool cleared;
};
struct amdgpu_bo {
@@ -189,14 +195,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
struct reservation_object *resv,
uint64_t init_value,
struct amdgpu_bo **bo_ptr);
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
- unsigned long size, int byte_align,
- bool kernel, u32 domain, u64 flags,
- struct sg_table *sg,
- struct ttm_placement *placement,
- struct reservation_object *resv,
- uint64_t init_value,
- struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
unsigned long size, int align,
u32 domain, struct amdgpu_bo **bo_ptr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7df503aedb69..f6ce52956e6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -74,7 +74,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.dpm.ac_power = true;
else
adev->pm.dpm.ac_power = false;
- if (adev->pm.funcs->enable_bapm)
+ if (adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
mutex_unlock(&adev->pm.mutex);
}
@@ -88,9 +88,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (adev->pp_enabled) {
+ if (adev->powerplay.pp_funcs->get_current_power_state)
pm = amdgpu_dpm_get_current_power_state(adev);
- } else
+ else
pm = adev->pm.dpm.user_state;
return snprintf(buf, PAGE_SIZE, "%s\n",
@@ -119,7 +119,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
}
if (adev->pp_enabled) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
@@ -140,13 +140,17 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- enum amd_dpm_forced_level level;
+ enum amd_dpm_forced_level level = 0xff;
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
- level = amdgpu_dpm_get_performance_level(adev);
+ if (adev->powerplay.pp_funcs->get_performance_level)
+ level = amdgpu_dpm_get_performance_level(adev);
+ else
+ level = adev->pm.dpm.forced_level;
+
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -167,7 +171,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level;
+ enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
/* Can't force performance level when the card is off */
@@ -175,7 +179,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- current_level = amdgpu_dpm_get_performance_level(adev);
+ if (adev->powerplay.pp_funcs->get_performance_level)
+ current_level = amdgpu_dpm_get_performance_level(adev);
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -203,9 +208,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
if (current_level == level)
return count;
- if (adev->pp_enabled)
- amdgpu_dpm_force_performance_level(adev, level);
- else {
+ if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
@@ -233,7 +236,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -257,8 +260,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0;
- if (adev->pp_enabled) {
-
+ if (adev->powerplay.pp_funcs->get_current_power_state
+ && adev->powerplay.pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
@@ -280,25 +283,10 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- struct pp_states_info data;
- enum amd_pm_state_type pm = 0;
- int i;
-
- if (adev->pp_force_state_enabled && adev->pp_enabled) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
-
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
- if (i == data.nums)
- i = -EINVAL;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", i);
-
- } else
+ if (adev->pp_force_state_enabled)
+ return amdgpu_get_pp_cur_state(dev, attr, buf);
+ else
return snprintf(buf, PAGE_SIZE, "\n");
}
@@ -330,7 +318,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
state != POWER_STATE_TYPE_DEFAULT) {
amdgpu_dpm_dispatch_task(adev,
- AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+ AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
adev->pp_force_state_enabled = true;
}
}
@@ -347,7 +335,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_pp_table)
size = amdgpu_dpm_get_pp_table(adev, &table);
else
return 0;
@@ -368,7 +356,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
return count;
@@ -380,14 +368,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
-
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
@@ -416,10 +401,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
mask |= 1 << level;
}
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
+
fail:
return count;
}
@@ -430,14 +414,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
-
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -465,11 +446,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
}
mask |= 1 << level;
}
-
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
+
fail:
return count;
}
@@ -480,14 +459,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- ssize_t size = 0;
-
- if (adev->pp_enabled)
- size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
- else if (adev->pm.funcs->print_clock_levels)
- size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
- return size;
+ if (adev->powerplay.pp_funcs->print_clock_levels)
+ return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -515,11 +491,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
}
mask |= 1 << level;
}
-
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->force_clock_level)
amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
- else if (adev->pm.funcs->force_clock_level)
- adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+
fail:
return count;
}
@@ -532,10 +506,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
- else if (adev->pm.funcs->get_sclk_od)
- value = adev->pm.funcs->get_sclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -556,12 +528,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
+ if (adev->powerplay.pp_funcs->set_sclk_od)
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
if (adev->pp_enabled) {
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
- } else if (adev->pm.funcs->set_sclk_od) {
- adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ } else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
@@ -578,10 +550,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
- else if (adev->pm.funcs->get_mclk_od)
- value = adev->pm.funcs->get_mclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
@@ -602,12 +572,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
+ if (adev->powerplay.pp_funcs->set_mclk_od)
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
if (adev->pp_enabled) {
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
- } else if (adev->pm.funcs->set_mclk_od) {
- adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ } else {
adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
amdgpu_pm_compute_clocks(adev);
}
@@ -621,14 +591,11 @@ static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- int ret = 0;
+ int ret = 0xff;
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->get_power_profile_state)
ret = amdgpu_dpm_get_power_profile_state(
adev, query);
- else if (adev->pm.funcs->get_power_profile_state)
- ret = adev->pm.funcs->get_power_profile_state(
- adev, query);
if (ret)
return ret;
@@ -675,15 +642,12 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
char *sub_str, buf_cpy[128], *tmp_str;
const char delimiter[3] = {' ', '\n', '\0'};
long int value;
- int ret = 0;
+ int ret = 0xff;
if (strncmp("reset", buf, strlen("reset")) == 0) {
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->reset_power_profile_state)
ret = amdgpu_dpm_reset_power_profile_state(
adev, request);
- else if (adev->pm.funcs->reset_power_profile_state)
- ret = adev->pm.funcs->reset_power_profile_state(
- adev, request);
if (ret) {
count = -EINVAL;
goto fail;
@@ -692,12 +656,10 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
}
if (strncmp("set", buf, strlen("set")) == 0) {
- if (adev->pp_enabled)
+ if (adev->powerplay.pp_funcs->set_power_profile_state)
ret = amdgpu_dpm_set_power_profile_state(
adev, request);
- else if (adev->pm.funcs->set_power_profile_state)
- ret = adev->pm.funcs->set_power_profile_state(
- adev, request);
+
if (ret) {
count = -EINVAL;
goto fail;
@@ -745,13 +707,8 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
loop++;
}
-
- if (adev->pp_enabled)
- ret = amdgpu_dpm_set_power_profile_state(
- adev, request);
- else if (adev->pm.funcs->set_power_profile_state)
- ret = adev->pm.funcs->set_power_profile_state(
- adev, request);
+ if (adev->powerplay.pp_funcs->set_power_profile_state)
+ ret = amdgpu_dpm_set_power_profile_state(adev, request);
if (ret)
count = -EINVAL;
@@ -831,7 +788,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
+ if (!adev->powerplay.pp_funcs->get_temperature)
temp = 0;
else
temp = amdgpu_dpm_get_temperature(adev);
@@ -862,7 +819,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
- if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
return -EINVAL;
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
@@ -879,7 +836,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err;
int value;
- if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
return -EINVAL;
err = kstrtoint(buf, 10, &value);
@@ -919,9 +876,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255;
- err = amdgpu_dpm_set_fan_speed_percent(adev, value);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+ if (err)
+ return err;
+ }
return count;
}
@@ -932,11 +891,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
- u32 speed;
+ u32 speed = 0;
- err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+ if (err)
+ return err;
+ }
speed = (speed * 255) / 100;
@@ -949,11 +910,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
int err;
- u32 speed;
+ u32 speed = 0;
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- if (err)
- return err;
+ if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
+ if (err)
+ return err;
+ }
return sprintf(buf, "%i\n", speed);
}
@@ -1008,21 +971,21 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
return 0;
/* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->pm.funcs->get_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->pm.funcs->get_fan_control_mode &&
+ (!adev->powerplay.pp_funcs->get_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
effective_mode &= ~S_IRUGO;
- if ((!adev->pm.funcs->set_fan_speed_percent &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->pm.funcs->set_fan_control_mode &&
+ (!adev->powerplay.pp_funcs->set_fan_control_mode &&
attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
effective_mode &= ~S_IWUSR;
/* hide max/min values if we can't both query and manage the fan */
- if ((!adev->pm.funcs->set_fan_speed_percent &&
- !adev->pm.funcs->get_fan_speed_percent) &&
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
(attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0;
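
Because of this masking, a missing pwm1 attribute is expected on ASICs
without fan bindings. A hedged userspace sketch (hwmon path
illustrative; includes elided):

	int fd = open("/sys/class/hwmon/hwmon0/pwm1", O_RDONLY);
	if (fd >= 0) {
		char val[8];
		ssize_t n = read(fd, val, sizeof(val) - 1);
		if (n > 0) {
			val[n] = '\0';
			printf("pwm1 = %s", val);	/* 0-255 */
		}
		close(fd);
	}
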
@@ -1055,7 +1018,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled)
return;
- if (adev->pm.funcs->get_temperature) {
+ if (adev->powerplay.pp_funcs->get_temperature) {
int temp = amdgpu_dpm_get_temperature(adev);
if (temp < adev->pm.dpm.thermal.min_temp)
@@ -1087,7 +1050,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
true : false;
/* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->pm.funcs->vblank_too_short) {
+ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
if (amdgpu_dpm_vblank_too_short(adev))
single_display = false;
}
@@ -1216,7 +1179,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
struct amdgpu_ps *ps;
enum amd_pm_state_type dpm_state;
int ret;
- bool equal;
+ bool equal = false;
/* if dpm init failed */
if (!adev->pm.dpm_enabled)
@@ -1236,7 +1199,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
else
return;
- if (amdgpu_dpm == 1) {
+ if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
printk("switching from power state:\n");
amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
printk("switching to power state:\n");
@@ -1245,15 +1208,17 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
/* update whether vce is active */
ps->vce_active = adev->pm.dpm.vce_active;
-
- amdgpu_dpm_display_configuration_changed(adev);
+ if (adev->powerplay.pp_funcs->display_configuration_changed)
+ amdgpu_dpm_display_configuration_changed(adev);
ret = amdgpu_dpm_pre_set_power_state(adev);
if (ret)
return;
- if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
- equal = false;
+ if (adev->powerplay.pp_funcs->check_state_equal) {
+ if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+ equal = false;
+ }
if (equal)
return;
@@ -1264,7 +1229,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
- if (adev->pm.funcs->force_performance_level) {
+ if (adev->powerplay.pp_funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
/* force low perf level for thermal */
@@ -1280,7 +1245,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+ if (adev->powerplay.pp_funcs->powergate_uvd) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_uvd(adev, !enable);
@@ -1302,7 +1267,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+ if (adev->powerplay.pp_funcs->powergate_vce) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_powergate_vce(adev, !enable);
@@ -1337,8 +1302,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
int i;
- if (adev->pp_enabled)
- /* TO DO */
+ if (adev->powerplay.pp_funcs->print_power_state == NULL)
return;
for (i = 0; i < adev->pm.dpm.num_ps; i++)
@@ -1353,10 +1317,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized)
return 0;
- if (!adev->pp_enabled) {
- if (adev->pm.funcs->get_temperature == NULL)
- return 0;
- }
+ if (adev->powerplay.pp_funcs->get_temperature == NULL)
+ return 0;
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
@@ -1496,7 +1458,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
}
if (adev->pp_enabled) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.new_active_crtcs = 0;
@@ -1634,8 +1596,8 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
return amdgpu_debugfs_pm_info_pp(m, adev);
} else {
mutex_lock(&adev->pm.mutex);
- if (adev->pm.funcs->debugfs_print_current_performance_level)
- adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
+ if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
+ adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
else
seq_printf(m, "Debugfs support not implemented for this asic\n");
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index b7e1c026c0c8..2d2f0960b025 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -87,17 +87,28 @@ static int amdgpu_pp_early_init(void *handle)
case CHIP_OLAND:
case CHIP_HAINAN:
amd_pp->ip_funcs = &si_dpm_ip_funcs;
+ amd_pp->pp_funcs = &si_dpm_funcs;
break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
- amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+ if (amdgpu_dpm == -1) {
+ amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+ amd_pp->pp_funcs = &ci_dpm_funcs;
+ } else {
+ adev->pp_enabled = true;
+ if (amdgpu_create_pp_handle(adev))
+ return -EINVAL;
+ amd_pp->ip_funcs = &pp_ip_funcs;
+ amd_pp->pp_funcs = &pp_dpm_funcs;
+ }
break;
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
+ amd_pp->pp_funcs = &kv_dpm_funcs;
break;
#endif
default:
@@ -128,7 +139,7 @@ static int amdgpu_pp_late_init(void *handle)
if (adev->pp_enabled && adev->pm.dpm_enabled) {
amdgpu_pm_sysfs_init(adev);
- amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 5b3f92891f89..90af8e82b16a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
ttm_bo_kunmap(&bo->dma_buf_vmap);
}
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ unsigned asize = amdgpu_bo_size(bo);
+ int ret;
+
+ if (!vma->vm_file)
+ return -ENODEV;
+
+ if (adev == NULL)
+ return -ENODEV;
+
+ /* Check for valid size. */
+ if (asize < vma->vm_end - vma->vm_start)
+ return -EINVAL;
+
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+ (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
+ return -EPERM;
+ }
+ vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
+
+	/* prime mmap does not need an access check, so allow it here */
+ ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
+ if (ret)
+ return ret;
+
+ ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
+ drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
+
+ return ret;
+}
+
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -136,7 +170,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
- if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+ if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+ bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
return drm_gem_prime_export(dev, gobj, flags);
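
For reference, a minimal userspace sketch of how the new amdgpu_gem_prime_mmap path is exercised. Everything here is illustrative: the dma-buf fd is assumed to come from a PRIME export of an amdgpu BO, and the size from the exporter; the -EPERM case mirrors the userptr / AMDGPU_GEM_CREATE_NO_CPU_ACCESS checks above.

#include <stdio.h>
#include <sys/mman.h>

/* dmabuf_fd: assumed to be a dma-buf exported from an amdgpu BO
 * (hypothetical setup, e.g. via a PRIME handle-to-fd ioctl). */
static void *map_dmabuf(int dmabuf_fd, size_t size)
{
	/* offset 0: amdgpu_gem_prime_mmap rebases vm_pgoff to the BO's
	 * own mmap offset, so callers always map from the start. */
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, dmabuf_fd, 0);

	if (ptr == MAP_FAILED) {
		perror("mmap");	/* -EPERM for userptr/NO_CPU_ACCESS BOs */
		return NULL;
	}
	return ptr;
}
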
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 8c2204c7b384..447d446b5015 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -57,21 +57,23 @@ static int psp_sw_init(void *handle)
psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
psp->ring_init = psp_v3_1_ring_init;
psp->ring_create = psp_v3_1_ring_create;
+ psp->ring_stop = psp_v3_1_ring_stop;
psp->ring_destroy = psp_v3_1_ring_destroy;
psp->cmd_submit = psp_v3_1_cmd_submit;
psp->compare_sram_data = psp_v3_1_compare_sram_data;
psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
+ psp->mode1_reset = psp_v3_1_mode1_reset;
break;
case CHIP_RAVEN:
-#if 0
psp->init_microcode = psp_v10_0_init_microcode;
-#endif
psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
psp->ring_init = psp_v10_0_ring_init;
psp->ring_create = psp_v10_0_ring_create;
+ psp->ring_stop = psp_v10_0_ring_stop;
psp->ring_destroy = psp_v10_0_ring_destroy;
psp->cmd_submit = psp_v10_0_cmd_submit;
psp->compare_sram_data = psp_v10_0_compare_sram_data;
+ psp->mode1_reset = psp_v10_0_mode1_reset;
break;
default:
return -EINVAL;
@@ -90,6 +92,12 @@ static int psp_sw_init(void *handle)
static int psp_sw_fini(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
return 0;
}
@@ -253,15 +261,18 @@ static int psp_asd_load(struct psp_context *psp)
static int psp_hw_start(struct psp_context *psp)
{
+ struct amdgpu_device *adev = psp->adev;
int ret;
- ret = psp_bootloader_load_sysdrv(psp);
- if (ret)
- return ret;
+ if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+ ret = psp_bootloader_load_sysdrv(psp);
+ if (ret)
+ return ret;
- ret = psp_bootloader_load_sos(psp);
- if (ret)
- return ret;
+ ret = psp_bootloader_load_sos(psp);
+ if (ret)
+ return ret;
+ }
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
if (ret)
@@ -453,6 +464,16 @@ static int psp_hw_fini(void *handle)
static int psp_suspend(void *handle)
{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct psp_context *psp = &adev->psp;
+
+ ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
+ if (ret) {
+ DRM_ERROR("PSP ring stop failed\n");
+ return ret;
+ }
+
return 0;
}
@@ -487,6 +508,22 @@ failed:
return ret;
}
+static bool psp_check_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (adev->flags & AMD_IS_APU)
+ return true;
+
+ return false;
+}
+
+static int psp_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ return psp_mode1_reset(&adev->psp);
+}
+
static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
enum AMDGPU_UCODE_ID ucode_type)
{
@@ -530,8 +567,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
.suspend = psp_suspend,
.resume = psp_resume,
.is_idle = NULL,
+ .check_soft_reset = psp_check_reset,
.wait_for_idle = NULL,
- .soft_reset = NULL,
+ .soft_reset = psp_reset,
.set_clockgating_state = psp_set_clockgating_state,
.set_powergating_state = psp_set_powergating_state,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 538fa9dbfb21..ce4654550416 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -66,6 +66,8 @@ struct psp_context
struct psp_gfx_cmd_resp *cmd);
int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
+ int (*ring_stop)(struct psp_context *psp,
+ enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
@@ -74,6 +76,7 @@ struct psp_context
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
+ int (*mode1_reset)(struct psp_context *psp);
/* fence buffer */
struct amdgpu_bo *fw_pri_bo;
@@ -123,6 +126,7 @@ struct amdgpu_psp_funcs {
#define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
#define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
#define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
+#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
#define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
@@ -136,6 +140,8 @@ struct amdgpu_psp_funcs {
((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
+#define psp_mode1_reset(psp) \
+ ((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false)
extern const struct amd_ip_funcs psp_ip_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 1c88bd5e29ad..213988f336ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -14,62 +14,6 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_ttm_tt_populate,
- TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
- TP_ARGS(adev, dma_address, phys_address),
- TP_STRUCT__entry(
- __field(uint16_t, domain)
- __field(uint8_t, bus)
- __field(uint8_t, slot)
- __field(uint8_t, func)
- __field(uint64_t, dma)
- __field(uint64_t, phys)
- ),
- TP_fast_assign(
- __entry->domain = pci_domain_nr(adev->pdev->bus);
- __entry->bus = adev->pdev->bus->number;
- __entry->slot = PCI_SLOT(adev->pdev->devfn);
- __entry->func = PCI_FUNC(adev->pdev->devfn);
- __entry->dma = dma_address;
- __entry->phys = phys_address;
- ),
- TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
- (unsigned)__entry->domain,
- (unsigned)__entry->bus,
- (unsigned)__entry->slot,
- (unsigned)__entry->func,
- (unsigned long long)__entry->dma,
- (unsigned long long)__entry->phys)
-);
-
-TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
- TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
- TP_ARGS(adev, dma_address, phys_address),
- TP_STRUCT__entry(
- __field(uint16_t, domain)
- __field(uint8_t, bus)
- __field(uint8_t, slot)
- __field(uint8_t, func)
- __field(uint64_t, dma)
- __field(uint64_t, phys)
- ),
- TP_fast_assign(
- __entry->domain = pci_domain_nr(adev->pdev->bus);
- __entry->bus = adev->pdev->bus->number;
- __entry->slot = PCI_SLOT(adev->pdev->devfn);
- __entry->func = PCI_FUNC(adev->pdev->devfn);
- __entry->dma = dma_address;
- __entry->phys = phys_address;
- ),
- TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
- (unsigned)__entry->domain,
- (unsigned)__entry->bus,
- (unsigned)__entry->slot,
- (unsigned)__entry->func,
- (unsigned long long)__entry->dma,
- (unsigned long long)__entry->phys)
-);
-
TRACE_EVENT(amdgpu_mm_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
@@ -473,5 +417,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
index 385b7e1d72f9..9ec96b9e85d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c
@@ -1,4 +1,23 @@
/* Copyright Red Hat Inc 2010.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
* Author : Dave Airlie <airlied@redhat.com>
*/
#include <drm/drmP.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 7ef6c28a34d9..15a28578d458 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -42,6 +42,7 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
+#include <linux/iommu.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "bif/bif_4_1_d.h"
@@ -608,6 +609,7 @@ struct amdgpu_ttm_tt {
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
+ uint32_t last_set_pages;
struct list_head list;
};
@@ -621,6 +623,8 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
flags |= FOLL_WRITE;
+ down_read(&current->mm->mmap_sem);
+
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
/* check that we only use anonymous memory
to prevent problems with writeback */
@@ -628,8 +632,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
struct vm_area_struct *vma;
vma = find_vma(gtt->usermm, gtt->userptr);
- if (!vma || vma->vm_file || vma->vm_end < end)
+ if (!vma || vma->vm_file || vma->vm_end < end) {
+ up_read(&current->mm->mmap_sem);
return -EPERM;
+ }
}
do {
@@ -656,42 +662,44 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
} while (pinned < ttm->num_pages);
+ up_read(&current->mm->mmap_sem);
return 0;
release_pages:
release_pages(pages, pinned, 0);
+ up_read(&current->mm->mmap_sem);
return r;
}
-static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
- if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
- for (i = 0; i < ttm->num_pages; i++) {
- trace_amdgpu_ttm_tt_populate(
- adev,
- gtt->ttm.dma_address[i],
- page_to_phys(ttm->pages[i]));
- }
+ gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i])
+ put_page(ttm->pages[i]);
+
+ ttm->pages[i] = pages ? pages[i] : NULL;
}
}
-static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned i;
- if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
- for (i = 0; i < ttm->num_pages; i++) {
- trace_amdgpu_ttm_tt_unpopulate(
- adev,
- gtt->ttm.dma_address[i],
- page_to_phys(ttm->pages[i]));
- }
+ for (i = 0; i < ttm->num_pages; ++i) {
+ struct page *page = ttm->pages[i];
+
+ if (!page)
+ continue;
+
+ if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+ set_page_dirty(page);
+
+ mark_page_accessed(page);
}
}
@@ -721,8 +729,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
- amdgpu_trace_dma_map(ttm);
-
return 0;
release_sg:
@@ -734,7 +740,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- struct sg_page_iter sg_iter;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
@@ -747,16 +752,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */
dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
- if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
- set_page_dirty(page);
-
- mark_page_accessed(page);
- put_page(page);
- }
-
- amdgpu_trace_dma_unmap(ttm);
+ amdgpu_ttm_tt_mark_user_pages(ttm);
sg_free_table(ttm->sg);
}
@@ -818,7 +814,6 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg tmp;
-
struct ttm_placement placement;
struct ttm_place placements;
int r;
@@ -834,7 +829,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placement.busy_placement = &placements;
placements.fpfn = 0;
placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
- placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+ placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
if (unlikely(r))
@@ -941,8 +936,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
- int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
@@ -962,52 +955,26 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
ttm->state = tt_unbound;
- r = 0;
- goto trace_mappings;
+ return 0;
}
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- r = ttm_dma_populate(&gtt->ttm, adev->dev);
- goto trace_mappings;
+ return ttm_dma_populate(&gtt->ttm, adev->dev);
}
#endif
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
-
- r = 0;
-trace_mappings:
- if (likely(!r))
- amdgpu_trace_dma_map(ttm);
- return r;
+ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct amdgpu_device *adev;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
+ amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
return;
@@ -1018,8 +985,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
adev = amdgpu_ttm_adev(ttm->bdev);
- amdgpu_trace_dma_unmap(ttm);
-
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1027,14 +992,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
+ ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
}
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -1051,6 +1009,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
spin_lock_init(&gtt->guptasklock);
INIT_LIST_HEAD(&gtt->guptasks);
atomic_set(&gtt->mmu_invalidations, 0);
+ gtt->last_set_pages = 0;
return 0;
}
@@ -1103,6 +1062,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
return prev_invalidated != *last_invalidated;
}
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+ if (gtt == NULL || !gtt->userptr)
+ return false;
+
+ return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+}
+
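
A standalone illustration of the generation-counter idiom behind amdgpu_ttm_tt_userptr_needs_pages: set_user_pages() snapshots the invalidation counter, and the helper reports stale pages whenever the live counter has moved on. Names and the atomic type are stand-ins, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int invalidations;	/* toy mmu_invalidations */
static int last_set_pages;		/* toy gtt->last_set_pages */

static void set_pages(void)   { last_set_pages = atomic_load(&invalidations); }
static void invalidate(void)  { atomic_fetch_add(&invalidations, 1); }
static bool needs_pages(void) { return atomic_load(&invalidations) != last_set_pages; }

int main(void)
{
	set_pages();
	printf("%d\n", needs_pages());	/* 0: pages are current */
	invalidate();			/* MMU notifier fired */
	printf("%d\n", needs_pages());	/* 1: user pages must be re-acquired */
	return 0;
}
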
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1202,14 +1171,14 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
}
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
- WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
if (!write || mask != 0xffffffff)
- value = RREG32(mmMM_DATA);
+ value = RREG32_NO_KIQ(mmMM_DATA);
if (write) {
value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask;
- WREG32(mmMM_DATA, value);
+ WREG32_NO_KIQ(mmMM_DATA, value);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
if (!write) {
@@ -1557,8 +1526,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
struct dma_fence **fence)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- /* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/
- uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+ uint32_t max_bytes = 8 *
+ adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *mm_node;
@@ -1590,8 +1559,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
++mm_node;
}
- /* 10 double words for each SDMA_OP_PTEPDE cmd */
- num_dw = num_loops * 10;
+	/* number of dwords for each SDMA_OP_PTEPDE cmd */
+ num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
/* for IB padding */
num_dw += 64;
@@ -1697,9 +1666,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32(mmMM_DATA);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+ value = RREG32_NO_KIQ(mmMM_DATA);
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
r = put_user(value, (uint32_t *)buf);
@@ -1715,10 +1684,50 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
return result;
}
+static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ if (*pos >= adev->mc.mc_vram_size)
+ return -ENXIO;
+
+ while (size) {
+ unsigned long flags;
+ uint32_t value;
+
+ if (*pos >= adev->mc.mc_vram_size)
+ return result;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ return result;
+}
+
static const struct file_operations amdgpu_ttm_vram_fops = {
.owner = THIS_MODULE,
.read = amdgpu_ttm_vram_read,
- .llseek = default_llseek
+ .write = amdgpu_ttm_vram_write,
+ .llseek = default_llseek,
};
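
The new write handler uses the same MM_INDEX/MM_INDEX_HI/MM_DATA indexed-register sequence as the read path. A hedged userspace sketch of poking VRAM through it; the debugfs path is an assumption (the usual DRM debugfs location), and writes must be 4-byte sized and aligned, per the checks above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t value = 0xcafebabe;	/* illustrative pattern */
	/* path assumed; substitute the right DRM minor */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_WRONLY);

	if (fd < 0)
		return 1;
	if (pwrite(fd, &value, sizeof(value), 0x1000) != sizeof(value))
		perror("pwrite");
	close(fd);
	return 0;
}
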
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
@@ -1770,6 +1779,53 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
#endif
+static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ int r;
+ uint64_t phys;
+ struct iommu_domain *dom;
+
+	/* always return 8 bytes */
+ if (size != 8)
+ return -EINVAL;
+
+	/* only accept page addresses */
+ if (*pos & 0xFFF)
+ return -EINVAL;
+
+ dom = iommu_get_domain_for_dev(adev->dev);
+ if (dom)
+ phys = iommu_iova_to_phys(dom, *pos);
+ else
+ phys = *pos;
+
+ r = copy_to_user(buf, &phys, 8);
+ if (r)
+ return -EFAULT;
+
+ return 8;
+}
+
+static const struct file_operations amdgpu_ttm_iova_fops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_iova_to_phys_read,
+ .llseek = default_llseek
+};
+
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+ int domain;
+} ttm_debugfs_entries[] = {
+ { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+ { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
+#endif
+ { "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
+};
+
#endif
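
The amdgpu_iova file translates a page-aligned IOVA (passed as the file offset) to a physical address through the device's IOMMU domain, falling back to identity when no domain is attached. A hedged usage sketch; the path is an assumption, and the 8-byte / page-aligned constraints match the checks above.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t iova = 0x100000;	/* illustrative, page aligned */
	uint64_t phys;
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_iova", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &phys, sizeof(phys), iova) != sizeof(phys))
		return 1;
	printf("iova 0x%llx -> phys 0x%llx\n",
	       (unsigned long long)iova, (unsigned long long)phys);
	close(fd);
	return 0;
}
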
static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
@@ -1780,22 +1836,21 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
struct drm_minor *minor = adev->ddev->primary;
struct dentry *ent, *root = minor->debugfs_root;
- ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_vram_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.mc_vram_size);
- adev->mman.vram = ent;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
- adev, &amdgpu_ttm_gtt_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- i_size_write(ent->d_inode, adev->mc.gart_size);
- adev->mman.gtt = ent;
+ for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
+ ent = debugfs_create_file(
+ ttm_debugfs_entries[count].name,
+ S_IFREG | S_IRUGO, root,
+ adev,
+ ttm_debugfs_entries[count].fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+ if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
+ i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+ else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
+ i_size_write(ent->d_inode, adev->mc.gart_size);
+ adev->mman.debugfs_entries[count] = ent;
+ }
-#endif
count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
#ifdef CONFIG_SWIOTLB
@@ -1805,7 +1860,6 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else
-
return 0;
#endif
}
@@ -1813,14 +1867,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
- debugfs_remove(adev->mman.vram);
- adev->mman.vram = NULL;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- debugfs_remove(adev->mman.gtt);
- adev->mman.gtt = NULL;
-#endif
-
+ for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
+ debugfs_remove(adev->mman.debugfs_entries[i]);
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 43093bffa2cf..7abae6867339 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -24,6 +24,7 @@
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
+#include "amdgpu.h"
#include "gpu_scheduler.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
@@ -45,8 +46,7 @@ struct amdgpu_mman {
bool initialized;
#if defined(CONFIG_DEBUG_FS)
- struct dentry *vram;
- struct dentry *gtt;
+ struct dentry *debugfs_entries[8];
#endif
/* buffer handling */
@@ -82,4 +82,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+ uint32_t flags);
+bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+ int *last_invalidated);
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+ struct ttm_mem_reg *mem);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 36c763310df5..65649026b836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else
return AMDGPU_FW_LOAD_SMU;
case CHIP_VEGA10:
- if (!load_type)
- return AMDGPU_FW_LOAD_DIRECT;
- else
- return AMDGPU_FW_LOAD_PSP;
case CHIP_RAVEN:
- if (load_type != 2)
+ if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
return AMDGPU_FW_LOAD_PSP;
@@ -364,8 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
struct amdgpu_bo **bo = &adev->firmware.fw_buf;
- uint64_t fw_mc_addr;
- void *fw_buf_ptr = NULL;
uint64_t fw_offset = 0;
int i, err;
struct amdgpu_firmware_info *ucode = NULL;
@@ -376,37 +370,39 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0;
}
- err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
- NULL, NULL, 0, bo);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
- goto failed;
- }
+ if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+ err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
+ amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+ NULL, NULL, 0, bo);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
+ goto failed;
+ }
- err = amdgpu_bo_reserve(*bo, false);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
- goto failed_reserve;
- }
+ err = amdgpu_bo_reserve(*bo, false);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
+ goto failed_reserve;
+ }
- err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
- &fw_mc_addr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
- goto failed_pin;
- }
+ err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+ &adev->firmware.fw_buf_mc);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
+ goto failed_pin;
+ }
- err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
- if (err) {
- dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
- goto failed_kmap;
- }
+ err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
+ if (err) {
+ dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
+ goto failed_kmap;
+ }
- amdgpu_bo_unreserve(*bo);
+ amdgpu_bo_unreserve(*bo);
+ }
- memset(fw_buf_ptr, 0, adev->firmware.fw_size);
+ memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
/*
* if SMU loaded firmware, it needn't add SMC, UVD, and VCE
@@ -425,14 +421,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
ucode = &adev->firmware.ucode[i];
if (ucode->fw) {
header = (const struct common_firmware_header *)ucode->fw->data;
- amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset,
- (void *)((uint8_t *)fw_buf_ptr + fw_offset));
+ amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
const struct gfx_firmware_header_v1_0 *cp_hdr;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
- amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
- fw_buf_ptr + fw_offset);
+ amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
+ adev->firmware.fw_buf_ptr + fw_offset);
fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
}
fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e19928dae8e3..e8bd50cf9785 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -269,6 +269,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
+ int i;
kfree(adev->uvd.saved_bo);
amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
@@ -279,6 +280,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->uvd.ring);
+ for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+ amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+
release_firmware(adev->uvd.fw);
return 0;
@@ -410,10 +414,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ return r;
}
if (!ctx->parser->adev->uvd.address_64_bit) {
@@ -737,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ return r;
}
start = amdgpu_bo_gpu_offset(bo);
@@ -917,10 +921,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
return -EINVAL;
}
- r = amdgpu_cs_sysvm_access_required(parser);
- if (r)
- return r;
-
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c855366521ab..b46280c1279f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint64_t addr;
+ int r;
if (index == 0xffffffff)
index = 0;
@@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
addr += ((uint64_t)size) * ((uint64_t)index);
- mapping = amdgpu_cs_find_mapping(p, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
addr, lo, hi, size, index);
- return -EINVAL;
+ return r;
}
if ((addr + (uint64_t)size) >
@@ -652,10 +653,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
- r = amdgpu_cs_sysvm_access_required(p);
- if (r)
- return r;
-
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index bd20ff018512..bbcc67038203 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -27,12 +27,59 @@
*/
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
+#include <linux/idr.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
+ * PASID manager
+ *
+ * PASIDs are global address space identifiers that can be shared
+ * between the GPU, an IOMMU and the driver. VMs on different devices
+ * may use the same PASID if they share the same address
+ * space. Therefore PASIDs are allocated using a global IDA. VMs are
+ * looked up from the PASID per amdgpu_device.
+ */
+static DEFINE_IDA(amdgpu_vm_pasid_ida);
+
+/**
+ * amdgpu_vm_alloc_pasid - Allocate a PASID
+ * @bits: Maximum width of the PASID in bits, must be at least 1
+ *
+ * Allocates a PASID of the given width while keeping smaller PASIDs
+ * available if possible.
+ *
+ * Returns a positive integer on success. Returns %-EINVAL if bits==0.
+ * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
+ * memory allocation failure.
+ */
+int amdgpu_vm_alloc_pasid(unsigned int bits)
+{
+ int pasid = -EINVAL;
+
+ for (bits = min(bits, 31U); bits > 0; bits--) {
+ pasid = ida_simple_get(&amdgpu_vm_pasid_ida,
+ 1U << (bits - 1), 1U << bits,
+ GFP_KERNEL);
+ if (pasid != -ENOSPC)
+ break;
+ }
+
+ return pasid;
+}
+
+/**
+ * amdgpu_vm_free_pasid - Free a PASID
+ * @pasid: PASID to free
+ */
+void amdgpu_vm_free_pasid(unsigned int pasid)
+{
+ ida_simple_remove(&amdgpu_vm_pasid_ida, pasid);
+}
+
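
The allocator above walks the requested width downwards, trying the widest range [1 << (bits-1), 1 << bits) first so that small PASIDs stay free for consumers that can only address a few bits. A toy model of that policy (the IDA is mimicked with a plain bitmap; this is a sketch, not the kernel API):

#include <stdio.h>

#define TOY_MAX 64
static unsigned char used[TOY_MAX];	/* toy stand-in for the global IDA */

/* First free id in [lo, hi), or -1 (stands in for -ENOSPC). */
static int toy_ida_get(unsigned lo, unsigned hi)
{
	for (unsigned id = lo; id < hi && id < TOY_MAX; id++) {
		if (!used[id]) {
			used[id] = 1;
			return (int)id;
		}
	}
	return -1;
}

static int toy_alloc_pasid(unsigned bits)
{
	int pasid = -1;

	for (bits = bits < 5 ? bits : 5; bits > 0; bits--) {
		pasid = toy_ida_get(1u << (bits - 1), 1u << bits);
		if (pasid != -1)	/* the kernel retries only on -ENOSPC */
			break;
	}
	return pasid;
}

int main(void)
{
	/* prints 16..31 first, then falls back to 8..11 */
	for (int i = 0; i < 20; i++)
		printf("%d ", toy_alloc_pasid(5));
	printf("\n");
	return 0;
}
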
+/*
* GPUVM
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
@@ -140,7 +187,7 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry)
{
- entry->robj = vm->root.bo;
+ entry->robj = vm->root.base.bo;
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
@@ -149,86 +196,80 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_validate_layer - validate a single page table level
+ * amdgpu_vm_validate_pt_bos - validate the page table BOs
*
- * @parent: parent page table level
+ * @adev: amdgpu device pointer
+ * @vm: vm providing the BOs
* @validate: callback to do the validation
* @param: parameter for the validation callback
*
 * Validate the page table BOs on command submission if necessary.
*/
-static int amdgpu_vm_validate_level(struct amdgpu_vm_pt *parent,
- int (*validate)(void *, struct amdgpu_bo *),
- void *param, bool use_cpu_for_update,
- struct ttm_bo_global *glob)
+int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int (*validate)(void *p, struct amdgpu_bo *bo),
+ void *param)
{
- unsigned i;
+ struct ttm_bo_global *glob = adev->mman.bdev.glob;
int r;
- if (use_cpu_for_update) {
- r = amdgpu_bo_kmap(parent->bo, NULL);
- if (r)
- return r;
- }
-
- if (!parent->entries)
- return 0;
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->evicted)) {
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
- for (i = 0; i <= parent->last_entry_used; ++i) {
- struct amdgpu_vm_pt *entry = &parent->entries[i];
+ bo_base = list_first_entry(&vm->evicted,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
- if (!entry->bo)
- continue;
+ bo = bo_base->bo;
+ BUG_ON(!bo);
+ if (bo->parent) {
+ r = validate(param, bo);
+ if (r)
+ return r;
- r = validate(param, entry->bo);
- if (r)
- return r;
+ spin_lock(&glob->lru_lock);
+ ttm_bo_move_to_lru_tail(&bo->tbo);
+ if (bo->shadow)
+ ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+ spin_unlock(&glob->lru_lock);
+ }
- spin_lock(&glob->lru_lock);
- ttm_bo_move_to_lru_tail(&entry->bo->tbo);
- if (entry->bo->shadow)
- ttm_bo_move_to_lru_tail(&entry->bo->shadow->tbo);
- spin_unlock(&glob->lru_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel &&
+ vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(bo, NULL);
+ if (r)
+ return r;
+ }
- /*
- * Recurse into the sub directory. This is harmless because we
- * have only a maximum of 5 layers.
- */
- r = amdgpu_vm_validate_level(entry, validate, param,
- use_cpu_for_update, glob);
- if (r)
- return r;
+ spin_lock(&vm->status_lock);
+ if (bo->tbo.type != ttm_bo_type_kernel)
+ list_move(&bo_base->vm_status, &vm->moved);
+ else
+ list_move(&bo_base->vm_status, &vm->relocated);
}
+ spin_unlock(&vm->status_lock);
- return r;
+ return 0;
}
/**
- * amdgpu_vm_validate_pt_bos - validate the page table BOs
+ * amdgpu_vm_ready - check VM is ready for updates
*
- * @adev: amdgpu device pointer
- * @vm: vm providing the BOs
- * @validate: callback to do the validation
- * @param: parameter for the validation callback
+ * @vm: VM to check
*
- * Validate the page table BOs on command submission if neccessary.
+ * Check if all VM PDs/PTs are ready for updates
*/
-int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int (*validate)(void *p, struct amdgpu_bo *bo),
- void *param)
+bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- uint64_t num_evictions;
+ bool ready;
- /* We only need to validate the page tables
- * if they aren't already valid.
- */
- num_evictions = atomic64_read(&adev->num_evictions);
- if (num_evictions == vm->last_eviction_counter)
- return 0;
+ spin_lock(&vm->status_lock);
+ ready = list_empty(&vm->evicted);
+ spin_unlock(&vm->status_lock);
- return amdgpu_vm_validate_level(&vm->root, validate, param,
- vm->use_cpu_for_update,
- adev->mman.bdev.glob);
+ return ready;
}
/**
@@ -294,11 +335,11 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
- struct reservation_object *resv = vm->root.bo->tbo.resv;
+ struct reservation_object *resv = vm->root.base.bo->tbo.resv;
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
struct amdgpu_bo *pt;
- if (!entry->bo) {
+ if (!entry->base.bo) {
r = amdgpu_bo_create(adev,
amdgpu_vm_bo_size(adev, level),
AMDGPU_GPU_PAGE_SIZE, true,
@@ -319,9 +360,14 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
/* Keep a reference to the root directory to avoid
* freeing them up in the wrong order.
*/
- pt->parent = amdgpu_bo_ref(vm->root.bo);
-
- entry->bo = pt;
+ pt->parent = amdgpu_bo_ref(parent->base.bo);
+
+ entry->base.vm = vm;
+ entry->base.bo = pt;
+ list_add_tail(&entry->base.bo_list, &pt->va);
+ spin_lock(&vm->status_lock);
+ list_add(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
entry->addr = 0;
}
@@ -988,7 +1034,7 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.resv, owner);
+ amdgpu_sync_resv(adev, &sync, vm->root.base.bo->tbo.resv, owner);
r = amdgpu_sync_wait(&sync, true);
amdgpu_sync_free(&sync);
@@ -1007,18 +1053,17 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
static int amdgpu_vm_update_level(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- unsigned level)
+ struct amdgpu_vm_pt *parent)
{
struct amdgpu_bo *shadow;
struct amdgpu_ring *ring = NULL;
uint64_t pd_addr, shadow_addr = 0;
- uint32_t incr = amdgpu_vm_bo_size(adev, level + 1);
uint64_t last_pde = ~0, last_pt = ~0, last_shadow = ~0;
unsigned count = 0, pt_idx, ndw = 0;
struct amdgpu_job *job;
struct amdgpu_pte_update_params params;
struct dma_fence *fence = NULL;
+ uint32_t incr;
int r;
@@ -1027,10 +1072,10 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
- shadow = parent->bo->shadow;
+ shadow = parent->base.bo->shadow;
if (vm->use_cpu_for_update) {
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM);
if (unlikely(r))
return r;
@@ -1046,7 +1091,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
/* assume the worst case */
ndw += parent->last_entry_used * 6;
- pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
if (shadow) {
shadow_addr = amdgpu_bo_gpu_offset(shadow);
@@ -1066,12 +1111,17 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
/* walk over the address space and update the directory */
for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_bo *bo = parent->entries[pt_idx].bo;
+ struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
+ struct amdgpu_bo *bo = entry->base.bo;
uint64_t pde, pt;
if (bo == NULL)
continue;
+ spin_lock(&vm->status_lock);
+ list_del_init(&entry->base.vm_status);
+ spin_unlock(&vm->status_lock);
+
pt = amdgpu_bo_gpu_offset(bo);
pt = amdgpu_gart_get_vm_pde(adev, pt);
/* Don't update huge pages here */
@@ -1082,6 +1132,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
parent->entries[pt_idx].addr = pt | AMDGPU_PTE_VALID;
pde = pd_addr + pt_idx * 8;
+ incr = amdgpu_bo_size(bo);
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt) ||
(count == AMDGPU_VM_MAX_UPDATE_SIZE)) {
@@ -1109,7 +1160,7 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
}
if (count) {
- if (vm->root.bo->shadow)
+ if (vm->root.base.bo->shadow)
params.func(&params, last_shadow, last_pt,
count, incr, AMDGPU_PTE_VALID);
@@ -1122,7 +1173,8 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
amdgpu_job_free(job);
} else {
amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, parent->bo->tbo.resv,
+ amdgpu_sync_resv(adev, &job->sync,
+ parent->base.bo->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
if (shadow)
amdgpu_sync_resv(adev, &job->sync,
@@ -1135,26 +1187,11 @@ static int amdgpu_vm_update_level(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_bo_fence(parent->bo, fence, true);
- dma_fence_put(vm->last_dir_update);
- vm->last_dir_update = dma_fence_get(fence);
- dma_fence_put(fence);
+ amdgpu_bo_fence(parent->base.bo, fence, true);
+ dma_fence_put(vm->last_update);
+ vm->last_update = fence;
}
}
- /*
- * Recurse into the subdirectories. This recursion is harmless because
- * we only have a maximum of 5 layers.
- */
- for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-
- if (!entry->bo)
- continue;
-
- r = amdgpu_vm_update_level(adev, vm, entry, level + 1);
- if (r)
- return r;
- }
return 0;
@@ -1170,7 +1207,8 @@ error_free:
*
* Mark all PD level as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
+static void amdgpu_vm_invalidate_level(struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *parent)
{
unsigned pt_idx;
@@ -1181,11 +1219,15 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_vm_pt *parent)
for (pt_idx = 0; pt_idx <= parent->last_entry_used; ++pt_idx) {
struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- if (!entry->bo)
+ if (!entry->base.bo)
continue;
entry->addr = ~0ULL;
- amdgpu_vm_invalidate_level(entry);
+ spin_lock(&vm->status_lock);
+ if (list_empty(&entry->base.vm_status))
+ list_add(&entry->base.vm_status, &vm->relocated);
+ spin_unlock(&vm->status_lock);
+ amdgpu_vm_invalidate_level(vm, entry);
}
}
@@ -1203,9 +1245,38 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
{
int r;
- r = amdgpu_vm_update_level(adev, vm, &vm->root, 0);
- if (r)
- amdgpu_vm_invalidate_level(&vm->root);
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->relocated)) {
+ struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_bo *bo;
+
+ bo_base = list_first_entry(&vm->relocated,
+ struct amdgpu_vm_bo_base,
+ vm_status);
+ spin_unlock(&vm->status_lock);
+
+ bo = bo_base->bo->parent;
+ if (bo) {
+ struct amdgpu_vm_bo_base *parent;
+ struct amdgpu_vm_pt *pt;
+
+ parent = list_first_entry(&bo->va,
+ struct amdgpu_vm_bo_base,
+ bo_list);
+ pt = container_of(parent, struct amdgpu_vm_pt, base);
+
+ r = amdgpu_vm_update_level(adev, vm, pt);
+ if (r) {
+ amdgpu_vm_invalidate_level(vm, &vm->root);
+ return r;
+ }
+ spin_lock(&vm->status_lock);
+ } else {
+ spin_lock(&vm->status_lock);
+ list_del_init(&bo_base->vm_status);
+ }
+ }
+ spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
/* Flush HDP */
@@ -1236,7 +1307,7 @@ void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
*entry = &p->vm->root;
while ((*entry)->entries) {
idx = addr >> (p->adev->vm_manager.block_size * level--);
- idx %= amdgpu_bo_size((*entry)->bo) / 8;
+ idx %= amdgpu_bo_size((*entry)->base.bo) / 8;
*parent = *entry;
*entry = &(*entry)->entries[idx];
}
@@ -1272,7 +1343,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
p->src ||
!(flags & AMDGPU_PTE_VALID)) {
- dst = amdgpu_bo_gpu_offset(entry->bo);
+ dst = amdgpu_bo_gpu_offset(entry->base.bo);
dst = amdgpu_gart_get_vm_pde(p->adev, dst);
flags = AMDGPU_PTE_VALID;
} else {
@@ -1298,18 +1369,18 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
tmp = p->pages_addr;
p->pages_addr = NULL;
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->bo);
+ pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_cpu_set_ptes(p, pde, dst, 1, 0, flags);
p->pages_addr = tmp;
} else {
- if (parent->bo->shadow) {
- pd_addr = amdgpu_bo_gpu_offset(parent->bo->shadow);
+ if (parent->base.bo->shadow) {
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
}
- pd_addr = amdgpu_bo_gpu_offset(parent->bo);
+ pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
pde = pd_addr + (entry - parent->entries) * 8;
amdgpu_vm_do_set_ptes(p, pde, dst, 1, 0, flags);
}
@@ -1360,7 +1431,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if (entry->addr & AMDGPU_PDE_PTE)
continue;
- pt = entry->bo;
+ pt = entry->base.bo;
if (use_cpu_update) {
pe_start = (unsigned long)amdgpu_bo_kptr(pt);
} else {
@@ -1396,8 +1467,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
- int r;
-
/**
* The MC L1 TLB supports variable sized pages, based on a fragment
* field in the PTE. When this field is set to a non-zero value, page
@@ -1416,39 +1485,38 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* Userspace can support this by aligning virtual base address and
* allocation size to the fragment size.
*/
- unsigned pages_per_frag = params->adev->vm_manager.fragment_size;
- uint64_t frag_flags = AMDGPU_PTE_FRAG(pages_per_frag);
- uint64_t frag_align = 1 << pages_per_frag;
-
- uint64_t frag_start = ALIGN(start, frag_align);
- uint64_t frag_end = end & ~(frag_align - 1);
+ unsigned max_frag = params->adev->vm_manager.fragment_size;
+ int r;
	/* system pages are not contiguous */
- if (params->src || !(flags & AMDGPU_PTE_VALID) ||
- (frag_start >= frag_end))
+ if (params->src || !(flags & AMDGPU_PTE_VALID))
return amdgpu_vm_update_ptes(params, start, end, dst, flags);
- /* handle the 4K area at the beginning */
- if (start != frag_start) {
- r = amdgpu_vm_update_ptes(params, start, frag_start,
- dst, flags);
+ while (start != end) {
+ uint64_t frag_flags, frag_end;
+ unsigned frag;
+
+ /* This intentionally wraps around if no bit is set */
+ frag = min((unsigned)ffs(start) - 1,
+ (unsigned)fls64(end - start) - 1);
+ if (frag >= max_frag) {
+ frag_flags = AMDGPU_PTE_FRAG(max_frag);
+ frag_end = end & ~((1ULL << max_frag) - 1);
+ } else {
+ frag_flags = AMDGPU_PTE_FRAG(frag);
+ frag_end = start + (1 << frag);
+ }
+
+ r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
+ flags | frag_flags);
if (r)
return r;
- dst += (frag_start - start) * AMDGPU_GPU_PAGE_SIZE;
- }
- /* handle the area in the middle */
- r = amdgpu_vm_update_ptes(params, frag_start, frag_end, dst,
- flags | frag_flags);
- if (r)
- return r;
-
- /* handle the 4K area at the end */
- if (frag_end != end) {
- dst += (frag_end - frag_start) * AMDGPU_GPU_PAGE_SIZE;
- r = amdgpu_vm_update_ptes(params, frag_end, end, dst, flags);
+ dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
+ start = frag_end;
}
- return r;
+
+ return 0;
}
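
The rewritten loop derives each fragment from the alignment of start (ffs) and the remaining length (fls64), replacing the old begin/middle/end split. A standalone walk of that selection with illustrative numbers; the real code also advances dst and ORs AMDGPU_PTE_FRAG(frag) into the PTE flags, and max_frag stands in for vm_manager.fragment_size.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start = 5, end = 40;	/* page numbers, illustrative */
	unsigned max_frag = 4;

	while (start != end) {
		/* ffs(start) - 1: count of trailing zero bits */
		unsigned align = start ? (unsigned)__builtin_ctzll(start) : 63;
		/* fls64(len) - 1: index of the highest set bit */
		unsigned len = 63 - (unsigned)__builtin_clzll(end - start);
		unsigned frag = align < len ? align : len;
		uint64_t frag_end;

		if (frag >= max_frag) {
			frag = max_frag;
			frag_end = end & ~((1ULL << max_frag) - 1);
		} else {
			frag_end = start + (1ULL << frag);
		}

		printf("[%llu, %llu) uses fragment size %u\n",
		       (unsigned long long)start,
		       (unsigned long long)frag_end, frag);
		start = frag_end;
	}
	return 0;
}
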
/**
@@ -1456,7 +1524,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
*
* @adev: amdgpu_device pointer
* @exclusive: fence we need to sync to
- * @src: address where to copy page table entries from
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @start: start of mapped range
@@ -1470,7 +1537,6 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
- uint64_t src,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
uint64_t start, uint64_t last,
@@ -1488,7 +1554,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.src = src;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -1517,10 +1582,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
nptes = last - start + 1;
/*
- * reserve space for one command every (1 << BLOCK_SIZE)
+ * reserve space for two commands every (1 << BLOCK_SIZE)
* entries or 2k dwords (whatever is smaller)
+ *
+	 * The second command is for the shadow page tables.
*/
- ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
+ ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1) * 2;
/* padding, etc. */
ndw = 64;
@@ -1528,15 +1595,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* one PDE write for each huge page */
ndw += ((nptes >> adev->vm_manager.block_size) + 1) * 6;
- if (src) {
- /* only copy commands needed */
- ndw += ncmds * 7;
-
- params.func = amdgpu_vm_do_copy_ptes;
-
- } else if (pages_addr) {
+ if (pages_addr) {
/* copy commands needed */
- ndw += ncmds * 7;
+ ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
/* and also PTEs */
ndw += nptes * 2;
@@ -1545,10 +1606,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
} else {
/* set page commands needed */
- ndw += ncmds * 10;
+ ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
- /* two extra commands for begin/end of fragment */
- ndw += 2 * 10;
+ /* extra commands for begin/end fragments */
+ ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
+ * adev->vm_manager.fragment_size;
params.func = amdgpu_vm_do_set_ptes;
}
@@ -1559,7 +1621,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.ib = &job->ibs[0];
- if (!src && pages_addr) {
+ if (pages_addr) {
uint64_t *pte;
unsigned i;
@@ -1580,12 +1642,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
+ r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
owner);
if (r)
goto error_free;
- r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
+ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
if (r)
goto error_free;
@@ -1600,14 +1662,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_bo_fence(vm->root.bo, f, true);
+ amdgpu_bo_fence(vm->root.base.bo, f, true);
dma_fence_put(*fence);
*fence = f;
return 0;
error_free:
amdgpu_job_free(job);
- amdgpu_vm_invalidate_level(&vm->root);
+ amdgpu_vm_invalidate_level(vm, &vm->root);
return r;
}
@@ -1636,7 +1698,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
- uint64_t pfn, src = 0, start = mapping->start;
+ uint64_t pfn, start = mapping->start;
int r;
	/* normally, bo_va->flags only contains READABLE and WRITEABLE bits go here
@@ -1691,8 +1753,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
addr += pfn << PAGE_SHIFT;
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, exclusive,
- src, pages_addr, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, exclusive, pages_addr, vm,
start, last, flags, addr,
fence);
if (r)
@@ -1730,7 +1791,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
dma_addr_t *pages_addr = NULL;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
- struct dma_fence *exclusive;
+ struct dma_fence *exclusive, **last_update;
uint64_t flags;
int r;
@@ -1756,38 +1817,43 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
else
flags = 0x0;
- spin_lock(&vm->status_lock);
- if (!list_empty(&bo_va->base.vm_status))
+ if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+ last_update = &vm->last_update;
+ else
+ last_update = &bo_va->last_pt_update;
+
+ if (!clear && bo_va->base.moved) {
+ bo_va->base.moved = false;
list_splice_init(&bo_va->valids, &bo_va->invalids);
- spin_unlock(&vm->status_lock);
+
+ } else if (bo_va->cleared != clear) {
+ list_splice_init(&bo_va->valids, &bo_va->invalids);
+ }
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
mapping, flags, nodes,
- &bo_va->last_pt_update);
+ last_update);
if (r)
return r;
}
- if (trace_amdgpu_vm_bo_mapping_enabled()) {
- list_for_each_entry(mapping, &bo_va->valids, list)
- trace_amdgpu_vm_bo_mapping(mapping);
-
- list_for_each_entry(mapping, &bo_va->invalids, list)
- trace_amdgpu_vm_bo_mapping(mapping);
+ if (vm->use_cpu_for_update) {
+ /* Flush HDP */
+ mb();
+ amdgpu_gart_flush_gpu_tlb(adev, 0);
}
spin_lock(&vm->status_lock);
- list_splice_init(&bo_va->invalids, &bo_va->valids);
list_del_init(&bo_va->base.vm_status);
- if (clear)
- list_add(&bo_va->base.vm_status, &vm->cleared);
spin_unlock(&vm->status_lock);
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ list_splice_init(&bo_va->invalids, &bo_va->valids);
+ bo_va->cleared = clear;
+
+ if (trace_amdgpu_vm_bo_mapping_enabled()) {
+ list_for_each_entry(mapping, &bo_va->valids, list)
+ trace_amdgpu_vm_bo_mapping(mapping);
}
return 0;
@@ -1895,7 +1961,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
*/
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- struct reservation_object *resv = vm->root.bo->tbo.resv;
+ struct reservation_object *resv = vm->root.base.bo->tbo.resv;
struct dma_fence *excl, **shared;
unsigned i, shared_count;
int r;
@@ -1953,7 +2019,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
if (vm->pte_support_ats)
init_pte_value = AMDGPU_PTE_SYSTEM;
- r = amdgpu_vm_bo_update_mapping(adev, NULL, 0, NULL, vm,
+ r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
mapping->start, mapping->last,
init_pte_value, 0, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -1975,29 +2041,35 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_clear_moved - clear moved BOs in the PT
+ * amdgpu_vm_handle_moved - handle moved BOs in the PT
*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @sync: sync object to add fences to
*
- * Make sure all moved BOs are cleared in the PT.
+ * Make sure all BOs which are moved are updated in the PTs.
* Returns 0 for success.
*
- * PTs have to be reserved and mutex must be locked!
+ * PTs have to be reserved!
*/
-int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync)
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
- struct amdgpu_bo_va *bo_va = NULL;
+ bool clear;
int r = 0;
spin_lock(&vm->status_lock);
while (!list_empty(&vm->moved)) {
+ struct amdgpu_bo_va *bo_va;
+
bo_va = list_first_entry(&vm->moved,
struct amdgpu_bo_va, base.vm_status);
spin_unlock(&vm->status_lock);
- r = amdgpu_vm_bo_update(adev, bo_va, true);
+ /* Per VM BOs never need to be cleared in the page tables */
+ clear = bo_va->base.bo->tbo.resv != vm->root.base.bo->tbo.resv;
+
+ r = amdgpu_vm_bo_update(adev, bo_va, clear);
if (r)
return r;
@@ -2005,9 +2077,6 @@ int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
spin_unlock(&vm->status_lock);
- if (bo_va)
- r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);
-
return r;
}
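A minimal caller sketch for the renamed helper, assuming the usual submission flow; everything named example_* is illustrative, while amdgpu_vm_clear_freed() and amdgpu_vm_handle_moved() are the real entry points and the PD/PT BOs must already be reserved, as the comment above demands.

static int example_flush_vm_updates(struct amdgpu_device *adev,
                                    struct amdgpu_vm *vm)
{
        struct dma_fence *fence = NULL;
        int r;

        /* first write out the unmap operations queued on vm->freed */
        r = amdgpu_vm_clear_freed(adev, vm, &fence);
        if (r)
                return r;
        dma_fence_put(fence);

        /* then update the PTEs for every BO still on vm->moved */
        return amdgpu_vm_handle_moved(adev, vm);
}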
@@ -2049,6 +2118,39 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
return bo_va;
}
+
+/**
+ * amdgpu_vm_bo_insert_map - insert a new mapping
+ *
+ * @adev: amdgpu_device pointer
+ * @bo_va: bo_va to store the address
+ * @mapping: the mapping to insert
+ *
+ * Insert a new mapping into all structures.
+ */
+static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ struct amdgpu_bo_va_mapping *mapping)
+{
+ struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_bo *bo = bo_va->base.bo;
+
+ mapping->bo_va = bo_va;
+ list_add(&mapping->list, &bo_va->invalids);
+ amdgpu_vm_it_insert(mapping, &vm->va);
+
+ if (mapping->flags & AMDGPU_PTE_PRT)
+ amdgpu_vm_prt_get(adev);
+
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ spin_lock(&vm->status_lock);
+ if (list_empty(&bo_va->base.vm_status))
+ list_add(&bo_va->base.vm_status, &vm->moved);
+ spin_unlock(&vm->status_lock);
+ }
+ trace_amdgpu_vm_bo_map(bo_va, mapping);
+}
+
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@@ -2100,17 +2202,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
if (!mapping)
return -ENOMEM;
- INIT_LIST_HEAD(&mapping->list);
mapping->start = saddr;
mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->invalids);
- amdgpu_vm_it_insert(mapping, &vm->va);
-
- if (flags & AMDGPU_PTE_PRT)
- amdgpu_vm_prt_get(adev);
+ amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
return 0;
}
@@ -2137,7 +2234,6 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
{
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo = bo_va->base.bo;
- struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
@@ -2171,11 +2267,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
mapping->offset = offset;
mapping->flags = flags;
- list_add(&mapping->list, &bo_va->invalids);
- amdgpu_vm_it_insert(mapping, &vm->va);
-
- if (flags & AMDGPU_PTE_PRT)
- amdgpu_vm_prt_get(adev);
+ amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
return 0;
}
@@ -2221,6 +2313,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
+ mapping->bo_va = NULL;
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
@@ -2306,6 +2399,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
if (tmp->last > eaddr)
tmp->last = eaddr;
+ tmp->bo_va = NULL;
list_add(&tmp->list, &vm->freed);
trace_amdgpu_vm_bo_unmap(NULL, tmp);
}
@@ -2332,6 +2426,19 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_bo_lookup_mapping - find mapping by address
+ *
+ * @vm: the requested VM
+ * @addr: the address to look up
+ *
+ * Find a mapping by its address.
+ */
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr)
+{
+ return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
+}
+
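A hedged usage sketch for the lookup above; example_addr_is_mapped() is hypothetical, and the AMDGPU_GPU_PAGE_SHIFT conversion is an assumption based on the interval tree storing page-granular start/last values.

static bool example_addr_is_mapped(struct amdgpu_vm *vm, uint64_t addr)
{
        struct amdgpu_bo_va_mapping *mapping;

        /* mapping->start/last hold page numbers, so shift the byte address */
        mapping = amdgpu_vm_bo_lookup_mapping(vm, addr >> AMDGPU_GPU_PAGE_SHIFT);

        return mapping && mapping->bo_va;
}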
+/**
* amdgpu_vm_bo_rmv - remove a bo from a specific vm
*
* @adev: amdgpu_device pointer
@@ -2356,6 +2463,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
amdgpu_vm_it_remove(mapping, &vm->va);
+ mapping->bo_va = NULL;
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
@@ -2380,15 +2488,36 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
* Mark @bo as invalid.
*/
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo)
+ struct amdgpu_bo *bo, bool evicted)
{
struct amdgpu_vm_bo_base *bo_base;
list_for_each_entry(bo_base, &bo->va, bo_list) {
+ struct amdgpu_vm *vm = bo_base->vm;
+
+ bo_base->moved = true;
+ if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&bo_base->vm_status, &vm->evicted);
+ else
+ list_move_tail(&bo_base->vm_status,
+ &vm->evicted);
+ spin_unlock(&bo_base->vm->status_lock);
+ continue;
+ }
+
+ if (bo->tbo.type == ttm_bo_type_kernel) {
+ spin_lock(&bo_base->vm->status_lock);
+ if (list_empty(&bo_base->vm_status))
+ list_add(&bo_base->vm_status, &vm->relocated);
+ spin_unlock(&bo_base->vm->status_lock);
+ continue;
+ }
+
spin_lock(&bo_base->vm->status_lock);
if (list_empty(&bo_base->vm_status))
- list_add(&bo_base->vm_status,
- &bo_base->vm->moved);
+ list_add(&bo_base->vm_status, &vm->moved);
spin_unlock(&bo_base->vm->status_lock);
}
}
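The rewritten invalidate above sorts a BO onto one of three status lists; a purely illustrative restatement of that decision, where per_vm_bo means the BO shares the root PD's reservation object and is_page_table corresponds to ttm_bo_type_kernel:

enum example_vm_list { EXAMPLE_EVICTED, EXAMPLE_RELOCATED, EXAMPLE_MOVED };

static enum example_vm_list example_classify(bool evicted, bool per_vm_bo,
                                             bool is_page_table)
{
        if (evicted && per_vm_bo)
                return EXAMPLE_EVICTED;   /* needs validation first */
        if (is_page_table)
                return EXAMPLE_RELOCATED; /* PD/PT, its parent needs an update */
        return EXAMPLE_MOVED;             /* regular BO, PTEs need an update */
}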
@@ -2458,7 +2587,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size, uint32_
* Init @vm fields.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context)
+ int vm_context, unsigned int pasid)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
AMDGPU_VM_PTE_COUNT(adev) * 8);
@@ -2474,8 +2603,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
spin_lock_init(&vm->status_lock);
+ INIT_LIST_HEAD(&vm->evicted);
+ INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
- INIT_LIST_HEAD(&vm->cleared);
INIT_LIST_HEAD(&vm->freed);
/* create scheduler entity for page table updates */
@@ -2506,7 +2636,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_vm_is_large_bar(adev)),
"CPU update of VM recommended only for large BAR system\n");
- vm->last_dir_update = NULL;
+ vm->last_update = NULL;
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED;
@@ -2519,30 +2649,46 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_bo_create(adev, amdgpu_vm_bo_size(adev, 0), align, true,
AMDGPU_GEM_DOMAIN_VRAM,
flags,
- NULL, NULL, init_pde_value, &vm->root.bo);
+ NULL, NULL, init_pde_value, &vm->root.base.bo);
if (r)
goto error_free_sched_entity;
- r = amdgpu_bo_reserve(vm->root.bo, false);
- if (r)
- goto error_free_root;
-
- vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
+ vm->root.base.vm = vm;
+ list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
+ INIT_LIST_HEAD(&vm->root.base.vm_status);
if (vm->use_cpu_for_update) {
- r = amdgpu_bo_kmap(vm->root.bo, NULL);
+ r = amdgpu_bo_reserve(vm->root.base.bo, false);
if (r)
goto error_free_root;
+
+ r = amdgpu_bo_kmap(vm->root.base.bo, NULL);
+ amdgpu_bo_unreserve(vm->root.base.bo);
+ if (r)
+ goto error_free_root;
+ }
+
+ if (pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
+ GFP_ATOMIC);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ if (r < 0)
+ goto error_free_root;
+
+ vm->pasid = pasid;
}
- amdgpu_bo_unreserve(vm->root.bo);
+ INIT_KFIFO(vm->faults);
return 0;
error_free_root:
- amdgpu_bo_unref(&vm->root.bo->shadow);
- amdgpu_bo_unref(&vm->root.bo);
- vm->root.bo = NULL;
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+ amdgpu_bo_unref(&vm->root.base.bo);
+ vm->root.base.bo = NULL;
error_free_sched_entity:
amd_sched_entity_fini(&ring->sched, &vm->entity);
@@ -2561,9 +2707,11 @@ static void amdgpu_vm_free_levels(struct amdgpu_vm_pt *level)
{
unsigned i;
- if (level->bo) {
- amdgpu_bo_unref(&level->bo->shadow);
- amdgpu_bo_unref(&level->bo);
+ if (level->base.bo) {
+ list_del(&level->base.bo_list);
+ list_del(&level->base.vm_status);
+ amdgpu_bo_unref(&level->base.bo->shadow);
+ amdgpu_bo_unref(&level->base.bo);
}
if (level->entries)
@@ -2586,8 +2734,21 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+ u64 fault;
int i;
+ /* Clear pending page faults from IH when the VM is destroyed */
+ while (kfifo_get(&vm->faults, &fault))
+ amdgpu_ih_clear_fault(adev, fault);
+
+ if (vm->pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ }
+
amd_sched_entity_fini(vm->entity.sched, &vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -2610,7 +2771,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
}
amdgpu_vm_free_levels(&vm->root);
- dma_fence_put(vm->last_dir_update);
+ dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vm_free_reserved_vmid(adev, vm, i);
}
@@ -2668,6 +2829,8 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
adev->vm_manager.vm_update_mode = 0;
#endif
+ idr_init(&adev->vm_manager.pasid_idr);
+ spin_lock_init(&adev->vm_manager.pasid_lock);
}
/**
@@ -2681,6 +2844,9 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
unsigned i, j;
+ WARN_ON(!idr_is_empty(&adev->vm_manager.pasid_idr));
+ idr_destroy(&adev->vm_manager.pasid_idr);
+
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vm_id_manager *id_mgr =
&adev->vm_manager.id_mgr[i];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 6716355403ec..0af090667dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -25,6 +25,7 @@
#define __AMDGPU_VM_H__
#include <linux/rbtree.h>
+#include <linux/idr.h>
#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
@@ -105,17 +106,24 @@ struct amdgpu_vm_bo_base {
/* protected by spinlock */
struct list_head vm_status;
+
+ /* protected by the BO being reserved */
+ bool moved;
};
struct amdgpu_vm_pt {
- struct amdgpu_bo *bo;
- uint64_t addr;
+ struct amdgpu_vm_bo_base base;
+ uint64_t addr;
/* array of page tables, one for each directory entry */
- struct amdgpu_vm_pt *entries;
- unsigned last_entry_used;
+ struct amdgpu_vm_pt *entries;
+ unsigned last_entry_used;
};
+#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
+#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
+#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
+
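A hedged round-trip example for the fault-key macros above; it assumes addr stays below 2^48, since AMDGPU_VM_FAULT() does not mask the address before OR-ing in the PASID.

static inline u64 example_fault_key(unsigned int pasid, u64 addr)
{
        u64 key = AMDGPU_VM_FAULT(pasid, addr);

        /* the PASID returns from bits 63:48, the page address from 47:12 */
        WARN_ON(AMDGPU_VM_FAULT_PASID(key) != pasid);
        WARN_ON(AMDGPU_VM_FAULT_ADDR(key) != (addr & 0xfffffffff000ULL));
        return key;
}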
struct amdgpu_vm {
/* tree of virtual addresses mapped */
struct rb_root_cached va;
@@ -123,19 +131,21 @@ struct amdgpu_vm {
/* protecting invalidated */
spinlock_t status_lock;
+ /* BOs that need validation */
+ struct list_head evicted;
+
+ /* PT BOs which have been relocated and whose parent needs an update */
+ struct list_head relocated;
+
/* BOs moved, but not yet updated in the PT */
struct list_head moved;
- /* BOs cleared in the PT because of a move */
- struct list_head cleared;
-
/* BO mappings freed, but not yet updated in the PT */
struct list_head freed;
/* contains the page directory */
struct amdgpu_vm_pt root;
- struct dma_fence *last_dir_update;
- uint64_t last_eviction_counter;
+ struct dma_fence *last_update;
/* protecting freed */
spinlock_t freed_lock;
@@ -143,8 +153,9 @@ struct amdgpu_vm {
/* Scheduler entity for page table updates */
struct amd_sched_entity entity;
- /* client id */
+ /* client id and PASID (TODO: replace client_id with PASID) */
u64 client_id;
+ unsigned int pasid;
/* dedicated to vm */
struct amdgpu_vm_id *reserved_vmid[AMDGPU_MAX_VMHUBS];
@@ -153,6 +164,9 @@ struct amdgpu_vm {
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
+
+ /* Up to 128 pending page faults */
+ DECLARE_KFIFO(faults, u64, 128);
};
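A sketch of the producer side for the faults kfifo, assuming it is filled from fault handling and drained in amdgpu_vm_fini() as shown earlier; example_record_fault() is hypothetical.

static void example_record_fault(struct amdgpu_vm *vm,
                                 unsigned int pasid, u64 addr)
{
        u64 key = AMDGPU_VM_FAULT(pasid, addr);

        /* kfifo_put() never blocks; the fault is simply dropped when full */
        kfifo_put(&vm->faults, key);
}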
struct amdgpu_vm_id {
@@ -215,16 +229,25 @@ struct amdgpu_vm_manager {
* BIT1[= 0] Compute updated by SDMA [= 1] by CPU
*/
int vm_update_mode;
+
+ /* PASID to VM mapping, used in interrupt context to
+ * look up the VM of a page fault
+ */
+ struct idr pasid_idr;
+ spinlock_t pasid_lock;
};
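A minimal lookup sketch matching the stated interrupt-context use of the idr above; example_pasid_to_vm() is hypothetical, while idr_find() and pasid_lock are real.

static struct amdgpu_vm *example_pasid_to_vm(struct amdgpu_device *adev,
                                             unsigned int pasid)
{
        struct amdgpu_vm *vm;
        unsigned long flags;

        spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
        vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
        spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);

        return vm;
}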
+int amdgpu_vm_alloc_pasid(unsigned int bits);
+void amdgpu_vm_free_pasid(unsigned int pasid);
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int vm_context);
+ int vm_context, unsigned int pasid);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry);
+bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
@@ -243,13 +266,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
-int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- struct amdgpu_sync *sync);
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
- struct amdgpu_bo *bo);
+ struct amdgpu_bo *bo, bool evicted);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -269,6 +292,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size);
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+ uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index d69aa2e179bb..69500a8b4e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -1343,8 +1343,11 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
idx = 0x80;
str = CSTR(idx);
- if (*str != '\0')
+ if (*str != '\0') {
pr_info("ATOM BIOS: %s\n", str);
+ strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
+ }
+
return ctx;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index ddd8045accf3..a39170991afe 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -140,6 +140,7 @@ struct atom_context {
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
+ char vbios_version[20];
};
extern int amdgpu_atom_debug;
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index cb508a211b2f..68ce1bdaf2fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -307,7 +307,6 @@ static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
@@ -883,8 +882,9 @@ static int ci_power_control_set_level(struct amdgpu_device *adev)
return ret;
}
-static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
pi->uvd_power_gated = gate;
@@ -901,8 +901,9 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
}
}
-static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool ci_dpm_vblank_too_short(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
@@ -1210,11 +1211,12 @@ static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
-static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_get_fan_speed_percent(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.no_fan)
return -ENOENT;
@@ -1237,12 +1239,13 @@ static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_set_fan_speed_percent(void *handle,
u32 speed)
{
u32 tmp;
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.no_fan)
@@ -1271,8 +1274,10 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
switch (mode) {
case AMD_FAN_CTRL_NONE:
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -1292,8 +1297,9 @@ static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
}
}
-static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 ci_dpm_get_fan_control_mode(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (pi->fan_is_controlled_by_smc)
@@ -4378,9 +4384,10 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
}
-static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
+static int ci_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
u32 tmp, levels, i;
int ret;
@@ -5291,8 +5298,9 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
-static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -5304,8 +5312,9 @@ static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void ci_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -5479,8 +5488,9 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
ci_update_current_ps(adev, boot_ps);
}
-static int ci_dpm_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -5551,8 +5561,10 @@ static void ci_dpm_reset_asic(struct amdgpu_device *adev)
}
#endif
-static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void ci_dpm_display_configuration_changed(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
ci_program_display_gap(adev);
}
@@ -6105,9 +6117,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
}
static void
-ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ci_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amdgpu_ps *rps = &pi->current_rps;
u32 sclk = ci_get_average_sclk_freq(adev);
@@ -6131,12 +6144,13 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
seq_printf(m, "GPU load: %u %%\n", activity_percent);
}
-static void ci_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+static void ci_dpm_print_power_state(void *handle, void *current_ps)
{
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct ci_ps *ps = ci_get_ps(rps);
struct ci_pl *pl;
int i;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@@ -6158,20 +6172,23 @@ static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
}
-static int ci_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int ci_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct ci_ps *ci_cps;
struct ci_ps *ci_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
- ci_cps = ci_get_ps(cps);
- ci_rps = ci_get_ps(rps);
+ ci_cps = ci_get_ps((struct amdgpu_ps *)cps);
+ ci_rps = ci_get_ps((struct amdgpu_ps *)rps);
if (ci_cps == NULL) {
*equal = false;
@@ -6199,8 +6216,9 @@ static int ci_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6210,8 +6228,9 @@ static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
-static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
@@ -6222,10 +6241,11 @@ static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
}
/* get temperature in millidegrees */
-static int ci_dpm_get_temp(struct amdgpu_device *adev)
+static int ci_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -6261,7 +6281,6 @@ static int ci_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- ci_dpm_set_dpm_funcs(adev);
ci_dpm_set_irq_funcs(adev);
return 0;
@@ -6551,9 +6570,10 @@ static int ci_dpm_set_powergating_state(void *handle,
return 0;
}
-static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+static int ci_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
@@ -6618,9 +6638,10 @@ static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
return size;
}
-static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+static int ci_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
@@ -6664,8 +6685,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_sclk_od(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6680,8 +6702,9 @@ static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
return value;
}
-static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_sclk_table =
@@ -6698,8 +6721,9 @@ static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
-static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_mclk_od(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6714,8 +6738,9 @@ static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
return value;
}
-static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
struct ci_single_dpm_table *golden_mclk_table =
@@ -6732,9 +6757,10 @@ static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
return 0;
}
-static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_get_power_profile_state(void *handle,
struct amd_pp_profile *query)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !query)
@@ -6851,9 +6877,10 @@ static int ci_set_power_profile_state(struct amdgpu_device *adev,
return result;
}
-static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_set_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
int ret = -1;
@@ -6906,9 +6933,10 @@ static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_reset_power_profile_state(void *handle,
struct amd_pp_profile *request)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
if (!pi || !request)
@@ -6927,9 +6955,10 @@ static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
return -EINVAL;
}
-static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
+static int ci_dpm_switch_power_profile(void *handle,
enum amd_pp_profile_type type)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct ci_power_info *pi = ci_get_pi(adev);
struct amd_pp_profile request = {0};
@@ -6944,11 +6973,12 @@ static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
return 0;
}
-static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int ci_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
u32 activity_percent = 50;
int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* size must be at least 4 bytes for all sensors */
if (*size < 4)
@@ -7003,7 +7033,7 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
.set_powergating_state = ci_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
+const struct amd_pm_funcs ci_dpm_funcs = {
.get_temperature = &ci_dpm_get_temp,
.pre_set_power_state = &ci_dpm_pre_set_power_state,
.set_power_state = &ci_dpm_set_power_state,
@@ -7035,12 +7065,6 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
.read_sensor = ci_dpm_read_sensor,
};
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &ci_dpm_funcs;
-}
-
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
.set = ci_dpm_set_interrupt_state,
.process = ci_dpm_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
index b1c8e7b446ea..c7b4349f6319 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/cik_dpm.h
@@ -26,5 +26,6 @@
extern const struct amd_ip_funcs ci_dpm_ip_funcs;
extern const struct amd_ip_funcs kv_dpm_ip_funcs;
-
+extern const struct amd_pm_funcs ci_dpm_funcs;
+extern const struct amd_pm_funcs kv_dpm_funcs;
#endif
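How the newly exported tables get consumed is not visible in this hunk; a hedged sketch of the expected wiring on the powerplay side (an assumption, not shown in this patch):

/* hypothetical setup in the powerplay glue code */
adev->powerplay.pp_handle = adev;          /* becomes the opaque void *handle */
adev->powerplay.pp_funcs = &ci_dpm_funcs;  /* or kv_dpm_funcs on KV/KB parts */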
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index b8918432c572..07d3d895da10 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -228,6 +228,19 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
* [127:96] - reserved
*/
+/**
+ * cik_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ /* Process all interrupts */
+ return true;
+}
+
/**
* cik_ih_decode_iv - decode an interrupt vector
*
@@ -433,6 +446,7 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cik_ih_funcs = {
.get_wptr = cik_ih_get_wptr,
+ .prescreen_iv = cik_ih_prescreen_iv,
.decode_iv = cik_ih_decode_iv,
.set_rptr = cik_ih_set_rptr
};
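A hedged sketch of where prescreen_iv presumably sits in the common IH loop, as a cheap filter ahead of full decode; the loop body is illustrative, not the driver's actual code.

static void example_process_one_iv(struct amdgpu_device *adev,
                                   struct amdgpu_iv_entry *entry)
{
        if (!adev->irq.ih_funcs->prescreen_iv(adev))
                return; /* e.g. a retried page fault that was already seen */

        adev->irq.ih_funcs->decode_iv(adev, entry);
        /* ... dispatch the decoded entry to the registered IRQ sources ... */
}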
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index f508f4d01e4a..60cecd117705 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -1387,8 +1387,13 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = cik_sdma_vm_copy_pte,
+
.write_pte = cik_sdma_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x1fffff >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = cik_sdma_vm_set_pte_pde,
};
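A hedged sizing sketch for the new fields, assuming the *_num_dw values count dwords per command and set_max_nums_pte_pde caps how many PTEs one set_pte_pde command may write:

static unsigned example_set_pte_pde_ndw(const struct amdgpu_vm_pte_funcs *f,
                                        unsigned num_ptes)
{
        unsigned cmds = DIV_ROUND_UP(num_ptes, f->set_max_nums_pte_pde);

        return cmds * f->set_pte_pde_num_dw; /* dwords of ring space to reserve */
}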
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 0c1209cdd1cb..b6cdf4afaf46 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -208,6 +208,19 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * cz_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ /* Process all interrupts */
+ return true;
+}
+
+/**
* cz_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -414,6 +427,7 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
static const struct amdgpu_ih_funcs cz_ih_funcs = {
.get_wptr = cz_ih_get_wptr,
+ .prescreen_iv = cz_ih_prescreen_iv,
.decode_iv = cz_ih_decode_iv,
.set_rptr = cz_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc260c13b1da..dfc10b1baea0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -125,24 +125,39 @@ MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
@@ -918,8 +933,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+ err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
@@ -929,8 +953,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+ err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.me_fw);
@@ -941,8 +974,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+ err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.ce_fw);
@@ -1012,8 +1054,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ }
if (err)
goto out;
err = amdgpu_ucode_validate(adev->gfx.mec_fw);
@@ -1025,8 +1076,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (err == -ENOENT) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ }
+ } else {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ }
if (!err) {
err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
if (err)
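The five try-"_2.bin"-then-fall-back sequences above share one shape; a hypothetical helper that could factor them out (a sketch, not part of this patch):

static int example_request_cp_fw(const struct firmware **fw,
                                 struct device *dev, const char *chip,
                                 const char *block, bool try_v2)
{
        char name[40];
        int err = -ENOENT;

        if (try_v2) { /* Polaris10/11/12 ship updated "_2" firmware */
                snprintf(name, sizeof(name), "amdgpu/%s_%s_2.bin", chip, block);
                err = request_firmware(fw, name, dev);
        }
        if (err == -ENOENT) { /* fall back to the original file name */
                snprintf(name, sizeof(name), "amdgpu/%s_%s.bin", chip, block);
                err = request_firmware(fw, name, dev);
        }
        return err;
}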
@@ -2053,6 +2113,7 @@ static int gfx_v8_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
+ amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
@@ -4577,12 +4638,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
- if (!(adev->flags & AMD_IS_APU)) {
- mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
- mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
- + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
- }
+ mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -4753,7 +4812,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v8_0_kiq_setting(ring);
- if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4790,7 +4849,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
struct vi_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
+ if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4802,7 +4861,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
- } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4975,12 +5034,69 @@ static int gfx_v8_0_hw_init(void *handle)
return r;
}
+static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint32_t scratch, tmp = 0;
+ int r, i;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(kiq_ring, 10);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+
+ /* unmap queues */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ /* write to scratch for completion */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+ amdgpu_ring_commit(kiq_ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i >= adev->usec_timeout) {
+ DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
+
static int gfx_v8_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+ /* disable KCQs so the CPC does not touch memory that is no longer valid */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+
if (amdgpu_sriov_vf(adev)) {
pr_debug("For SRIOV client, shouldn't do anything.\n");
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 69182eeca264..deeaee1457ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -66,38 +66,70 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
- {SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
- SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
+ { SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
+ SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
};
static const u32 golden_settings_gc_9_0[] =
@@ -352,6 +384,25 @@ err1:
return r;
}
+
+static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
+{
+ release_firmware(adev->gfx.pfp_fw);
+ adev->gfx.pfp_fw = NULL;
+ release_firmware(adev->gfx.me_fw);
+ adev->gfx.me_fw = NULL;
+ release_firmware(adev->gfx.ce_fw);
+ adev->gfx.ce_fw = NULL;
+ release_firmware(adev->gfx.rlc_fw);
+ adev->gfx.rlc_fw = NULL;
+ release_firmware(adev->gfx.mec_fw);
+ adev->gfx.mec_fw = NULL;
+ release_firmware(adev->gfx.mec2_fw);
+ adev->gfx.mec2_fw = NULL;
+
+ kfree(adev->gfx.rlc.register_list_format);
+}
+
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -1120,30 +1171,22 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
int r;
- u32 data;
- u32 size;
- u32 base;
+ u32 data, base;
if (!amdgpu_ngg)
return 0;
/* Program buffer size */
- data = 0;
- size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);
-
- size = adev->gfx.ngg.buf[NGG_POS].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);
-
+ data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
+ data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_POS].size >> 8);
WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
- data = 0;
- size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);
-
- size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
- data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);
-
+ data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
+ data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
+ adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
/* Program buffer base address */
@@ -1306,7 +1349,10 @@ static int gfx_v9_0_sw_init(void *handle)
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
- sprintf(ring->name, "gfx");
+ if (!i)
+ sprintf(ring->name, "gfx");
+ else
+ sprintf(ring->name, "gfx_%d", i);
ring->use_doorbell = true;
ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
@@ -1346,7 +1392,7 @@ static int gfx_v9_0_sw_init(void *handle)
return r;
/* create MQD for all compute queues as wel as KIQ for SRIOV case */
- r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
+ r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
if (r)
return r;
@@ -1398,9 +1444,11 @@ static int gfx_v9_0_sw_fini(void *handle)
amdgpu_gfx_compute_mqd_sw_fini(adev);
amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
amdgpu_gfx_kiq_fini(adev);
+ amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
+ gfx_v9_0_free_microcode(adev);
return 0;
}
@@ -1740,11 +1788,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
{
- u32 tmp = 0;
-
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
- tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
}
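WREG32_FIELD15 is assumed to collapse the deleted read-modify-write into one macro; the open-coded equivalent this hunk removes was:

/* assumed expansion of WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1) */
u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);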
static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
@@ -1822,16 +1866,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
- if (default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
+ enable ? 1 : 0);
+ if (default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
@@ -1841,16 +1880,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *ad
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
+ enable ? 1 : 0);
+ if (default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
@@ -1860,16 +1894,11 @@ static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
uint32_t default_data = 0;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-
- if (enable == true) {
- data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- } else {
- data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
- if(default_data != data)
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
- }
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ CP_PG_DISABLE,
+ enable ? 0 : 1);
+ if (default_data != data)
+ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -1878,10 +1907,9 @@ static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ GFX_POWER_GATING_ENABLE,
+ enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1892,10 +1920,9 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ GFX_PIPELINE_PG_ENABLE,
+ enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
@@ -1910,10 +1937,9 @@ static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ STATIC_PER_CU_PG_ENABLE,
+ enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
@@ -1924,10 +1950,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
uint32_t data, default_data;
default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
- if (enable == true)
- data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
- else
- data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+ data = REG_SET_FIELD(data, RLC_PG_CNTL,
+ DYN_PER_CU_PG_ENABLE,
+ enable ? 1 : 0);
if (default_data != data)
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
}
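
For readers skimming the powergating hunks above: each conversion replaces an open-coded mask set/clear pair with a single REG_SET_FIELD() followed by one conditional WREG32. A minimal standalone sketch of that read-modify-write pattern (the mask and shift here are illustrative, not the real RLC_PG_CNTL values):

#include <stdio.h>

#define FIELD_MASK  0x00000004u	/* illustrative one-bit field */
#define FIELD_SHIFT 2

static unsigned int set_field(unsigned int reg, unsigned int val)
{
	return (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);
}

int main(void)
{
	printf("0x%08x\n", set_field(0xffffffffu, 0));	/* 0xfffffffb: bit cleared */
	printf("0x%08x\n", set_field(0xfffffffbu, 1));	/* 0xffffffff: bit set */
	return 0;
}

Both the set and clear paths now produce their value through the same expression, which is why the duplicated if/else branches in the old code could collapse.
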
@@ -1967,13 +1992,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
{
- u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
- WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
-
+ WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
-
gfx_v9_0_wait_for_rlc_serdes(adev);
}
@@ -2045,8 +2065,10 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
{
int r;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ gfx_v9_0_init_csb(adev);
return 0;
+ }
gfx_v9_0_rlc_stop(adev);
@@ -2463,6 +2485,13 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
mqd->compute_misc_reserved = 0x00000003;
+ mqd->dynamic_cu_mask_addr_lo =
+ lower_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+ mqd->dynamic_cu_mask_addr_hi =
+ upper_32_bits(ring->mqd_gpu_addr
+ + offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+
eop_base_addr = ring->eop_gpu_addr >> 8;
mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -2486,10 +2515,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
DOORBELL_SOURCE, 0);
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_HIT, 0);
- }
- else
+ } else {
tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
DOORBELL_EN, 0);
+ }
mqd->cp_hqd_pq_doorbell_control = tmp;
@@ -2692,10 +2721,10 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
gfx_v9_0_kiq_setting(ring);
- if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@@ -2707,7 +2736,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
} else {
- memset((void *)mqd, 0, sizeof(*mqd));
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v9_0_mqd_init(ring);
@@ -2716,7 +2747,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
}
return 0;
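
The switch from sizeof(*mqd) to sizeof(struct v9_mqd_allocation) in the backup/restore memcpy()s above (and in the kcq path below) matters because the allocation carries two extra dwords behind the MQD proper, the masks whose GPU addresses the new dynamic_cu_mask_addr_lo/hi fields advertise. A sketch of the assumed layout, with a stub standing in for the real v9_mqd:

#include <stdint.h>
#include <stdio.h>

/* Stub; the real struct v9_mqd in v9_structs.h has many named fields. */
struct v9_mqd_stub { uint32_t regs[512]; };

struct v9_mqd_allocation_sketch {
	struct v9_mqd_stub mqd;		/* hardware MQD proper */
	uint32_t dynamic_cu_mask;	/* found via dynamic_cu_mask_addr_lo/hi */
	uint32_t dynamic_rb_mask;
};

int main(void)
{
	printf("mqd only: %zu bytes, full allocation: %zu bytes\n",
	       sizeof(struct v9_mqd_stub),
	       sizeof(struct v9_mqd_allocation_sketch));
	return 0;
}

Copying only sizeof(*mqd) would silently drop the two masks from the backup, which is exactly what the widened memcpy()s avoid.
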
@@ -2728,8 +2759,10 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
- memset((void *)mqd, 0, sizeof(*mqd));
+ if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+ memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+ ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+ ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
mutex_lock(&adev->srbm_mutex);
soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
gfx_v9_0_mqd_init(ring);
@@ -2737,11 +2770,11 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
mutex_unlock(&adev->srbm_mutex);
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
- } else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+ memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
+ } else if (adev->in_sriov_reset) { /* for GPU_RESET case */
/* reset MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
- memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+ memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
/* reset ring buffer */
ring->wptr = 0;
@@ -2882,12 +2915,70 @@ static int gfx_v9_0_hw_init(void *handle)
return r;
}
+static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = kiq_ring->adev;
+ uint32_t scratch, tmp = 0;
+ int r, i;
+
+ r = amdgpu_gfx_scratch_get(adev, &scratch);
+ if (r) {
+ DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+ return r;
+ }
+ WREG32(scratch, 0xCAFEDEAD);
+
+ r = amdgpu_ring_alloc(kiq_ring, 10);
+ if (r) {
+ DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+ }
+
+ /* unmap queues */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+ amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+ PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+ PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+ PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+ amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ amdgpu_ring_write(kiq_ring, 0);
+ /* write to scratch for completion */
+ amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+ amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+ amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+ amdgpu_ring_commit(kiq_ring);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ tmp = RREG32(scratch);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i >= adev->usec_timeout) {
+		DRM_ERROR("KCQ disable failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+ r = -EINVAL;
+ }
+ amdgpu_gfx_scratch_free(adev, scratch);
+ return r;
+}
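
gfx_v9_0_kcq_disable() confirms that the firmware consumed the UNMAP_QUEUES packet with a scratch-register handshake: the stream ends by writing 0xDEADBEEF through SET_UCONFIG_REG, and the CPU polls the same scratch offset. The core of that poll, sketched with hypothetical accessors standing in for RREG32() and udelay():

#include <stdint.h>

extern uint32_t read_reg(uint32_t offset);	/* hypothetical RREG32() */
extern void wait_1us(void);			/* hypothetical udelay(1) */

static int wait_for_magic(uint32_t scratch, uint32_t magic,
			  unsigned int usec_timeout)
{
	unsigned int i;

	for (i = 0; i < usec_timeout; i++) {
		if (read_reg(scratch) == magic)
			return 0;	/* firmware reached end of stream */
		wait_1us();
	}
	return -1;			/* timed out, queue may still be mapped */
}

Seeding the scratch register with 0xCAFEDEAD first guarantees that a stale value cannot be mistaken for completion.
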
+
static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i;
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+
+	/* disable KCQ to prevent CPC from touching memory that is no longer valid */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++)
+ gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+
if (amdgpu_sriov_vf(adev)) {
		pr_debug("For SR-IOV client, nothing to do here.\n");
return 0;
@@ -2930,15 +3021,10 @@ static bool gfx_v9_0_is_idle(void *handle)
static int gfx_v9_0_wait_for_idle(void *handle)
{
unsigned i;
- u32 tmp;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (i = 0; i < adev->usec_timeout; i++) {
- /* read MC_STATUS */
- tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
- GRBM_STATUS__GUI_ACTIVE_MASK;
-
- if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+ if (gfx_v9_0_is_idle(handle))
return 0;
udelay(1);
}
@@ -3499,7 +3585,9 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask, reg_mem_engine;
struct nbio_hdp_flush_reg *nbio_hf_reg;
- if (ring->adev->asic_type == CHIP_VEGA10)
+ if (ring->adev->flags & AMD_IS_APU)
+ nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
+ else
nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
@@ -3528,7 +3616,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
gfx_v9_0_write_data_to_reg(ring, 0, true,
- SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
+ SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}
static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -3757,6 +3845,12 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
+static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+ amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+}
+
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
uint32_t dw2 = 0;
@@ -3764,6 +3858,8 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
+ gfx_v9_0_ring_emit_tmz(ring, true);
+
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -3814,12 +3910,6 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
-{
- amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
-}
-
static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
{
struct amdgpu_device *adev = ring->adev;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6c8040e616c4..c17996e18086 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -319,6 +319,12 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 5be9c83dfcf7..f4603a7c8ef3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -831,7 +831,7 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
@@ -901,6 +901,8 @@ static int gmc_v6_0_sw_fini(void *handle)
gmc_v6_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
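
The amdgpu_vm_adjust_size(adev, 64, 9) change above (mirrored for gmc v7 and v8 below) keeps the 64 GB default VM size but raises the third argument, which reads as the log2 fragment size in pages given the 4 to 9 bump, so fragments grow from 64 KiB to 2 MiB. Assuming 4 KiB GPUVM pages:

#include <stdio.h>

int main(void)
{
	unsigned long page = 4096;	/* 4 KiB GPUVM page */

	printf("old fragment: %lu KiB\n", (page << 4) / 1024);	/* 64 */
	printf("new fragment: %lu KiB\n", (page << 9) / 1024);	/* 2048 */
	return 0;
}
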
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index eace9e7182c8..b0528ca9207b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -970,7 +970,7 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1050,6 +1050,8 @@ static int gmc_v7_0_sw_fini(void *handle)
gmc_v7_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 3b3326daf32b..f368cfe2f585 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1067,7 +1067,7 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- amdgpu_vm_adjust_size(adev, 64, 4);
+ amdgpu_vm_adjust_size(adev, 64, 9);
adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
@@ -1147,6 +1147,8 @@ static int gmc_v8_0_sw_fini(void *handle)
gmc_v8_0_gart_fini(adev);
amdgpu_gem_force_release(adev);
amdgpu_bo_fini(adev);
+ release_firmware(adev->mc.fw);
+ adev->mc.fw = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d04d0b123212..621699331e09 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -32,6 +32,8 @@
#include "vega10/DC/dce_12_0_offset.h"
#include "vega10/DC/dce_12_0_sh_mask.h"
#include "vega10/vega10_enum.h"
+#include "vega10/MMHUB/mmhub_1_0_offset.h"
+#include "vega10/ATHUB/athub_1_0_offset.h"
#include "soc15_common.h"
@@ -71,13 +73,25 @@ static const u32 golden_settings_vega10_hdp[] =
0xf6e, 0x0fffffff, 0x00000000,
};
+static const u32 golden_settings_mmhub_1_0_0[] =
+{
+ SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
+ SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
+};
+
+static const u32 golden_settings_athub_1_0_0[] =
+{
+ SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
+ SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
+};
+
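
The two tables above follow the usual golden-settings shape: (register offset, and-mask, or-value) triplets applied read-modify-write at init time. A simplified sketch of the consuming loop in the spirit of amdgpu_program_register_sequence() (the real helper also short-circuits a full 0xffffffff mask into a plain write):

static void program_golden_sketch(struct amdgpu_device *adev,
				  const u32 *regs, u32 array_size)
{
	u32 i, tmp;

	for (i = 0; i + 2 < array_size; i += 3) {
		tmp = RREG32(regs[i]);	/* current value */
		tmp &= ~regs[i + 1];	/* clear the masked bits */
		tmp |= regs[i + 2];	/* install the golden value */
		WREG32(regs[i], tmp);
	}
}
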
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
enum amdgpu_interrupt_state state)
{
struct amdgpu_vmhub *hub;
- u32 tmp, reg, bits, i;
+ u32 tmp, reg, bits, i, j;
bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -89,43 +103,26 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- /* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB];
- for (i = 0; i< 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp &= ~bits;
- WREG32(reg, tmp);
- }
-
- /* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp &= ~bits;
- WREG32(reg, tmp);
+ for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+ hub = &adev->vmhub[j];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + i;
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
}
break;
case AMDGPU_IRQ_STATE_ENABLE:
- /* MM HUB */
- hub = &adev->vmhub[AMDGPU_MMHUB];
- for (i = 0; i< 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp |= bits;
- WREG32(reg, tmp);
- }
-
- /* GFX HUB */
- hub = &adev->vmhub[AMDGPU_GFXHUB];
- for (i = 0; i < 16; i++) {
- reg = hub->vm_context0_cntl + i;
- tmp = RREG32(reg);
- tmp |= bits;
- WREG32(reg, tmp);
+ for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+ hub = &adev->vmhub[j];
+ for (i = 0; i < 16; i++) {
+ reg = hub->vm_context0_cntl + i;
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
}
- break;
default:
break;
}
@@ -682,8 +679,17 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_VEGA10:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_mmhub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
+ amdgpu_program_register_sequence(adev,
+ golden_settings_athub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
case CHIP_RAVEN:
+ amdgpu_program_register_sequence(adev,
+ golden_settings_athub_1_0_0,
+ (const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
default:
break;
@@ -713,12 +719,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- /* After HDP is initialized, flush HDP.*/
- if (adev->flags & AMD_IS_APU)
- nbio_v7_0_hdp_flush(adev);
- else
- nbio_v6_1_hdp_flush(adev);
-
switch (adev->asic_type) {
case CHIP_RAVEN:
mmhub_v1_0_initialize_power_gating(adev);
@@ -736,13 +736,16 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
- tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
- WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+ WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+	/* After HDP is initialized, flush HDP. */
+ if (adev->flags & AMD_IS_APU)
+ nbio_v7_0_hdp_flush(adev);
+ else
+ nbio_v6_1_hdp_flush(adev);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
value = false;
@@ -751,7 +754,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
gfxhub_v1_0_set_fault_enable_default(adev, value);
mmhub_v1_0_set_fault_enable_default(adev, value);
-
gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -770,17 +772,11 @@ static int gmc_v9_0_hw_init(void *handle)
gmc_v9_0_init_golden_registers(adev);
if (adev->mode_info.num_crtc) {
- u32 tmp;
-
/* Lockout access through VGA aperture*/
- tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
- WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
/* disable VGA render */
- tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
- tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
r = gmc_v9_0_gart_enable(adev);
@@ -822,9 +818,7 @@ static int gmc_v9_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gmc_v9_0_hw_fini(adev);
-
- return 0;
+ return gmc_v9_0_hw_fini(adev);
}
static int gmc_v9_0_resume(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 7a0ea27ac429..65ed6d3a8f05 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -208,6 +208,19 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * iceland_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ /* Process all interrupts */
+ return true;
+}
+
+/**
* iceland_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -412,6 +425,7 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
static const struct amdgpu_ih_funcs iceland_ih_funcs = {
.get_wptr = iceland_ih_get_wptr,
+ .prescreen_iv = iceland_ih_prescreen_iv,
.decode_iv = iceland_ih_decode_iv,
.set_rptr = iceland_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 3bbf2ccfca89..b57399a462c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -42,7 +42,6 @@
#define KV_MINIMUM_ENGINE_CLOCK 800
#define SMC_RAM_END 0x40000
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
bool enable);
@@ -64,7 +63,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_uvd(void *handle, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1245,8 +1244,9 @@ static void kv_update_requested_ps(struct amdgpu_device *adev,
adev->pm.dpm.requested_ps = &pi->requested_rps;
}
-static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
+static void kv_dpm_enable_bapm(void *handle, bool enable)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
@@ -1672,8 +1672,9 @@ static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
return kv_enable_acp_dpm(adev, !gate);
}
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
int ret;
@@ -1868,10 +1869,11 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
return ret;
}
-static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
+static int kv_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
ret = kv_force_dpm_highest(adev);
@@ -1892,8 +1894,9 @@ static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -1907,8 +1910,9 @@ static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static int kv_dpm_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -1981,8 +1985,9 @@ static int kv_dpm_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void kv_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct amdgpu_ps *new_ps = &pi->requested_rps;
@@ -2848,9 +2853,10 @@ static int kv_dpm_init(struct amdgpu_device *adev)
}
static void
-kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+kv_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
u32 current_index =
(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
@@ -2875,11 +2881,12 @@ kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
}
static void
-kv_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+kv_dpm_print_power_state(void *handle, void *request_ps)
{
int i;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
struct kv_ps *ps = kv_get_ps(rps);
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dpm_print_class_info(rps->class, rps->class2);
amdgpu_dpm_print_cap_info(rps->caps);
@@ -2905,13 +2912,14 @@ static void kv_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
-static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void kv_dpm_display_configuration_changed(void *handle)
{
}
-static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
@@ -2921,18 +2929,20 @@ static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->levels[requested_state->num_levels - 1].sclk;
}
-static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
return pi->sys_info.bootup_uma_clk;
}
/* get temperature in millidegrees */
-static int kv_dpm_get_temp(struct amdgpu_device *adev)
+static int kv_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = RREG32_SMC(0xC0300E0C);
@@ -2950,7 +2960,6 @@ static int kv_dpm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- kv_dpm_set_dpm_funcs(adev);
kv_dpm_set_irq_funcs(adev);
return 0;
@@ -3222,14 +3231,17 @@ static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
(kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
}
-static int kv_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int kv_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct kv_ps *kv_cps;
struct kv_ps *kv_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
@@ -3262,9 +3274,10 @@ static int kv_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int kv_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
uint32_t sclk;
u32 pl_index =
@@ -3312,7 +3325,7 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
.set_powergating_state = kv_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
+const struct amd_pm_funcs kv_dpm_funcs = {
.get_temperature = &kv_dpm_get_temp,
.pre_set_power_state = &kv_dpm_pre_set_power_state,
.set_power_state = &kv_dpm_set_power_state,
@@ -3330,12 +3343,6 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
.read_sensor = &kv_dpm_read_sensor,
};
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &kv_dpm_funcs;
-}
-
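
With the move to the shared amd_pm_funcs table, every dpm callback takes an opaque void *handle rather than struct amdgpu_device *, so the per-ASIC hook-up removed above becomes unnecessary and each callback opens with the same cast, as the surrounding hunks show. The pattern in isolation (a hypothetical callback, not one from the table):

static int example_pm_callback(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev)
		return -EINVAL;
	/* ... operate on adev exactly as the old signature did ... */
	return 0;
}

The opaque handle is what lets powerplay and these legacy dpm implementations be driven through one function table.
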
static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
.set = kv_dpm_set_interrupt_state,
.process = kv_dpm_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 74cb647da30e..cc21c4bdec27 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -273,7 +273,7 @@ static const struct pctl_data pctl0_data[] = {
{0x135, 0x12a810},
{0x149, 0x7a82c}
};
-#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
+#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
#define PCTL0_RENG_EXEC_END_PTR 0x151
#define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE 0xa640
@@ -309,7 +309,7 @@ static const struct pctl_data pctl1_data[] = {
{0x1f0, 0x5000a7f6},
{0x1f1, 0x5000a7e4}
};
-#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
+#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))
#define PCTL1_RENG_EXEC_END_PTR 0x1f1
#define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE 0xa000
@@ -561,6 +561,13 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+
WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 1e91b9a1c591..67e78576a9eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,7 @@
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
-#define AI_MAILBOX_TIMEDOUT 5000
+#define AI_MAILBOX_TIMEDOUT 12000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
index c791d73d2d54..f13dc6cc158f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -23,7 +23,7 @@
#ifndef __MXGPU_VI_H__
#define __MXGPU_VI_H__
-#define VI_MAILBOX_TIMEDOUT 5000
+#define VI_MAILBOX_TIMEDOUT 12000
#define VI_MAILBOX_RESET_TIME 12
/* VI mailbox messages request */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index f7cf994b1da2..dea7c909ca5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -35,6 +35,8 @@
#include "raven1/GC/gc_9_1_offset.h"
#include "raven1/SDMA0/sdma0_4_1_offset.h"
+MODULE_FIRMWARE("amdgpu/raven_asd.bin");
+
static int
psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
{
@@ -136,15 +138,13 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
{
int ret;
uint64_t fw_mem_mc_addr = ucode->mc_addr;
- struct common_firmware_header *header;
memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
- header = (struct common_firmware_header *)ucode->fw;
cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
- cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes);
+ cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
if (ret)
@@ -209,7 +209,7 @@ int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -229,6 +229,19 @@ int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
+ return ret;
+}
+
+int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v10_0_ring_stop(psp, ring_type);
+ if (ret)
+		DRM_ERROR("Failed to stop psp ring\n");
+
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -245,15 +258,20 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
+ uint32_t ring_size_dw = ring->ring_size / 4;
+ uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
/* KM (GPCOM) prepare write pointer */
psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
/* Update KM RB frame pointer to new frame */
- if ((psp_write_ptr_reg % ring->ring_size) == 0)
+ if ((psp_write_ptr_reg % ring_size_dw) == 0)
write_frame = ring->ring_mem;
else
- write_frame = ring->ring_mem + (psp_write_ptr_reg / (sizeof(struct psp_gfx_rb_frame) / 4));
+ write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw);
+
+ /* Initialize KM RB frame */
+ memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
/* Update KM RB frame */
write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
@@ -263,8 +281,7 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
write_frame->fence_value = index;
/* Update the write Pointer in DWORDs */
- psp_write_ptr_reg += sizeof(struct psp_gfx_rb_frame) / 4;
- psp_write_ptr_reg = (psp_write_ptr_reg >= ring->ring_size) ? 0 : psp_write_ptr_reg;
+ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
return 0;
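
The rewritten write-pointer update is plain modular arithmetic in dword units. A standalone illustration with assumed sizes (the real ring_size and rb frame size come from the PSP ring setup):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ring_size_dw = 4096 / 4;	/* assumed 4 KiB ring */
	uint32_t frame_dw = 64 / 4;		/* assumed 64-byte rb frame */
	uint32_t wptr = 1008;			/* last frame slot */

	wptr = (wptr + frame_dw) % ring_size_dw;
	printf("wrapped wptr = %u\n", wptr);	/* prints 0 */
	return 0;
}

The added memset() of the frame also ensures that fields this function does not fill are zero rather than stale data from a previous submission.
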
@@ -390,3 +407,10 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
return true;
}
+
+int psp_v10_0_mode1_reset(struct psp_context *psp)
+{
+	DRM_INFO("psp mode 1 reset is not supported yet!\n");
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
index e76cde2f01f9..451e8308303f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.h
@@ -34,6 +34,8 @@ extern int psp_v10_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v10_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v10_0_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v10_0_cmd_submit(struct psp_context *psp,
@@ -43,4 +45,6 @@ extern int psp_v10_0_cmd_submit(struct psp_context *psp,
extern bool psp_v10_0_compare_sram_data(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
+
+extern int psp_v10_0_mode1_reset(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 2a535a4b8d5b..cee5c396b277 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -319,7 +319,7 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
return ret;
}
-int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
{
int ret = 0;
struct psp_ring *ring;
@@ -339,6 +339,19 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
0x80000000, 0x80000000, false);
+ return ret;
+}
+
+int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v3_1_ring_stop(psp, ring_type);
+ if (ret)
+		DRM_ERROR("Failed to stop psp ring\n");
+
amdgpu_bo_free_kernel(&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -517,3 +530,37 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
+
+int psp_v3_1_mode1_reset(struct psp_context *psp)
+{
+ int ret;
+ uint32_t offset;
+ struct amdgpu_device *adev = psp->adev;
+
+ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
+
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+
+ if (ret) {
+		DRM_INFO("psp is not working correctly before mode 1 reset!\n");
+ return -EINVAL;
+ }
+
+	/* send the mode 1 reset command */
+ WREG32(offset, 0x70000);
+
+ mdelay(1000);
+
+ offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
+
+ ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+
+ if (ret) {
+ DRM_INFO("psp mode 1 reset failed!\n");
+ return -EINVAL;
+ }
+
+	DRM_INFO("psp mode 1 reset succeeded\n");
+
+ return 0;
+}
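
Both waits above funnel through psp_wait_for(), whose core is a bounded poll for masked register bits, roughly as below (a sketch of the loop only; the real helper also honours the check_changed flag the calls above pass as false):

static int wait_reg_bits_sketch(struct amdgpu_device *adev, uint32_t offset,
				uint32_t expected, uint32_t mask)
{
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(offset) & mask) == expected)
			return 0;
		udelay(1);
	}
	return -ETIME;
}
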
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
index 9dcd0b25c4c6..b05dbada7751 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.h
@@ -41,6 +41,8 @@ extern int psp_v3_1_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type);
+extern int psp_v3_1_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type);
extern int psp_v3_1_ring_destroy(struct psp_context *psp,
enum psp_ring_type ring_type);
extern int psp_v3_1_cmd_submit(struct psp_context *psp,
@@ -51,4 +53,5 @@ extern bool psp_v3_1_compare_sram_data(struct psp_context *psp,
struct amdgpu_firmware_info *ucode,
enum AMDGPU_UCODE_ID ucode_type);
extern bool psp_v3_1_smu_reload_quirk(struct psp_context *psp);
+extern int psp_v3_1_mode1_reset(struct psp_context *psp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index f2d0710258cb..acdee3a4602c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -1324,8 +1324,13 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v2_4_vm_copy_pte,
+
.write_pte = sdma_v2_4_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x1fffff >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b1de44f22824..72f31cc7df00 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -379,8 +379,10 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring->use_doorbell) {
+ u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
+
/* XXX check if swapping is necessary on BE */
- adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2;
+ WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
} else {
int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
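
The WRITE_ONCE() above matters because this shadow slot is exactly what the engine polls once the wptr-poll registers configured in the next hunk point at it: the store must be a single untorn write that the compiler can neither split nor elide. The idiom, sketched (the kernel's WRITE_ONCE() is essentially a volatile store):

#include <stdint.h>

/* Simplified stand-in for the kernel's WRITE_ONCE(). */
#define write_once_u32(p, v) (*(volatile uint32_t *)(p) = (v))

void publish_wptr(uint32_t *shadow, uint32_t wptr)
{
	/* dword units, hence the << 2, matching the diff above */
	write_once_u32(shadow, wptr << 2);
}
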
@@ -641,10 +643,11 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
struct amdgpu_ring *ring;
- u32 rb_cntl, ib_cntl;
+ u32 rb_cntl, ib_cntl, wptr_poll_cntl;
u32 rb_bufsz;
u32 wb_offset;
u32 doorbell;
+ u64 wptr_gpu_addr;
int i, j, r;
for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -707,6 +710,20 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
}
WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);
+ /* setup the wptr shadow polling */
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
+ lower_32_bits(wptr_gpu_addr));
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
+ upper_32_bits(wptr_gpu_addr));
+ wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
+ if (amdgpu_sriov_vf(adev))
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+ else
+ wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+ WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
+
/* enable DMA RB */
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -1713,11 +1730,11 @@ static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
}
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
- .copy_max_bytes = 0x1fffff,
+ .copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
.copy_num_dw = 7,
.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
- .fill_max_bytes = 0x1fffff,
+ .fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
.fill_num_dw = 5,
.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};
@@ -1731,8 +1748,14 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v3_0_vm_copy_pte,
+
.write_pte = sdma_v3_0_vm_write_pte,
+
+ /* not 0x3fffff due to HW limitation */
+ .set_max_nums_pte_pde = 0x3fffe0 >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};
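
Worked numbers for the new limits: with 8-byte PTEs, the 0x3fffe0-byte ceiling quoted in the comments maps onto .set_max_nums_pte_pde as below. (0x3fffe0 appears to be 0x3fffff rounded down to a 32-byte boundary, staying just under the hardware count limit the comments reference.)

#include <stdio.h>

int main(void)
{
	unsigned int max_bytes = 0x3fffe0;	/* HW-safe ceiling from above */

	/* 8 bytes per PTE, hence the >> 3 in .set_max_nums_pte_pde */
	printf("max PTE entries per packet: 0x%x (%u)\n",
	       max_bytes >> 3, max_bytes >> 3);
	return 0;
}
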
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index fd7c72aaafa6..c26d205ff3bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -54,7 +54,7 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
static const u32 golden_settings_sdma_4[] = {
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
+ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
@@ -89,7 +89,7 @@ static const u32 golden_settings_sdma_vg10[] = {
static const u32 golden_settings_sdma_4_1[] =
{
- SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
+ SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
@@ -398,7 +398,7 @@ static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
- amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0));
+ amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
amdgpu_ring_write(ring, 1);
}
@@ -1264,6 +1264,11 @@ static int sdma_v4_0_sw_fini(void *handle)
for (i = 0; i < adev->sdma.num_instances; i++)
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ release_firmware(adev->sdma.instance[i].fw);
+ adev->sdma.instance[i].fw = NULL;
+ }
+
return 0;
}
@@ -1714,8 +1719,13 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
+ .copy_pte_num_dw = 7,
.copy_pte = sdma_v4_0_vm_copy_pte,
+
.write_pte = sdma_v4_0_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0x400000 >> 3,
+ .set_pte_pde_num_dw = 10,
.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 112969f3301a..3fa2fbf8c9a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -887,8 +887,13 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
}
static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+ .copy_pte_num_dw = 5,
.copy_pte = si_dma_vm_copy_pte,
+
.write_pte = si_dma_vm_write_pte,
+
+ .set_max_nums_pte_pde = 0xffff8 >> 3,
+ .set_pte_pde_num_dw = 9,
.set_pte_pde = si_dma_vm_set_pte_pde,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index d63873f3f574..9b8db6046271 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -1847,7 +1847,6 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
@@ -3060,9 +3059,9 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
return ret;
}
-static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool si_dpm_vblank_too_short(void *handle)
{
-
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we never hit the non-gddr5 limit so disable it */
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
@@ -3871,9 +3870,10 @@ static int si_restrict_performance_levels_before_switch(struct amdgpu_device *ad
0 : -EINVAL;
}
-static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+static int si_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
struct si_ps *ps = si_get_ps(rps);
u32 levels = ps->performance_level_count;
@@ -6575,11 +6575,12 @@ static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
}
}
-static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int si_dpm_get_fan_speed_percent(void *handle,
u32 *speed)
{
u32 duty, duty100;
u64 tmp64;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.no_fan)
return -ENOENT;
@@ -6600,9 +6601,10 @@ static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int si_dpm_set_fan_speed_percent(void *handle,
u32 speed)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
u32 duty, duty100;
@@ -6633,8 +6635,10 @@ static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
return 0;
}
-static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
if (mode) {
/* stop auto-manage */
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -6649,8 +6653,9 @@ static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
}
}
-static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 si_dpm_get_fan_control_mode(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
@@ -6946,8 +6951,9 @@ static void si_dpm_disable(struct amdgpu_device *adev)
ni_update_current_ps(adev, boot_ps);
}
-static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int si_dpm_pre_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
struct amdgpu_ps *new_ps = &requested_ps;
@@ -6984,8 +6990,9 @@ static int si_power_control_set_level(struct amdgpu_device *adev)
return 0;
}
-static int si_dpm_set_power_state(struct amdgpu_device *adev)
+static int si_dpm_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
struct amdgpu_ps *old_ps = &eg_pi->current_rps;
@@ -7086,8 +7093,9 @@ static int si_dpm_set_power_state(struct amdgpu_device *adev)
return 0;
}
-static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void si_dpm_post_set_power_state(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
@@ -7103,8 +7111,10 @@ void si_dpm_reset_asic(struct amdgpu_device *adev)
}
#endif
-static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void si_dpm_display_configuration_changed(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
si_program_display_gap(adev);
}
@@ -7486,9 +7496,10 @@ static void si_dpm_fini(struct amdgpu_device *adev)
amdgpu_free_extended_power_table(adev);
}
-static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+static void si_dpm_debugfs_print_current_performance_level(void *handle,
struct seq_file *m)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *rps = &eg_pi->current_rps;
struct si_ps *ps = si_get_ps(rps);
@@ -7860,10 +7871,11 @@ static int si_dpm_set_powergating_state(void *handle,
}
/* get temperature in millidegrees */
-static int si_dpm_get_temp(struct amdgpu_device *adev)
+static int si_dpm_get_temp(void *handle)
{
u32 temp;
int actual_temp = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
CTF_TEMP_SHIFT;
@@ -7878,8 +7890,9 @@ static int si_dpm_get_temp(struct amdgpu_device *adev)
return actual_temp;
}
-static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 si_dpm_get_sclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
@@ -7889,8 +7902,9 @@ static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
-static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 si_dpm_get_mclk(void *handle, bool low)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
@@ -7900,9 +7914,11 @@ static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}
-static void si_dpm_print_power_state(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
+static void si_dpm_print_power_state(void *handle,
+ void *current_ps)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
struct si_ps *ps = si_get_ps(rps);
struct rv7xx_pl *pl;
int i;
@@ -7927,7 +7943,6 @@ static int si_dpm_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- si_dpm_set_dpm_funcs(adev);
si_dpm_set_irq_funcs(adev);
return 0;
}
@@ -7942,20 +7957,23 @@ static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
(si_cpl1->vddci == si_cpl2->vddci));
}
-static int si_check_state_equal(struct amdgpu_device *adev,
- struct amdgpu_ps *cps,
- struct amdgpu_ps *rps,
+static int si_check_state_equal(void *handle,
+ void *current_ps,
+ void *request_ps,
bool *equal)
{
struct si_ps *si_cps;
struct si_ps *si_rps;
int i;
+ struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+ struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
return -EINVAL;
- si_cps = si_get_ps(cps);
- si_rps = si_get_ps(rps);
+ si_cps = si_get_ps((struct amdgpu_ps *)cps);
+ si_rps = si_get_ps((struct amdgpu_ps *)rps);
if (si_cps == NULL) {
printk("si_cps is NULL\n");
@@ -7983,9 +8001,10 @@ static int si_check_state_equal(struct amdgpu_device *adev,
return 0;
}
-static int si_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int si_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
struct amdgpu_ps *rps = &eg_pi->current_rps;
struct si_ps *ps = si_get_ps(rps);
@@ -8041,7 +8060,7 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
.set_powergating_state = si_dpm_set_powergating_state,
};
-static const struct amdgpu_dpm_funcs si_dpm_funcs = {
+const struct amd_pm_funcs si_dpm_funcs = {
.get_temperature = &si_dpm_get_temp,
.pre_set_power_state = &si_dpm_pre_set_power_state,
.set_power_state = &si_dpm_set_power_state,
@@ -8062,12 +8081,6 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
.read_sensor = &si_dpm_read_sensor,
};
-static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
- if (adev->pm.funcs == NULL)
- adev->pm.funcs = &si_dpm_funcs;
-}
-
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
.set = si_dpm_set_interrupt_state,
.process = si_dpm_process_interrupt,
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.h b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
index 51ce21c5f4fb..9fe343de3477 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.h
@@ -246,6 +246,7 @@ enum si_display_gap
};
extern const struct amd_ip_funcs si_dpm_ip_funcs;
+extern const struct amd_pm_funcs si_dpm_funcs;
struct ni_leakage_coeffients
{
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index ce25e03a077d..d2c6b80309c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -118,6 +118,19 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
return (wptr & adev->irq.ih.ptr_mask);
}
+/**
+ * si_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ /* Process all interrupts */
+ return true;
+}
+
static void si_ih_decode_iv(struct amdgpu_device *adev,
struct amdgpu_iv_entry *entry)
{
@@ -288,6 +301,7 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
static const struct amdgpu_ih_funcs si_ih_funcs = {
.get_wptr = si_ih_get_wptr,
+ .prescreen_iv = si_ih_prescreen_iv,
.decode_iv = si_ih_decode_iv,
.set_rptr = si_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index f2c3a49f73a0..245a18aeb389 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -407,18 +407,27 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int soc15_asic_reset(struct amdgpu_device *adev)
{
u32 i;
- dev_info(adev->dev, "GPU pci config reset\n");
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ dev_info(adev->dev, "GPU reset\n");
/* disable BM */
pci_clear_master(adev->pdev);
- /* reset */
- amdgpu_pci_config_reset(adev);
- udelay(100);
+ pci_save_state(adev->pdev);
+
+ for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+ adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
+ break;
+ }
+ }
+
+ pci_restore_state(adev->pdev);
/* wait for asic to come out of reset */
for (i = 0; i < adev->usec_timeout; i++) {
@@ -430,14 +439,6 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
udelay(1);
}
-}
-
-static int soc15_asic_reset(struct amdgpu_device *adev)
-{
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- soc15_gpu_pci_config_reset(adev);
-
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
return 0;
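
soc15_asic_reset() now reaches the reset through the PSP block's soft_reset hook instead of PCI config space. Looking an IP block up by type is a common amdgpu pattern; a sketch (the diff bounds its scan with AMDGPU_MAX_IP_NUM, and error handling for a missing block is omitted in both):

static void call_psp_soft_reset_sketch(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			adev->ip_blocks[i].version->funcs->soft_reset(adev);
			break;
		}
	}
}

Saving and restoring PCI state around the reset preserves BAR and MSI configuration across the mode-1 reset.
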
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index 923df2c0e535..5ed00692618e 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,6 +219,19 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
}
/**
+ * tonga_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ /* Process all interrupts */
+ return true;
+}
+
+/**
* tonga_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -478,6 +491,7 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
static const struct amdgpu_ih_funcs tonga_ih_funcs = {
.get_wptr = tonga_ih_get_wptr,
+ .prescreen_iv = tonga_ih_prescreen_iv,
.decode_iv = tonga_ih_decode_iv,
.set_rptr = tonga_ih_set_rptr
};
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 23a85750edd6..b8ed8faf2003 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -1161,7 +1161,7 @@ static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
*/
static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 11134d5f7443..75745544600a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -1011,10 +1011,6 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
{
DRM_DEBUG("IH: VCE\n");
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
- VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
- ~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
-
switch (entry->src_data[0]) {
case 0:
case 1:
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 21e7b88401e1..1eb4d79d6e30 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -812,7 +812,7 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
*/
static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
- amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
+ amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
amdgpu_ring_write(ring, 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 56150e8d1ed2..a3b30d84dbb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -219,14 +219,92 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
wptr, adev->irq.ih.rptr, tmp);
adev->irq.ih.rptr = tmp;
- tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+ tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
- WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
}
return (wptr & adev->irq.ih.ptr_mask);
}
/**
+ * vega10_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+ u32 ring_index = adev->irq.ih.rptr >> 2;
+ u32 dw0, dw3, dw4, dw5;
+ u16 pasid;
+ u64 addr, key;
+ struct amdgpu_vm *vm;
+ int r;
+
+ dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+ dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+ dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
+ dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
+
+ /* Filter retry page faults, let only the first one pass. If
+ * there are too many outstanding faults, ignore them until
+ * some faults get cleared.
+ */
+ switch (dw0 & 0xff) {
+ case AMDGPU_IH_CLIENTID_VMC:
+ case AMDGPU_IH_CLIENTID_UTCL2:
+ break;
+ default:
+ /* Not a VM fault */
+ return true;
+ }
+
+ /* Not a retry fault */
+ if (!(dw5 & 0x80))
+ return true;
+
+ pasid = dw3 & 0xffff;
+ /* No PASID, can't identify faulting process */
+ if (!pasid)
+ return true;
+
+ addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
+ key = AMDGPU_VM_FAULT(pasid, addr);
+ r = amdgpu_ih_add_fault(adev, key);
+
+ /* Hash table is full or the fault is already being processed,
+ * ignore further page faults
+ */
+ if (r != 0)
+ goto ignore_iv;
+
+ /* Track retry faults in per-VM fault FIFO. */
+ spin_lock(&adev->vm_manager.pasid_lock);
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ spin_unlock(&adev->vm_manager.pasid_lock);
+ if (WARN_ON_ONCE(!vm)) {
+ /* VM not found, process it normally */
+ amdgpu_ih_clear_fault(adev, key);
+ return true;
+ }
+ /* No locking required with single writer and single reader */
+ r = kfifo_put(&vm->faults, key);
+ if (!r) {
+ /* FIFO is full. Ignore it until there is space */
+ amdgpu_ih_clear_fault(adev, key);
+ goto ignore_iv;
+ }
+
+ /* It's the first fault for this address, process it normally */
+ return true;
+
+ignore_iv:
+ adev->irq.ih.rptr += 32;
+ return false;
+}
+
+/**
* vega10_ih_decode_iv - decode an interrupt vector
*
* @adev: amdgpu_device pointer
@@ -310,6 +388,14 @@ static int vega10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
+ adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
+ if (!adev->irq.ih.faults)
+ return -ENOMEM;
+ INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
+ AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+ spin_lock_init(&adev->irq.ih.faults->lock);
+ adev->irq.ih.faults->count = 0;
+
r = amdgpu_irq_init(adev);
return r;
@@ -322,6 +408,9 @@ static int vega10_ih_sw_fini(void *handle)
amdgpu_irq_fini(adev);
amdgpu_ih_ring_fini(adev);
+ kfree(adev->irq.ih.faults);
+ adev->irq.ih.faults = NULL;
+
return 0;
}
@@ -410,6 +499,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
.get_wptr = vega10_ih_get_wptr,
+ .prescreen_iv = vega10_ih_prescreen_iv,
.decode_iv = vega10_ih_decode_iv,
.set_rptr = vega10_ih_set_rptr
};
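The address reconstruction in vega10_ih_prescreen_iv() splices the faulting page address out of two ring dwords: dw4 carries bits 43:12 and the low nibble of dw5 carries bits 47:44 (bit 7 of dw5 is the retry flag). A worked example with made-up dword values:

u32 dw4 = 0x00123456;	/* page address bits 43:12 */
u32 dw5 = 0x0000008a;	/* retry bit (0x80) set, address bits 47:44 = 0xa */
u64 addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
/* addr == 0xa00123456000ULL: a 48-bit, page-aligned GPU virtual address */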
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 61fff25b4ce7..5df12b287201 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -168,13 +168,6 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
pasid_limit = min_t(unsigned int,
(unsigned int)(1 << kfd->device_info->max_pasid_bits),
iommu_info.max_pasids);
- /*
- * last pasid is used for kernel queues doorbells
- * in the future the last pasid might be used for a kernel thread.
- */
- pasid_limit = min_t(unsigned int,
- pasid_limit,
- kfd->doorbell_process_limit - 1);
err = amd_iommu_init_device(kfd->pdev, pasid_limit);
if (err < 0) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index acf4d2a977ad..feb76c235b1a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -24,16 +24,15 @@
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/idr.h>
/*
- * This extension supports a kernel level doorbells management for
- * the kernel queues.
- * Basically the last doorbells page is devoted to kernel queues
- * and that's assures that any user process won't get access to the
- * kernel doorbells page
+ * This extension supports kernel-level doorbell management for the
+ * kernel queues, using the first doorbell page, which is reserved
+ * for the kernel.
*/
-#define KERNEL_DOORBELL_PASID 1
+static DEFINE_IDA(doorbell_ida);
+static unsigned int max_doorbell_slices;
#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
/*
@@ -84,13 +83,16 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
(doorbell_aperture_size - doorbell_start_offset) /
doorbell_process_allocation();
else
- doorbell_process_limit = 0;
+ return -ENOSPC;
+
+ if (!max_doorbell_slices ||
+ doorbell_process_limit < max_doorbell_slices)
+ max_doorbell_slices = doorbell_process_limit;
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
- kfd->doorbell_process_limit = doorbell_process_limit - 1;
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
doorbell_process_allocation());
@@ -185,11 +187,10 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
return NULL;
/*
- * Calculating the kernel doorbell offset using "faked" kernel
- * pasid that allocated for kernel queues only
+ * Calculating the kernel doorbell offset using the first
+ * doorbell page.
*/
- *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
- sizeof(u32)) + inx;
+ *doorbell_off = kfd->doorbell_id_offset + inx;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -228,11 +229,12 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
{
/*
* doorbell_id_offset accounts for doorbells taken by KGD.
- * pasid * doorbell_process_allocation/sizeof(u32) adjusts
- * to the process's doorbells
+ * index * doorbell_process_allocation/sizeof(u32) adjusts to
+ * the process's doorbells.
*/
return kfd->doorbell_id_offset +
- process->pasid * (doorbell_process_allocation()/sizeof(u32)) +
+ process->doorbell_index
+ * doorbell_process_allocation() / sizeof(u32) +
queue_id;
}
@@ -250,5 +252,21 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
struct kfd_process *process)
{
return dev->doorbell_base +
- process->pasid * doorbell_process_allocation();
+ process->doorbell_index * doorbell_process_allocation();
+}
+
+int kfd_alloc_process_doorbells(struct kfd_process *process)
+{
+ int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices,
+ GFP_KERNEL);
+ if (r > 0)
+ process->doorbell_index = r;
+
+ return r;
+}
+
+void kfd_free_process_doorbells(struct kfd_process *process)
+{
+ if (process->doorbell_index)
+ ida_simple_remove(&doorbell_ida, process->doorbell_index);
}
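Doorbell pages are no longer indexed by PASID; each process now gets a doorbell slice from a global IDA, and slice 0 stays with the kernel queues because allocation starts at 1. A self-contained sketch of the same allocate/free pattern (names are illustrative):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

/* Mirrors kfd_alloc_process_doorbells(): IDs start at 1 so slice 0,
 * the kernel's own doorbell page, is never handed to a process.
 */
static int example_alloc_slice(unsigned int max_slices)
{
	return ida_simple_get(&example_ida, 1, max_slices, GFP_KERNEL);
}

static void example_free_slice(int index)
{
	if (index > 0)
		ida_simple_remove(&example_ida, index);
}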
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 0d73bea22c45..6c5a9cab55de 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -103,10 +103,6 @@ static int __init kfd_module_init(void)
return -1;
}
- err = kfd_pasid_init();
- if (err < 0)
- return err;
-
err = kfd_chardev_init();
if (err < 0)
goto err_ioctl;
@@ -126,7 +122,6 @@ static int __init kfd_module_init(void)
err_topology:
kfd_chardev_exit();
err_ioctl:
- kfd_pasid_exit();
return err;
}
@@ -137,7 +132,6 @@ static void __exit kfd_module_exit(void)
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
- kfd_pasid_exit();
dev_info(kfd_device, "Removed module\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 1e06de0bc673..d6a796144269 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -20,78 +20,64 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/slab.h>
#include <linux/types.h>
#include "kfd_priv.h"
-static unsigned long *pasid_bitmap;
-static unsigned int pasid_limit;
-static DEFINE_MUTEX(pasid_mutex);
-
-int kfd_pasid_init(void)
-{
- pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
-
- pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
- GFP_KERNEL);
- if (!pasid_bitmap)
- return -ENOMEM;
-
- set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */
-
- return 0;
-}
-
-void kfd_pasid_exit(void)
-{
- kfree(pasid_bitmap);
-}
+static unsigned int pasid_bits = 16;
+static const struct kfd2kgd_calls *kfd2kgd;
bool kfd_set_pasid_limit(unsigned int new_limit)
{
- if (new_limit < pasid_limit) {
- bool ok;
-
- mutex_lock(&pasid_mutex);
-
- /* ensure that no pasids >= new_limit are in-use */
- ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) ==
- pasid_limit);
- if (ok)
- pasid_limit = new_limit;
-
- mutex_unlock(&pasid_mutex);
-
- return ok;
+ if (new_limit < 2)
+ return false;
+
+ if (new_limit < (1U << pasid_bits)) {
+ if (kfd2kgd)
+ /* We've already allocated user PASIDs, too late to
+ * change the limit
+ */
+ return false;
+
+ while (new_limit < (1U << pasid_bits))
+ pasid_bits--;
}
return true;
}
-inline unsigned int kfd_get_pasid_limit(void)
+unsigned int kfd_get_pasid_limit(void)
{
- return pasid_limit;
+ return 1U << pasid_bits;
}
unsigned int kfd_pasid_alloc(void)
{
- unsigned int found;
-
- mutex_lock(&pasid_mutex);
-
- found = find_first_zero_bit(pasid_bitmap, pasid_limit);
- if (found == pasid_limit)
- found = 0;
- else
- set_bit(found, pasid_bitmap);
+ int r;
+
+ /* Use the first available KFD device to reach KGD */
+ if (!kfd2kgd) {
+ struct kfd_dev *dev = NULL;
+ unsigned int i = 0;
+
+ while ((dev = kfd_topology_enum_kfd_devices(i)) != NULL) {
+ if (dev && dev->kfd2kgd) {
+ kfd2kgd = dev->kfd2kgd;
+ break;
+ }
+ i++;
+ }
+
+ if (!kfd2kgd)
+ return 0; /* no KGD interface found, no valid PASID */
+ }
- mutex_unlock(&pasid_mutex);
+ r = kfd2kgd->alloc_pasid(pasid_bits);
- return found;
+ return r > 0 ? r : 0;
}
void kfd_pasid_free(unsigned int pasid)
{
- if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
- clear_bit(pasid, pasid_bitmap);
+ if (kfd2kgd)
+ kfd2kgd->free_pasid(pasid);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b397ec726400..4cb90f517906 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -157,9 +157,6 @@ struct kfd_dev {
* to HW doorbell, GFX reserved some
* at the start)
*/
- size_t doorbell_process_limit; /* Number of processes we have doorbell
- * space for.
- */
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
* page used by kernel queue
*/
@@ -495,6 +492,7 @@ struct kfd_process {
struct rcu_head rcu;
unsigned int pasid;
+ unsigned int doorbell_index;
/*
* List of kfd_process_device structures,
@@ -583,6 +581,10 @@ void write_kernel_doorbell(u32 __iomem *db, u32 value);
unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int queue_id);
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+ struct kfd_process *process);
+int kfd_alloc_process_doorbells(struct kfd_process *process);
+void kfd_free_process_doorbells(struct kfd_process *process);
/* GTT Sub-Allocator */
@@ -694,8 +696,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
void pm_release_ib(struct packet_manager *pm);
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
- struct kfd_process *process);
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index c74cf22a1ed9..9e65ce3c1967 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -183,6 +183,7 @@ static void kfd_process_wq_release(struct work_struct *work)
kfd_event_free_process(p);
kfd_pasid_free(p->pasid);
+ kfd_free_process_doorbells(p);
mutex_unlock(&p->mutex);
@@ -288,6 +289,9 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (process->pasid == 0)
goto err_alloc_pasid;
+ if (kfd_alloc_process_doorbells(process) < 0)
+ goto err_alloc_doorbells;
+
mutex_init(&process->mutex);
process->mm = thread->mm;
@@ -329,6 +333,8 @@ err_process_pqm_init:
mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
mutex_destroy(&process->mutex);
+ kfd_free_process_doorbells(process);
+err_alloc_doorbells:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
kfree(process->queues);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 70e8c20acb2f..20457bb5a906 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -25,6 +25,8 @@
#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
+struct seq_file;
+
/*
* Supported ASIC types
*/
@@ -144,6 +146,12 @@ enum amd_fan_ctrl_mode {
AMD_FAN_CTRL_AUTO = 2,
};
+enum pp_clock_type {
+ PP_SCLK,
+ PP_MCLK,
+ PP_PCIE,
+};
+
/* CG flags */
#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
@@ -249,4 +257,62 @@ struct amd_ip_funcs {
void (*get_clockgating_state)(void *handle, u32 *flags);
};
+enum amd_pp_task;
+
+struct pp_states_info;
+
+struct amd_pm_funcs {
+ int (*get_temperature)(void *handle);
+ int (*pre_set_power_state)(void *handle);
+ int (*set_power_state)(void *handle);
+ void (*post_set_power_state)(void *handle);
+ void (*display_configuration_changed)(void *handle);
+ u32 (*get_sclk)(void *handle, bool low);
+ u32 (*get_mclk)(void *handle, bool low);
+ void (*print_power_state)(void *handle, void *ps);
+ void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
+ int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
+ bool (*vblank_too_short)(void *handle);
+ void (*powergate_uvd)(void *handle, bool gate);
+ void (*powergate_vce)(void *handle, bool gate);
+ void (*enable_bapm)(void *handle, bool enable);
+ void (*set_fan_control_mode)(void *handle, u32 mode);
+ u32 (*get_fan_control_mode)(void *handle);
+ int (*set_fan_speed_percent)(void *handle, u32 speed);
+ int (*get_fan_speed_percent)(void *handle, u32 *speed);
+ int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
+ int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+ int (*get_sclk_od)(void *handle);
+ int (*set_sclk_od)(void *handle, uint32_t value);
+ int (*get_mclk_od)(void *handle);
+ int (*set_mclk_od)(void *handle, uint32_t value);
+ int (*check_state_equal)(void *handle,
+ void *cps,
+ void *rps,
+ bool *equal);
+ int (*read_sensor)(void *handle, int idx, void *value,
+ int *size);
+
+ struct amd_vce_state* (*get_vce_clock_state)(void *handle, u32 idx);
+ int (*reset_power_profile_state)(void *handle,
+ struct amd_pp_profile *request);
+ int (*get_power_profile_state)(void *handle,
+ struct amd_pp_profile *query);
+ int (*set_power_profile_state)(void *handle,
+ struct amd_pp_profile *request);
+ int (*switch_power_profile)(void *handle,
+ enum amd_pp_profile_type type);
+ int (*load_firmware)(void *handle);
+ int (*wait_for_fw_loading_complete)(void *handle);
+ enum amd_dpm_forced_level (*get_performance_level)(void *handle);
+ enum amd_pm_state_type (*get_current_power_state)(void *handle);
+ int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
+ void *input, void *output);
+ int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
+ int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
+ int (*get_pp_table)(void *handle, char **table);
+ int (*set_pp_table)(void *handle, const char *buf, size_t size);
+};
+
+
#endif /* __AMD_SHARED_H__ */
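The new amd_pm_funcs table gathers the power-management entry points behind opaque void handles. Callers are expected to null-check optional callbacks before dispatching; a hedged sketch of that pattern (the wrapper name is hypothetical, not a macro from this diff):

static int pm_get_temperature(const struct amd_pm_funcs *funcs, void *handle)
{
	/* Every callback in the table is optional. */
	if (!funcs || !funcs->get_temperature)
		return -EINVAL;

	return funcs->get_temperature(handle);
}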
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
index 34c6ff52710e..6af9f0217b34 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h
@@ -5454,5 +5454,7 @@
#define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
#define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
#define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
+#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en_MASK 0x1
+#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en__SHIFT 0
#endif /* SMU_7_0_1_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 837296db9628..7c92f4707085 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1017,6 +1017,19 @@ struct atom_14nm_combphy_tmds_vs_set
uint8_t margin_deemph_lane0__deemph_sel_val;
};
+struct atom_i2c_reg_info {
+ uint8_t ucI2cRegIndex;
+ uint8_t ucI2cRegVal;
+};
+
+struct atom_hdmi_retimer_redriver_set {
+ uint8_t HdmiSlvAddr;
+ uint8_t HdmiRegNum;
+ uint8_t Hdmi6GRegNum;
+ struct atom_i2c_reg_info HdmiRegSetting[9]; // For non-6 GHz use
+ struct atom_i2c_reg_info Hdmi6GhzRegSetting[3]; // For 6 GHz use
+};
+
struct atom_integrated_system_info_v1_11
{
struct atom_common_table_header table_header;
@@ -1052,7 +1065,11 @@ struct atom_integrated_system_info_v1_11
struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
struct atom_camera_data camera_info;
- uint32_t reserved[138];
+ struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0
+ struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1
+ struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2
+ struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3
+ uint32_t reserved[108];
};
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 0214f63f52fc..030b14649c4e 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -100,6 +100,7 @@ enum cgs_system_info_id {
CGS_SYSTEM_INFO_GFX_SE_INFO,
CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
+ CGS_SYSTEM_INFO_PCIE_BUS_DEVFN,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
@@ -193,8 +194,6 @@ struct cgs_acpi_method_info {
* @type: memory type
* @size: size in bytes
* @align: alignment in bytes
- * @min_offset: minimum offset from start of heap
- * @max_offset: maximum offset from start of heap
* @handle: memory handle (output)
*
* The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
@@ -216,7 +215,6 @@ struct cgs_acpi_method_info {
*/
typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
uint64_t size, uint64_t align,
- uint64_t min_offset, uint64_t max_offset,
cgs_handle_t *handle);
/**
@@ -310,6 +308,22 @@ typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum
typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
unsigned index, uint32_t value);
+#define CGS_REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+#define CGS_REG_FIELD_MASK(reg, field) reg##__##field##_MASK
+
+#define CGS_REG_SET_FIELD(orig_val, reg, field, field_val) \
+ (((orig_val) & ~CGS_REG_FIELD_MASK(reg, field)) | \
+ (CGS_REG_FIELD_MASK(reg, field) & ((field_val) << CGS_REG_FIELD_SHIFT(reg, field))))
+
+#define CGS_REG_GET_FIELD(value, reg, field) \
+ (((value) & CGS_REG_FIELD_MASK(reg, field)) >> CGS_REG_FIELD_SHIFT(reg, field))
+
+#define CGS_WREG32_FIELD(device, reg, field, val) \
+ cgs_write_register(device, mm##reg, (cgs_read_register(device, mm##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
+
+#define CGS_WREG32_FIELD_IND(device, space, reg, field, val) \
+ cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
+
/**
* cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
* @cgs_device: opaque device handle
@@ -463,8 +477,8 @@ struct cgs_device
#define CGS_OS_CALL(func,dev,...) \
(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
-#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle) \
- CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,handle) \
+ CGS_CALL(alloc_gpu_mem,dev,type,size,align,handle)
#define cgs_free_gpu_mem(dev,handle) \
CGS_CALL(free_gpu_mem,dev,handle)
#define cgs_gmap_gpu_mem(dev,handle,mcaddr) \
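The CGS_REG_*_FIELD macros added above mirror the driver's REG_SET_FIELD/REG_GET_FIELD helpers: they paste the register and field tokens onto the generated reg##__##field##_MASK and __SHIFT defines. A self-contained demonstration with invented register definitions:

/* Invented register field, defined in the generated-header convention. */
#define EXAMPLE_CNTL__ENABLE_MASK	0x00000002
#define EXAMPLE_CNTL__ENABLE__SHIFT	0x1

static u32 cgs_field_demo(void)
{
	u32 val = CGS_REG_SET_FIELD(0, EXAMPLE_CNTL, ENABLE, 1);

	/* val == 0x2; reading the field back yields 1 */
	return CGS_REG_GET_FIELD(val, EXAMPLE_CNTL, ENABLE);
}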
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 94277cb734d2..f516fd10e6ba 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -112,6 +112,9 @@ struct tile_config {
*
* @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
*
+ * @alloc_pasid: Allocate a PASID
+ * @free_pasid: Free a PASID
+ *
* @program_sh_mem_settings: A function that should initiate the memory
* properties such as main aperture memory type (cache / non cached) and
* secondary aperture base address, size and memory type.
@@ -160,6 +163,9 @@ struct kfd2kgd_calls {
uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
+ int (*alloc_pasid)(unsigned int bits);
+ void (*free_pasid)(unsigned int pasid);
+
/* Register access functions */
void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
diff --git a/drivers/gpu/drm/amd/include/linux/chash.h b/drivers/gpu/drm/amd/include/linux/chash.h
new file mode 100644
index 000000000000..6dc159924ed1
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/linux/chash.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _LINUX_CHASH_H
+#define _LINUX_CHASH_H
+
+#include <linux/types.h>
+#include <linux/hash.h>
+#include <linux/bug.h>
+#include <asm/bitsperlong.h>
+
+#if BITS_PER_LONG == 32
+# define _CHASH_LONG_SHIFT 5
+#elif BITS_PER_LONG == 64
+# define _CHASH_LONG_SHIFT 6
+#else
+# error "Unexpected BITS_PER_LONG"
+#endif
+
+struct __chash_table {
+ u8 bits;
+ u8 key_size;
+ unsigned int value_size;
+ u32 size_mask;
+ unsigned long *occup_bitmap, *valid_bitmap;
+ union {
+ u32 *keys32;
+ u64 *keys64;
+ };
+ u8 *values;
+
+#ifdef CONFIG_CHASH_STATS
+ u64 hits, hits_steps, hits_time_ns;
+ u64 miss, miss_steps, miss_time_ns;
+ u64 relocs, reloc_dist;
+#endif
+};
+
+#define __CHASH_BITMAP_SIZE(bits) \
+ (((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define __CHASH_ARRAY_SIZE(bits, size) \
+ ((((size) << (bits)) + sizeof(long) - 1) / sizeof(long))
+
+#define __CHASH_DATA_SIZE(bits, key_size, value_size) \
+ (__CHASH_BITMAP_SIZE(bits) * 2 + \
+ __CHASH_ARRAY_SIZE(bits, key_size) + \
+ __CHASH_ARRAY_SIZE(bits, value_size))
+
+#define STRUCT_CHASH_TABLE(bits, key_size, value_size) \
+ struct { \
+ struct __chash_table table; \
+ unsigned long data \
+ [__CHASH_DATA_SIZE(bits, key_size, value_size)];\
+ }
+
+/**
+ * struct chash_table - Dynamically allocated closed hash table
+ *
+ * Use this struct for dynamically allocated hash tables (using
+ * chash_table_alloc and chash_table_free), where the size is
+ * determined at runtime.
+ */
+struct chash_table {
+ struct __chash_table table;
+ unsigned long *data;
+};
+
+/**
+ * DECLARE_CHASH_TABLE - macro to declare a closed hash table
+ * @table: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ *
+ * This declares the hash table variable with a static size.
+ *
+ * The closed hash table stores key-value pairs with low memory and
+ * lookup overhead. In operation it performs no dynamic memory
+ * management. The data being stored does not require any
+ * list_heads. The hash table performs best with small @val_sz and as
+ * long as some space (about 50%) is left free in the table. But the
+ * table can still work reasonably efficiently even when filled up to
+ * about 90%. If bigger data items need to be stored and looked up,
+ * store the pointer to it as value in the hash table.
+ *
+ * @val_sz may be 0. This can be useful when all the stored
+ * information is contained in the key itself, and the only question
+ * of interest is whether a given key is present in the table or not.
+ */
+#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz) \
+ STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table
+
+#ifdef CONFIG_CHASH_STATS
+#define __CHASH_STATS_INIT(prefix), \
+ prefix.hits = 0, \
+ prefix.hits_steps = 0, \
+ prefix.hits_time_ns = 0, \
+ prefix.miss = 0, \
+ prefix.miss_steps = 0, \
+ prefix.miss_time_ns = 0, \
+ prefix.relocs = 0, \
+ prefix.reloc_dist = 0
+#else
+#define __CHASH_STATS_INIT(prefix)
+#endif
+
+#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz) \
+ prefix.bits = (bts), \
+ prefix.key_size = (key_sz), \
+ prefix.value_size = (val_sz), \
+ prefix.size_mask = ((1 << bts) - 1), \
+ prefix.occup_bitmap = &data[0], \
+ prefix.valid_bitmap = &data \
+ [__CHASH_BITMAP_SIZE(bts)], \
+ prefix.keys64 = (u64 *)&data \
+ [__CHASH_BITMAP_SIZE(bts) * 2], \
+ prefix.values = (u8 *)&data \
+ [__CHASH_BITMAP_SIZE(bts) * 2 + \
+ __CHASH_ARRAY_SIZE(bts, key_sz)] \
+ __CHASH_STATS_INIT(prefix)
+
+/**
+ * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table
+ * @tbl: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ *
+ * Note: the macro can be used for global and local hash table variables.
+ */
+#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
+ DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = { \
+ .table = { \
+ __CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \
+ }, \
+ .data = {0} \
+ }
+
+/**
+ * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE
+ * @tbl: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ */
+#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
+ __CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz)
+
+int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
+ unsigned int value_size, gfp_t gfp_mask);
+void chash_table_free(struct chash_table *table);
+
+/**
+ * chash_table_dump_stats - Dump statistics of a closed hash table
+ * @tbl: Pointer to the table structure
+ *
+ * Dumps performance statistics gathered while the table was in use
+ * to the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is
+ * enabled, the user must turn on messages for chash.c (file chash.c +p).
+ */
+#ifdef CONFIG_CHASH_STATS
+#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table)
+
+void __chash_table_dump_stats(struct __chash_table *table);
+#else
+#define chash_table_dump_stats(tbl)
+#endif
+
+/**
+ * chash_table_reset_stats - Reset statistics of a closed hash table
+ * @tbl: Pointer to the table structure
+ */
+#ifdef CONFIG_CHASH_STATS
+#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table)
+
+static inline void __chash_table_reset_stats(struct __chash_table *table)
+{
+ (void)table __CHASH_STATS_INIT((*table));
+}
+#else
+#define chash_table_reset_stats(tbl)
+#endif
+
+/**
+ * chash_table_copy_in - Copy a new value into the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to add or update
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @key already has an entry, its value is replaced. Otherwise a
+ * new entry is added. If @value is NULL, the value is left unchanged
+ * or uninitialized. Returns 1 if an entry already existed, 0 if a new
+ * entry was added or %-ENOMEM if there was no free space in the
+ * table.
+ */
+#define chash_table_copy_in(tbl, key, value) \
+ __chash_table_copy_in(&(*tbl).table, key, value)
+
+int __chash_table_copy_in(struct __chash_table *table, u64 key,
+ const void *value);
+
+/**
+ * chash_table_copy_out - Copy a value out of the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to find
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @value is not NULL and the table has a non-0 value_size, the
+ * value at @key is copied to @value. Returns the slot index of the
+ * entry or %-EINVAL if @key was not found.
+ */
+#define chash_table_copy_out(tbl, key, value) \
+ __chash_table_copy_out(&(*tbl).table, key, value, false)
+
+int __chash_table_copy_out(struct __chash_table *table, u64 key,
+ void *value, bool remove);
+
+/**
+ * chash_table_remove - Remove an entry from the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to find
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @value is not NULL and the table has a non-0 value_size, the
+ * value at @key is copied to @value. The entry is removed from the
+ * table. Returns the slot index of the removed entry or %-EINVAL if
+ * @key was not found.
+ */
+#define chash_table_remove(tbl, key, value) \
+ __chash_table_copy_out(&(*tbl).table, key, value, true)
+
+/*
+ * Low level iterator API used internally by the above functions.
+ */
+struct chash_iter {
+ struct __chash_table *table;
+ unsigned long mask;
+ int slot;
+};
+
+/**
+ * CHASH_ITER_INIT - Initialize a hash table iterator
+ * @table: Pointer to the hash table to iterate over
+ * @s: Initial slot number
+ */
+#define CHASH_ITER_INIT(table, s) { \
+ table, \
+ 1UL << ((s) & (BITS_PER_LONG - 1)), \
+ s \
+ }
+/**
+ * CHASH_ITER_SET - Set hash table iterator to new slot
+ * @iter: Iterator
+ * @s: Slot number
+ */
+#define CHASH_ITER_SET(iter, s) \
+ (iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)), \
+ (iter).slot = (s)
+/**
+ * CHASH_ITER_INC - Increment hash table iterator
+ * @iter: Iterator to increment
+ *
+ * Wraps around at the end.
+ */
+#define CHASH_ITER_INC(iter) do { \
+ (iter).mask = (iter).mask << 1 | \
+ (iter).mask >> (BITS_PER_LONG - 1); \
+ (iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \
+ } while (0)
+
+static inline bool chash_iter_is_valid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
+ iter.mask);
+}
+static inline bool chash_iter_is_empty(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
+ iter.mask);
+}
+
+static inline void chash_iter_set_valid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
+ iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
+}
+static inline void chash_iter_set_invalid(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
+}
+static inline void chash_iter_set_empty(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
+}
+
+static inline u32 chash_iter_key32(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 4);
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->keys32[iter.slot];
+}
+static inline u64 chash_iter_key64(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 8);
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->keys64[iter.slot];
+}
+static inline u64 chash_iter_key(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return (iter.table->key_size == 4) ?
+ iter.table->keys32[iter.slot] : iter.table->keys64[iter.slot];
+}
+
+static inline u32 chash_iter_hash32(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 4);
+ return hash_32(chash_iter_key32(iter), iter.table->bits);
+}
+
+static inline u32 chash_iter_hash64(const struct chash_iter iter)
+{
+ BUG_ON(iter.table->key_size != 8);
+ return hash_64(chash_iter_key64(iter), iter.table->bits);
+}
+
+static inline u32 chash_iter_hash(const struct chash_iter iter)
+{
+ return (iter.table->key_size == 4) ?
+ hash_32(chash_iter_key32(iter), iter.table->bits) :
+ hash_64(chash_iter_key64(iter), iter.table->bits);
+}
+
+static inline void *chash_iter_value(const struct chash_iter iter)
+{
+ BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
+ return iter.table->values +
+ ((unsigned long)iter.slot * iter.table->value_size);
+}
+
+#endif /* _LINUX_CHASH_H */
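A minimal usage sketch of the API above, following the return conventions from the kernel-doc: copy_in returns 0 for a new entry and 1 for an update; copy_out and remove return the slot index or -EINVAL. Table size and key values here are made up:

#include <linux/chash.h>

/* 2^4 slots, 8-byte keys, u64 values */
static DEFINE_CHASH_TABLE(example_tbl, 4, 8, sizeof(u64));

static int chash_example(void)
{
	u64 val = 42, out;
	int r;

	r = chash_table_copy_in(&example_tbl, 0x1000, &val);
	if (r < 0)
		return r;	/* table full */

	r = chash_table_copy_out(&example_tbl, 0x1000, &out);
	if (r < 0)
		return r;	/* not found */

	/* out == 42 here; remove the entry again */
	return chash_table_remove(&example_tbl, 0x1000, NULL) < 0 ? -EINVAL : 0;
}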
diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h
index 9a9e6c7e89ea..2fb25abaf7c8 100644
--- a/drivers/gpu/drm/amd/include/v9_structs.h
+++ b/drivers/gpu/drm/amd/include/v9_structs.h
@@ -284,8 +284,8 @@ struct v9_mqd {
uint32_t gds_save_mask_hi;
uint32_t ctx_save_base_addr_lo;
uint32_t ctx_save_base_addr_hi;
- uint32_t reserved_126;
- uint32_t reserved_127;
+ uint32_t dynamic_cu_mask_addr_lo;
+ uint32_t dynamic_cu_mask_addr_hi;
uint32_t cp_mqd_base_addr_lo;
uint32_t cp_mqd_base_addr_hi;
uint32_t cp_hqd_active;
@@ -672,6 +672,14 @@ struct v9_mqd {
uint32_t reserved_511;
};
+struct v9_mqd_allocation {
+ struct v9_mqd mqd;
+ uint32_t wptr_poll_mem;
+ uint32_t rptr_report_mem;
+ uint32_t dynamic_cu_mask;
+ uint32_t dynamic_rb_mask;
+};
+
/* from vega10 all CSA format is shifted to chain ib compatible mode */
struct v9_ce_ib_state {
/* section of non chained ib part */
diff --git a/drivers/gpu/drm/amd/include/vi_structs.h b/drivers/gpu/drm/amd/include/vi_structs.h
index 3e606a761d0e..20234820194b 100644
--- a/drivers/gpu/drm/amd/include/vi_structs.h
+++ b/drivers/gpu/drm/amd/include/vi_structs.h
@@ -423,265 +423,6 @@ struct vi_mqd_allocation {
uint32_t dynamic_rb_mask;
};
-struct cz_mqd {
- uint32_t header;
- uint32_t compute_dispatch_initiator;
- uint32_t compute_dim_x;
- uint32_t compute_dim_y;
- uint32_t compute_dim_z;
- uint32_t compute_start_x;
- uint32_t compute_start_y;
- uint32_t compute_start_z;
- uint32_t compute_num_thread_x;
- uint32_t compute_num_thread_y;
- uint32_t compute_num_thread_z;
- uint32_t compute_pipelinestat_enable;
- uint32_t compute_perfcount_enable;
- uint32_t compute_pgm_lo;
- uint32_t compute_pgm_hi;
- uint32_t compute_tba_lo;
- uint32_t compute_tba_hi;
- uint32_t compute_tma_lo;
- uint32_t compute_tma_hi;
- uint32_t compute_pgm_rsrc1;
- uint32_t compute_pgm_rsrc2;
- uint32_t compute_vmid;
- uint32_t compute_resource_limits;
- uint32_t compute_static_thread_mgmt_se0;
- uint32_t compute_static_thread_mgmt_se1;
- uint32_t compute_tmpring_size;
- uint32_t compute_static_thread_mgmt_se2;
- uint32_t compute_static_thread_mgmt_se3;
- uint32_t compute_restart_x;
- uint32_t compute_restart_y;
- uint32_t compute_restart_z;
- uint32_t compute_thread_trace_enable;
- uint32_t compute_misc_reserved;
- uint32_t compute_dispatch_id;
- uint32_t compute_threadgroup_id;
- uint32_t compute_relaunch;
- uint32_t compute_wave_restore_addr_lo;
- uint32_t compute_wave_restore_addr_hi;
- uint32_t compute_wave_restore_control;
- uint32_t reserved_39;
- uint32_t reserved_40;
- uint32_t reserved_41;
- uint32_t reserved_42;
- uint32_t reserved_43;
- uint32_t reserved_44;
- uint32_t reserved_45;
- uint32_t reserved_46;
- uint32_t reserved_47;
- uint32_t reserved_48;
- uint32_t reserved_49;
- uint32_t reserved_50;
- uint32_t reserved_51;
- uint32_t reserved_52;
- uint32_t reserved_53;
- uint32_t reserved_54;
- uint32_t reserved_55;
- uint32_t reserved_56;
- uint32_t reserved_57;
- uint32_t reserved_58;
- uint32_t reserved_59;
- uint32_t reserved_60;
- uint32_t reserved_61;
- uint32_t reserved_62;
- uint32_t reserved_63;
- uint32_t reserved_64;
- uint32_t compute_user_data_0;
- uint32_t compute_user_data_1;
- uint32_t compute_user_data_2;
- uint32_t compute_user_data_3;
- uint32_t compute_user_data_4;
- uint32_t compute_user_data_5;
- uint32_t compute_user_data_6;
- uint32_t compute_user_data_7;
- uint32_t compute_user_data_8;
- uint32_t compute_user_data_9;
- uint32_t compute_user_data_10;
- uint32_t compute_user_data_11;
- uint32_t compute_user_data_12;
- uint32_t compute_user_data_13;
- uint32_t compute_user_data_14;
- uint32_t compute_user_data_15;
- uint32_t cp_compute_csinvoc_count_lo;
- uint32_t cp_compute_csinvoc_count_hi;
- uint32_t reserved_83;
- uint32_t reserved_84;
- uint32_t reserved_85;
- uint32_t cp_mqd_query_time_lo;
- uint32_t cp_mqd_query_time_hi;
- uint32_t cp_mqd_connect_start_time_lo;
- uint32_t cp_mqd_connect_start_time_hi;
- uint32_t cp_mqd_connect_end_time_lo;
- uint32_t cp_mqd_connect_end_time_hi;
- uint32_t cp_mqd_connect_end_wf_count;
- uint32_t cp_mqd_connect_end_pq_rptr;
- uint32_t cp_mqd_connect_end_pq_wptr;
- uint32_t cp_mqd_connect_end_ib_rptr;
- uint32_t reserved_96;
- uint32_t reserved_97;
- uint32_t cp_mqd_save_start_time_lo;
- uint32_t cp_mqd_save_start_time_hi;
- uint32_t cp_mqd_save_end_time_lo;
- uint32_t cp_mqd_save_end_time_hi;
- uint32_t cp_mqd_restore_start_time_lo;
- uint32_t cp_mqd_restore_start_time_hi;
- uint32_t cp_mqd_restore_end_time_lo;
- uint32_t cp_mqd_restore_end_time_hi;
- uint32_t reserved_106;
- uint32_t reserved_107;
- uint32_t gds_cs_ctxsw_cnt0;
- uint32_t gds_cs_ctxsw_cnt1;
- uint32_t gds_cs_ctxsw_cnt2;
- uint32_t gds_cs_ctxsw_cnt3;
- uint32_t reserved_112;
- uint32_t reserved_113;
- uint32_t cp_pq_exe_status_lo;
- uint32_t cp_pq_exe_status_hi;
- uint32_t cp_packet_id_lo;
- uint32_t cp_packet_id_hi;
- uint32_t cp_packet_exe_status_lo;
- uint32_t cp_packet_exe_status_hi;
- uint32_t gds_save_base_addr_lo;
- uint32_t gds_save_base_addr_hi;
- uint32_t gds_save_mask_lo;
- uint32_t gds_save_mask_hi;
- uint32_t ctx_save_base_addr_lo;
- uint32_t ctx_save_base_addr_hi;
- uint32_t reserved_126;
- uint32_t reserved_127;
- uint32_t cp_mqd_base_addr_lo;
- uint32_t cp_mqd_base_addr_hi;
- uint32_t cp_hqd_active;
- uint32_t cp_hqd_vmid;
- uint32_t cp_hqd_persistent_state;
- uint32_t cp_hqd_pipe_priority;
- uint32_t cp_hqd_queue_priority;
- uint32_t cp_hqd_quantum;
- uint32_t cp_hqd_pq_base_lo;
- uint32_t cp_hqd_pq_base_hi;
- uint32_t cp_hqd_pq_rptr;
- uint32_t cp_hqd_pq_rptr_report_addr_lo;
- uint32_t cp_hqd_pq_rptr_report_addr_hi;
- uint32_t cp_hqd_pq_wptr_poll_addr_lo;
- uint32_t cp_hqd_pq_wptr_poll_addr_hi;
- uint32_t cp_hqd_pq_doorbell_control;
- uint32_t cp_hqd_pq_wptr;
- uint32_t cp_hqd_pq_control;
- uint32_t cp_hqd_ib_base_addr_lo;
- uint32_t cp_hqd_ib_base_addr_hi;
- uint32_t cp_hqd_ib_rptr;
- uint32_t cp_hqd_ib_control;
- uint32_t cp_hqd_iq_timer;
- uint32_t cp_hqd_iq_rptr;
- uint32_t cp_hqd_dequeue_request;
- uint32_t cp_hqd_dma_offload;
- uint32_t cp_hqd_sema_cmd;
- uint32_t cp_hqd_msg_type;
- uint32_t cp_hqd_atomic0_preop_lo;
- uint32_t cp_hqd_atomic0_preop_hi;
- uint32_t cp_hqd_atomic1_preop_lo;
- uint32_t cp_hqd_atomic1_preop_hi;
- uint32_t cp_hqd_hq_status0;
- uint32_t cp_hqd_hq_control0;
- uint32_t cp_mqd_control;
- uint32_t cp_hqd_hq_status1;
- uint32_t cp_hqd_hq_control1;
- uint32_t cp_hqd_eop_base_addr_lo;
- uint32_t cp_hqd_eop_base_addr_hi;
- uint32_t cp_hqd_eop_control;
- uint32_t cp_hqd_eop_rptr;
- uint32_t cp_hqd_eop_wptr;
- uint32_t cp_hqd_eop_done_events;
- uint32_t cp_hqd_ctx_save_base_addr_lo;
- uint32_t cp_hqd_ctx_save_base_addr_hi;
- uint32_t cp_hqd_ctx_save_control;
- uint32_t cp_hqd_cntl_stack_offset;
- uint32_t cp_hqd_cntl_stack_size;
- uint32_t cp_hqd_wg_state_offset;
- uint32_t cp_hqd_ctx_save_size;
- uint32_t cp_hqd_gds_resource_state;
- uint32_t cp_hqd_error;
- uint32_t cp_hqd_eop_wptr_mem;
- uint32_t cp_hqd_eop_dones;
- uint32_t reserved_182;
- uint32_t reserved_183;
- uint32_t reserved_184;
- uint32_t reserved_185;
- uint32_t reserved_186;
- uint32_t reserved_187;
- uint32_t reserved_188;
- uint32_t reserved_189;
- uint32_t reserved_190;
- uint32_t reserved_191;
- uint32_t iqtimer_pkt_header;
- uint32_t iqtimer_pkt_dw0;
- uint32_t iqtimer_pkt_dw1;
- uint32_t iqtimer_pkt_dw2;
- uint32_t iqtimer_pkt_dw3;
- uint32_t iqtimer_pkt_dw4;
- uint32_t iqtimer_pkt_dw5;
- uint32_t iqtimer_pkt_dw6;
- uint32_t iqtimer_pkt_dw7;
- uint32_t iqtimer_pkt_dw8;
- uint32_t iqtimer_pkt_dw9;
- uint32_t iqtimer_pkt_dw10;
- uint32_t iqtimer_pkt_dw11;
- uint32_t iqtimer_pkt_dw12;
- uint32_t iqtimer_pkt_dw13;
- uint32_t iqtimer_pkt_dw14;
- uint32_t iqtimer_pkt_dw15;
- uint32_t iqtimer_pkt_dw16;
- uint32_t iqtimer_pkt_dw17;
- uint32_t iqtimer_pkt_dw18;
- uint32_t iqtimer_pkt_dw19;
- uint32_t iqtimer_pkt_dw20;
- uint32_t iqtimer_pkt_dw21;
- uint32_t iqtimer_pkt_dw22;
- uint32_t iqtimer_pkt_dw23;
- uint32_t iqtimer_pkt_dw24;
- uint32_t iqtimer_pkt_dw25;
- uint32_t iqtimer_pkt_dw26;
- uint32_t iqtimer_pkt_dw27;
- uint32_t iqtimer_pkt_dw28;
- uint32_t iqtimer_pkt_dw29;
- uint32_t iqtimer_pkt_dw30;
- uint32_t iqtimer_pkt_dw31;
- uint32_t reserved_225;
- uint32_t reserved_226;
- uint32_t reserved_227;
- uint32_t set_resources_header;
- uint32_t set_resources_dw1;
- uint32_t set_resources_dw2;
- uint32_t set_resources_dw3;
- uint32_t set_resources_dw4;
- uint32_t set_resources_dw5;
- uint32_t set_resources_dw6;
- uint32_t set_resources_dw7;
- uint32_t reserved_236;
- uint32_t reserved_237;
- uint32_t reserved_238;
- uint32_t reserved_239;
- uint32_t queue_doorbell_id0;
- uint32_t queue_doorbell_id1;
- uint32_t queue_doorbell_id2;
- uint32_t queue_doorbell_id3;
- uint32_t queue_doorbell_id4;
- uint32_t queue_doorbell_id5;
- uint32_t queue_doorbell_id6;
- uint32_t queue_doorbell_id7;
- uint32_t queue_doorbell_id8;
- uint32_t queue_doorbell_id9;
- uint32_t queue_doorbell_id10;
- uint32_t queue_doorbell_id11;
- uint32_t queue_doorbell_id12;
- uint32_t queue_doorbell_id13;
- uint32_t queue_doorbell_id14;
- uint32_t queue_doorbell_id15;
-};
-
struct vi_ce_ib_state {
uint32_t ce_ib_completion_status;
uint32_t ce_constegnine_count;
diff --git a/drivers/gpu/drm/amd/lib/Kconfig b/drivers/gpu/drm/amd/lib/Kconfig
new file mode 100644
index 000000000000..03ee7ad21ac3
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/Kconfig
@@ -0,0 +1,27 @@
+menu "AMD Library routines"
+
+#
+# Closed hash table
+#
+config CHASH
+ tristate "Closed hash table"
+ help
+ Statically sized closed hash table implementation with low
+ memory and CPU overhead.
+
+config CHASH_STATS
+ bool "Closed hash table performance statistics"
+ depends on CHASH
+ default n
+ help
+ Enable collection of performance statistics for closed hash tables.
+
+config CHASH_SELFTEST
+ bool "Closed hash table self test"
+ depends on CHASH
+ default n
+ help
+ Runs a selftest during module load. Several module parameters
+ are available to modify the behaviour of the test.
+
+endmenu
diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
new file mode 100644
index 000000000000..87cd7009e80f
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for AMD library routines, which are used by AMD driver
+# components.
+#
+# This is for common library routines that can be shared between AMD
+# driver components or later moved to kernel/lib for sharing with
+# other drivers.
+
+ccflags-y := -I$(src)/../include
+
+obj-$(CONFIG_CHASH) += chash.o
diff --git a/drivers/gpu/drm/amd/lib/chash.c b/drivers/gpu/drm/amd/lib/chash.c
new file mode 100644
index 000000000000..e07e6f3030d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/lib/chash.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/hash.h>
+#include <linux/bug.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/sched/clock.h>
+#include <asm/div64.h>
+#include <linux/chash.h>
+
+/**
+ * chash_table_alloc - Allocate closed hash table
+ * @table: Pointer to the table structure
+ * @bits: Table size will be 2^bits entries
+ * @key_size: Size of hash keys in bytes, 4 or 8
+ * @value_size: Size of data values in bytes, can be 0
+ */
+int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
+ unsigned int value_size, gfp_t gfp_mask)
+{
+ if (bits > 31)
+ return -EINVAL;
+
+ if (key_size != 4 && key_size != 8)
+ return -EINVAL;
+
+ table->data = kcalloc(__CHASH_DATA_SIZE(bits, key_size, value_size),
+ sizeof(long), gfp_mask);
+ if (!table->data)
+ return -ENOMEM;
+
+ __CHASH_TABLE_INIT(table->table, table->data,
+ bits, key_size, value_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(chash_table_alloc);
+
+/**
+ * chash_table_free - Free closed hash table
+ * @table: Pointer to the table structure
+ */
+void chash_table_free(struct chash_table *table)
+{
+ kfree(table->data);
+}
+EXPORT_SYMBOL(chash_table_free);
+
+#ifdef CONFIG_CHASH_STATS
+
+#define DIV_FRAC(nom, denom, quot, frac, frac_digits) do { \
+ u64 __nom = (nom); \
+ u64 __denom = (denom); \
+ u64 __quot, __frac; \
+ u32 __rem; \
+ \
+ while (__denom >> 32) { \
+ __nom >>= 1; \
+ __denom >>= 1; \
+ } \
+ __quot = __nom; \
+ __rem = do_div(__quot, __denom); \
+ __frac = __rem * (frac_digits) + (__denom >> 1); \
+ do_div(__frac, __denom); \
+ (quot) = __quot; \
+ (frac) = __frac; \
+ } while (0)
+
+void __chash_table_dump_stats(struct __chash_table *table)
+{
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+ u32 filled = 0, empty = 0, tombstones = 0;
+ u64 quot1, quot2;
+ u32 frac1, frac2;
+
+ do {
+ if (chash_iter_is_valid(iter))
+ filled++;
+ else if (chash_iter_is_empty(iter))
+ empty++;
+ else
+ tombstones++;
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ pr_debug("chash: key size %u, value size %u\n",
+ table->key_size, table->value_size);
+ pr_debug(" Slots total/filled/empty/tombstones: %u / %u / %u / %u\n",
+ 1 << table->bits, filled, empty, tombstones);
+ if (table->hits > 0) {
+ DIV_FRAC(table->hits_steps, table->hits, quot1, frac1, 1000);
+ DIV_FRAC(table->hits * 1000, table->hits_time_ns,
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Hits (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->hits, quot1, frac1, quot2, frac2);
+ if (table->miss > 0) {
+ DIV_FRAC(table->miss_steps, table->miss, quot1, frac1, 1000);
+ DIV_FRAC(table->miss * 1000, table->miss_time_ns,
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Misses (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->miss, quot1, frac1, quot2, frac2);
+ if (table->hits + table->miss > 0) {
+ DIV_FRAC(table->hits_steps + table->miss_steps,
+ table->hits + table->miss, quot1, frac1, 1000);
+ DIV_FRAC((table->hits + table->miss) * 1000,
+ (table->hits_time_ns + table->miss_time_ns),
+ quot2, frac2, 1000);
+ } else {
+ quot1 = quot2 = 0;
+ frac1 = frac2 = 0;
+ }
+ pr_debug(" Total (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
+ table->hits + table->miss, quot1, frac1, quot2, frac2);
+ if (table->relocs > 0) {
+ DIV_FRAC(table->hits + table->miss, table->relocs,
+ quot1, frac1, 1000);
+ DIV_FRAC(table->reloc_dist, table->relocs, quot2, frac2, 1000);
+ pr_debug(" Relocations (freq, avg.dist): %llu (1:%llu.%03u, %llu.%03u)\n",
+ table->relocs, quot1, frac1, quot2, frac2);
+ } else {
+ pr_debug(" No relocations\n");
+ }
+}
+EXPORT_SYMBOL(__chash_table_dump_stats);
+
+#undef DIV_FRAC
+#endif
+
+#define CHASH_INC(table, a) ((a) = ((a) + 1) & (table)->size_mask)
+#define CHASH_ADD(table, a, b) (((a) + (b)) & (table)->size_mask)
+#define CHASH_SUB(table, a, b) (((a) - (b)) & (table)->size_mask)
+#define CHASH_IN_RANGE(table, slot, first, last) \
+ (CHASH_SUB(table, slot, first) <= CHASH_SUB(table, last, first))
+
+/* Uncomment to enable verbose debug output: */
+/* #define CHASH_DEBUG */
+#ifdef CHASH_DEBUG
+static void chash_table_dump(struct __chash_table *table)
+{
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+
+ do {
+ if ((iter.slot & 3) == 0)
+ pr_debug("%04x: ", iter.slot);
+
+ if (chash_iter_is_valid(iter))
+ pr_debug("[%016llx] ", chash_iter_key(iter));
+ else if (chash_iter_is_empty(iter))
+ pr_debug("[ <empty> ] ");
+ else
+ pr_debug("[ <tombstone> ] ");
+
+ if ((iter.slot & 3) == 3)
+ pr_debug("\n");
+
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ if ((iter.slot & 3) != 0)
+ pr_debug("\n");
+}
+
+static int chash_table_check(struct __chash_table *table)
+{
+ u32 hash;
+ struct chash_iter iter = CHASH_ITER_INIT(table, 0);
+ struct chash_iter cur = CHASH_ITER_INIT(table, 0);
+
+ do {
+ if (!chash_iter_is_valid(iter)) {
+ CHASH_ITER_INC(iter);
+ continue;
+ }
+
+ hash = chash_iter_hash(iter);
+ CHASH_ITER_SET(cur, hash);
+ while (cur.slot != iter.slot) {
+ if (chash_iter_is_empty(cur)) {
+ pr_err("Path to element at %x with hash %x broken at slot %x\n",
+ iter.slot, hash, cur.slot);
+ chash_table_dump(table);
+ return -EINVAL;
+ }
+ CHASH_ITER_INC(cur);
+ }
+
+ CHASH_ITER_INC(iter);
+ } while (iter.slot);
+
+ return 0;
+}
+#endif
+
+static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
+{
+ BUG_ON(src.table == dst.table && src.slot == dst.slot);
+ BUG_ON(dst.table->key_size != src.table->key_size);
+ BUG_ON(dst.table->value_size != src.table->value_size);
+
+ if (dst.table->key_size == 4)
+ dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
+ else
+ dst.table->keys64[dst.slot] = src.table->keys64[src.slot];
+
+ if (dst.table->value_size)
+ memcpy(chash_iter_value(dst), chash_iter_value(src),
+ dst.table->value_size);
+
+ chash_iter_set_valid(dst);
+ chash_iter_set_invalid(src);
+
+#ifdef CONFIG_CHASH_STATS
+ if (src.table == dst.table) {
+ dst.table->relocs++;
+ dst.table->reloc_dist +=
+ CHASH_SUB(dst.table, src.slot, dst.slot);
+ }
+#endif
+}
+
+/**
+ * __chash_table_find - Helper for looking up a hash table entry
+ * @iter: Pointer to hash table iterator
+ * @key: Key of the entry to find
+ * @for_removal: set to true if the element will be removed soon
+ *
+ * Searches for an entry in the hash table with a given key. iter must
+ * be initialized by the caller to point to the home position of the
+ * hypothetical entry, i.e. it must be initialized with the hash table
+ * and the key's hash as the initial slot for the search.
+ *
+ * This function also does some local clean-up to speed up future
+ * look-ups by relocating entries to better slots and removing
+ * tombstones that are no longer needed.
+ *
+ * If @for_removal is true, the function avoids relocating the entry
+ * that is being returned.
+ *
+ * Returns 0 if the search is successful. In this case iter is updated
+ * to point to the found entry. Otherwise %-EINVAL is returned and the
+ * iter is updated to point to the first available slot for the given
+ * key. If the table is full, the slot is set to -1.
+ */
+static int chash_table_find(struct chash_iter *iter, u64 key,
+ bool for_removal)
+{
+#ifdef CONFIG_CHASH_STATS
+ u64 ts1 = local_clock();
+#endif
+ u32 hash = iter->slot;
+ struct chash_iter first_redundant = CHASH_ITER_INIT(iter->table, -1);
+ int first_avail = (for_removal ? -2 : -1);
+
+ while (!chash_iter_is_valid(*iter) || chash_iter_key(*iter) != key) {
+ if (chash_iter_is_empty(*iter)) {
+ /* Found an empty slot, which ends the
+ * search. Clean up any preceding tombstones
+ * that are no longer needed because they
+ * lead nowhere.
+ */
+ if ((int)first_redundant.slot < 0)
+ goto not_found;
+ while (first_redundant.slot != iter->slot) {
+ if (!chash_iter_is_valid(first_redundant))
+ chash_iter_set_empty(first_redundant);
+ CHASH_ITER_INC(first_redundant);
+ }
+#ifdef CHASH_DEBUG
+ chash_table_check(iter->table);
+#endif
+ goto not_found;
+ } else if (!chash_iter_is_valid(*iter)) {
+ /* Found a tombstone. Remember it as candidate
+ * for relocating the entry we're looking for
+ * or for adding a new entry with the given key
+ */
+ if (first_avail == -1)
+ first_avail = iter->slot;
+ /* Or mark it as the start of a series of
+ * potentially redundant tombstones
+ */
+ else if (first_redundant.slot == -1)
+ CHASH_ITER_SET(first_redundant, iter->slot);
+ } else if (first_redundant.slot >= 0) {
+ /* Found a valid, occupied slot with a
+ * preceding series of tombstones. Relocate it
+ * to a better position that no longer depends
+ * on those tombstones
+ */
+ u32 cur_hash = chash_iter_hash(*iter);
+
+ if (!CHASH_IN_RANGE(iter->table, cur_hash,
+ first_redundant.slot + 1,
+ iter->slot)) {
+ /* This entry has a hash at or before
+ * the first tombstone we found. We
+ * can relocate it to that tombstone
+ * and advance to the next tombstone
+ */
+ chash_iter_relocate(first_redundant, *iter);
+ do {
+ CHASH_ITER_INC(first_redundant);
+ } while (chash_iter_is_valid(first_redundant));
+ } else if (cur_hash != iter->slot) {
+ /* Relocate entry to its home position
+ * or as close as possible so it no
+ * longer depends on any preceding
+ * tombstones
+ */
+ struct chash_iter new_iter =
+ CHASH_ITER_INIT(iter->table, cur_hash);
+
+ while (new_iter.slot != iter->slot &&
+ chash_iter_is_valid(new_iter))
+ CHASH_ITER_INC(new_iter);
+
+ if (new_iter.slot != iter->slot)
+ chash_iter_relocate(new_iter, *iter);
+ }
+ }
+
+ CHASH_ITER_INC(*iter);
+ if (iter->slot == hash) {
+ iter->slot = -1;
+ goto not_found;
+ }
+ }
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->hits++;
+ iter->table->hits_steps += CHASH_SUB(iter->table, iter->slot, hash) + 1;
+#endif
+
+ if (first_avail >= 0) {
+ CHASH_ITER_SET(first_redundant, first_avail);
+ chash_iter_relocate(first_redundant, *iter);
+ iter->slot = first_redundant.slot;
+ iter->mask = first_redundant.mask;
+ }
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->hits_time_ns += local_clock() - ts1;
+#endif
+
+ return 0;
+
+not_found:
+#ifdef CONFIG_CHASH_STATS
+ iter->table->miss++;
+ iter->table->miss_steps += (iter->slot < 0) ?
+ (1 << iter->table->bits) :
+ CHASH_SUB(iter->table, iter->slot, hash) + 1;
+#endif
+
+ if (first_avail >= 0)
+ CHASH_ITER_SET(*iter, first_avail);
+
+#ifdef CONFIG_CHASH_STATS
+ iter->table->miss_time_ns += local_clock() - ts1;
+#endif
+
+ return -EINVAL;
+}
+
+int __chash_table_copy_in(struct __chash_table *table, u64 key,
+ const void *value)
+{
+ u32 hash = (table->key_size == 4) ?
+ hash_32(key, table->bits) : hash_64(key, table->bits);
+ struct chash_iter iter = CHASH_ITER_INIT(table, hash);
+ int r = chash_table_find(&iter, key, false);
+
+ /* Found an existing entry */
+ if (!r) {
+ if (value && table->value_size)
+ memcpy(chash_iter_value(iter), value,
+ table->value_size);
+ return 1;
+ }
+
+ /* Is there a place to add a new entry? */
+ if (iter.slot < 0) {
+ pr_err("Hash table overflow\n");
+ return -ENOMEM;
+ }
+
+ chash_iter_set_valid(iter);
+
+ if (table->key_size == 4)
+ table->keys32[iter.slot] = key;
+ else
+ table->keys64[iter.slot] = key;
+ if (value && table->value_size)
+ memcpy(chash_iter_value(iter), value, table->value_size);
+
+ return 0;
+}
+EXPORT_SYMBOL(__chash_table_copy_in);
+
+int __chash_table_copy_out(struct __chash_table *table, u64 key,
+ void *value, bool remove)
+{
+ u32 hash = (table->key_size == 4) ?
+ hash_32(key, table->bits) : hash_64(key, table->bits);
+ struct chash_iter iter = CHASH_ITER_INIT(table, hash);
+ int r = chash_table_find(&iter, key, remove);
+
+ if (r < 0)
+ return r;
+
+ if (value && table->value_size)
+ memcpy(value, chash_iter_value(iter), table->value_size);
+
+ if (remove)
+ chash_iter_set_invalid(iter);
+
+ return iter.slot;
+}
+EXPORT_SYMBOL(__chash_table_copy_out);
+
+#ifdef CONFIG_CHASH_SELFTEST
+/**
+ * chash_self_test - Run a self-test of the hash table implementation
+ * @bits: Table size will be 2^bits entries
+ * @key_size: Size of hash keys in bytes, 4 or 8
+ * @min_fill: Minimum fill level during the test
+ * @max_fill: Maximum fill level during the test
+ * @iterations: Number of test iterations
+ *
+ * The test adds and removes entries from a hash table, cycling the
+ * fill level between min_fill and max_fill entries. Also tests lookup
+ * and value retrieval.
+ */
+static int __init chash_self_test(u8 bits, u8 key_size,
+ int min_fill, int max_fill,
+ u64 iterations)
+{
+ struct chash_table table;
+ int ret;
+ u64 add_count, rmv_count;
+ u64 value;
+
+ if (key_size == 4 && iterations > 0xffffffff)
+ return -EINVAL;
+ if (min_fill >= max_fill)
+ return -EINVAL;
+
+ ret = chash_table_alloc(&table, bits, key_size, sizeof(u64),
+ GFP_KERNEL);
+ if (ret) {
+ pr_err("chash_table_alloc failed: %d\n", ret);
+ return ret;
+ }
+
+ for (add_count = 0, rmv_count = 0; add_count < iterations;
+ add_count++) {
+ /* When we hit the max_fill level, remove entries down
+ * to min_fill
+ */
+ if (add_count - rmv_count == max_fill) {
+ u64 find_count = rmv_count;
+
+ /* First try to find all entries that we're
+ * about to remove, confirm their values, and
+ * test writing them back a second time.
+ */
+ for (; add_count - find_count > min_fill;
+ find_count++) {
+ ret = chash_table_copy_out(&table, find_count,
+ &value);
+ if (ret < 0) {
+ pr_err("chash_table_copy_out failed: %d\n",
+ ret);
+ goto out;
+ }
+ if (value != ~find_count) {
+ pr_err("Wrong value retrieved for key 0x%llx, expected 0x%llx got 0x%llx\n",
+ find_count, ~find_count, value);
+#ifdef CHASH_DEBUG
+ chash_table_dump(&table.table);
+#endif
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = chash_table_copy_in(&table, find_count,
+ &value);
+ if (ret != 1) {
+ pr_err("copy_in second time returned %d, expected 1\n",
+ ret);
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ /* Remove them until we hit min_fill level */
+ for (; add_count - rmv_count > min_fill; rmv_count++) {
+ ret = chash_table_remove(&table, rmv_count,
+ NULL);
+ if (ret < 0) {
+ pr_err("chash_table_remove failed: %d\n",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+ /* Add a new value */
+ value = ~add_count;
+ ret = chash_table_copy_in(&table, add_count, &value);
+ if (ret != 0) {
+ pr_err("copy_in first time returned %d, expected 0\n",
+ ret);
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ chash_table_dump_stats(&table);
+ chash_table_reset_stats(&table);
+
+out:
+ chash_table_free(&table);
+ return ret;
+}
+
+static unsigned int chash_test_bits = 10;
+MODULE_PARM_DESC(test_bits,
+ "Selftest number of hash bits ([4..20], default=10)");
+module_param_named(test_bits, chash_test_bits, uint, 0444);
+
+static unsigned int chash_test_keysize = 8;
+MODULE_PARM_DESC(test_keysize, "Selftest keysize (4 or 8, default=8)");
+module_param_named(test_keysize, chash_test_keysize, uint, 0444);
+
+static unsigned int chash_test_minfill;
+MODULE_PARM_DESC(test_minfill, "Selftest minimum #entries (default=50%)");
+module_param_named(test_minfill, chash_test_minfill, uint, 0444);
+
+static unsigned int chash_test_maxfill;
+MODULE_PARM_DESC(test_maxfill, "Selftest maximum #entries (default=80%)");
+module_param_named(test_maxfill, chash_test_maxfill, uint, 0444);
+
+static unsigned long chash_test_iters;
+MODULE_PARM_DESC(test_iters, "Selftest iterations (default=1000 x #entries)");
+module_param_named(test_iters, chash_test_iters, ulong, 0444);
+
+static int __init chash_init(void)
+{
+ int ret;
+ u64 ts1_ns;
+
+ /* Skip self test on user errors */
+ if (chash_test_bits < 4 || chash_test_bits > 20) {
+ pr_err("chash: test_bits out of range [4..20].\n");
+ return 0;
+ }
+ if (chash_test_keysize != 4 && chash_test_keysize != 8) {
+ pr_err("chash: test_keysize invalid. Must be 4 or 8.\n");
+ return 0;
+ }
+
+ if (!chash_test_minfill)
+ chash_test_minfill = (1 << chash_test_bits) / 2;
+ if (!chash_test_maxfill)
+ chash_test_maxfill = (1 << chash_test_bits) * 4 / 5;
+ if (!chash_test_iters)
+ chash_test_iters = (1 << chash_test_bits) * 1000;
+
+ if (chash_test_minfill >= (1 << chash_test_bits)) {
+ pr_err("chash: test_minfill too big. Must be < table size.\n");
+ return 0;
+ }
+ if (chash_test_maxfill >= (1 << chash_test_bits)) {
+ pr_err("chash: test_maxfill too big. Must be < table size.\n");
+ return 0;
+ }
+ if (chash_test_minfill >= chash_test_maxfill) {
+ pr_err("chash: test_minfill must be < test_maxfill.\n");
+ return 0;
+ }
+ if (chash_test_keysize == 4 && chash_test_iters > 0xffffffff) {
+ pr_err("chash: test_iters must be < 4G for 4 byte keys.\n");
+ return 0;
+ }
+
+ ts1_ns = local_clock();
+ ret = chash_self_test(chash_test_bits, chash_test_keysize,
+ chash_test_minfill, chash_test_maxfill,
+ chash_test_iters);
+ if (!ret) {
+ u64 ts_delta_us = local_clock() - ts1_ns;
+ u64 iters_per_second = (u64)chash_test_iters * 1000000;
+
+ do_div(ts_delta_us, 1000);
+ do_div(iters_per_second, ts_delta_us);
+ pr_info("chash: self test took %llu us, %llu iterations/s\n",
+ ts_delta_us, iters_per_second);
+ } else {
+ pr_err("chash: self test failed: %d\n", ret);
+ }
+
+ return ret;
+}
+
+module_init(chash_init);
+
+#endif /* CONFIG_CHASH_SELFTEST */
+
+MODULE_DESCRIPTION("Closed hash table");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 4e132b936e3d..68b417ac94dd 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -4,12 +4,11 @@ subdir-ccflags-y += \
-I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
-I$(FULL_AMD_PATH)/powerplay/smumgr\
- -I$(FULL_AMD_PATH)/powerplay/hwmgr \
- -I$(FULL_AMD_PATH)/powerplay/eventmgr
+ -I$(FULL_AMD_PATH)/powerplay/hwmgr
AMD_PP_PATH = ../powerplay
-PP_LIBS = smumgr hwmgr eventmgr
+PP_LIBS = smumgr hwmgr
AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index f73e80c4bf33..9f3f3b8cf64f 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -29,22 +29,19 @@
#include "amd_powerplay.h"
#include "pp_instance.h"
#include "power_state.h"
-#include "eventmanager.h"
-
static inline int pp_check(struct pp_instance *handle)
{
if (handle == NULL || handle->pp_valid != PP_VALID)
return -EINVAL;
- if (handle->smu_mgr == NULL || handle->smu_mgr->smumgr_funcs == NULL)
+ if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL)
return -EINVAL;
if (handle->pm_en == 0)
return PP_DPM_DISABLED;
- if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL
- || handle->eventmgr == NULL)
+ if (handle->hwmgr->hwmgr_func == NULL)
return PP_DPM_DISABLED;
return 0;
@@ -55,46 +52,32 @@ static int pp_early_init(void *handle)
int ret;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
- ret = smum_early_init(pp_handle);
+ ret = hwmgr_early_init(pp_handle);
if (ret)
- return ret;
+ return -EINVAL;
if ((pp_handle->pm_en == 0)
|| cgs_is_virtualization_enabled(pp_handle->device))
return PP_DPM_DISABLED;
- ret = hwmgr_early_init(pp_handle);
- if (ret) {
- pp_handle->pm_en = 0;
- return PP_DPM_DISABLED;
- }
-
- ret = eventmgr_early_init(pp_handle);
- if (ret) {
- kfree(pp_handle->hwmgr);
- pp_handle->hwmgr = NULL;
- pp_handle->pm_en = 0;
- return PP_DPM_DISABLED;
- }
-
return 0;
}
static int pp_sw_init(void *handle)
{
- struct pp_smumgr *smumgr;
+ struct pp_hwmgr *hwmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->smu_init == NULL)
+ if (hwmgr->smumgr_funcs->smu_init == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->smu_init(smumgr);
+ ret = hwmgr->smumgr_funcs->smu_init(hwmgr);
pr_info("amdgpu: powerplay sw initialized\n");
}
@@ -103,40 +86,39 @@ static int pp_sw_init(void *handle)
static int pp_sw_fini(void *handle)
{
- struct pp_smumgr *smumgr;
+ struct pp_hwmgr *hwmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->smu_fini == NULL)
+ if (hwmgr->smumgr_funcs->smu_fini == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->smu_fini(smumgr);
+ ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
}
return ret;
}
static int pp_hw_init(void *handle)
{
- struct pp_smumgr *smumgr;
- struct pp_eventmgr *eventmgr;
int ret = 0;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
+ struct pp_hwmgr *hwmgr;
ret = pp_check(pp_handle);
if (ret == 0 || ret == PP_DPM_DISABLED) {
- smumgr = pp_handle->smu_mgr;
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->start_smu == NULL)
+ if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- if(smumgr->smumgr_funcs->start_smu(smumgr)) {
+ if (hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
pr_err("smc start failed\n");
- smumgr->smumgr_funcs->smu_fini(smumgr);
+ hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
 return -EINVAL;
}
if (ret == PP_DPM_DISABLED)
@@ -146,38 +128,21 @@ static int pp_hw_init(void *handle)
ret = hwmgr_hw_init(pp_handle);
if (ret)
goto err;
-
- eventmgr = pp_handle->eventmgr;
- if (eventmgr->pp_eventmgr_init == NULL ||
- eventmgr->pp_eventmgr_init(eventmgr))
- goto err;
-
return 0;
err:
pp_handle->pm_en = 0;
- kfree(pp_handle->eventmgr);
- kfree(pp_handle->hwmgr);
- pp_handle->hwmgr = NULL;
- pp_handle->eventmgr = NULL;
return PP_DPM_DISABLED;
}
static int pp_hw_fini(void *handle)
{
- struct pp_eventmgr *eventmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
ret = pp_check(pp_handle);
-
- if (ret == 0) {
- eventmgr = pp_handle->eventmgr;
-
- if (eventmgr->pp_eventmgr_fini != NULL)
- eventmgr->pp_eventmgr_fini(eventmgr);
-
+ if (ret == 0)
hwmgr_hw_fini(pp_handle);
- }
+
return 0;
}
@@ -244,8 +209,6 @@ static int pp_set_powergating_state(void *handle,
static int pp_suspend(void *handle)
{
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
@@ -256,17 +219,12 @@ static int pp_suspend(void *handle)
else if (ret != 0)
return ret;
- eventmgr = pp_handle->eventmgr;
- pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
-
- return 0;
+ return hwmgr_hw_suspend(pp_handle);
}
static int pp_resume(void *handle)
{
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
- struct pp_smumgr *smumgr;
+ struct pp_hwmgr *hwmgr;
int ret, ret1;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -275,26 +233,22 @@ static int pp_resume(void *handle)
if (ret1 != 0 && ret1 != PP_DPM_DISABLED)
return ret1;
- smumgr = pp_handle->smu_mgr;
+ hwmgr = pp_handle->hwmgr;
- if (smumgr->smumgr_funcs->start_smu == NULL)
+ if (hwmgr->smumgr_funcs->start_smu == NULL)
return -EINVAL;
- ret = smumgr->smumgr_funcs->start_smu(smumgr);
+ ret = hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr);
if (ret) {
pr_err("smc start failed\n");
- smumgr->smumgr_funcs->smu_fini(smumgr);
+ hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
return ret;
}
if (ret1 == PP_DPM_DISABLED)
return 0;
- eventmgr = pp_handle->eventmgr;
-
- pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
-
- return 0;
+ return hwmgr_hw_resume(pp_handle);
}
const struct amd_ip_funcs pp_ip_funcs = {
@@ -324,6 +278,42 @@ static int pp_dpm_fw_loading_complete(void *handle)
return 0;
}
+static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
+ enum amd_dpm_forced_level *level)
+{
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (!(hwmgr->dpm_level & profile_mode_mask)) {
+ /* enter umd pstate, save current level, disable gfx cg */
+ if (*level & profile_mode_mask) {
+ hwmgr->saved_dpm_level = hwmgr->dpm_level;
+ hwmgr->en_umd_pstate = true;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ }
+ } else {
+ /* exit umd pstate, restore level, enable gfx cg */
+ if (!(*level & profile_mode_mask)) {
+ if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ *level = hwmgr->saved_dpm_level;
+ hwmgr->en_umd_pstate = false;
+ cgs_set_clockgating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ cgs_set_powergating_state(hwmgr->device,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ }
+ }
+}
+
static int pp_dpm_force_performance_level(void *handle,
enum amd_dpm_forced_level level)
{
@@ -338,13 +328,22 @@ static int pp_dpm_force_performance_level(void *handle,
hwmgr = pp_handle->hwmgr;
+ if (level == hwmgr->dpm_level)
+ return 0;
+
if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
+ pp_dpm_en_umd_pstate(hwmgr, &level);
+ hwmgr->request_dpm_level = level;
+ hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+ ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
+ if (!ret)
+ hwmgr->dpm_level = hwmgr->request_dpm_level;
+
mutex_unlock(&pp_handle->pp_lock);
return 0;
}
@@ -369,11 +368,12 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
return level;
}
-static int pp_dpm_get_sclk(void *handle, bool low)
+static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t clk = 0;
ret = pp_check(pp_handle);
@@ -387,16 +387,17 @@ static int pp_dpm_get_sclk(void *handle, bool low)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
+ clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return clk;
}
-static int pp_dpm_get_mclk(void *handle, bool low)
+static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t clk = 0;
ret = pp_check(pp_handle);
@@ -410,12 +411,12 @@ static int pp_dpm_get_mclk(void *handle, bool low)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
+ clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return clk;
}
-static int pp_dpm_powergate_vce(void *handle, bool gate)
+static void pp_dpm_powergate_vce(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -424,21 +425,20 @@ static int pp_dpm_powergate_vce(void *handle, bool gate)
ret = pp_check(pp_handle);
if (ret != 0)
- return ret;
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->powergate_vce == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
+ hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
}
-static int pp_dpm_powergate_uvd(void *handle, bool gate)
+static void pp_dpm_powergate_uvd(void *handle, bool gate)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -447,74 +447,34 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate)
ret = pp_check(pp_handle);
if (ret != 0)
- return ret;
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
+ hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
}
-static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
-{
- switch (state) {
- case POWER_STATE_TYPE_BATTERY:
- return PP_StateUILabel_Battery;
- case POWER_STATE_TYPE_BALANCED:
- return PP_StateUILabel_Balanced;
- case POWER_STATE_TYPE_PERFORMANCE:
- return PP_StateUILabel_Performance;
- default:
- return PP_StateUILabel_None;
- }
-}
-
-static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
void *input, void *output)
{
int ret = 0;
- struct pem_event_data data = { {0} };
struct pp_instance *pp_handle = (struct pp_instance *)handle;
ret = pp_check(pp_handle);
if (ret != 0)
return ret;
- mutex_lock(&pp_handle->pp_lock);
- switch (event_id) {
- case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- case AMD_PP_EVENT_ENABLE_USER_STATE:
- {
- enum amd_pm_state_type ps;
-
- if (input == NULL) {
- ret = -EINVAL;
- break;
- }
- ps = *(unsigned long *)input;
- data.requested_ui_label = power_state_convert(ps);
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- }
- case AMD_PP_EVENT_COMPLETE_INIT:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- case AMD_PP_EVENT_READJUST_POWER_STATE:
- ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
- break;
- default:
- break;
- }
+ mutex_lock(&pp_handle->pp_lock);
+ ret = hwmgr_handle_task(pp_handle, task_id, input, output);
mutex_unlock(&pp_handle->pp_lock);
+
return ret;
}
@@ -562,7 +522,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
return pm_type;
}
-static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -571,25 +531,25 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
ret = pp_check(pp_handle);
if (ret != 0)
- return ret;
+ return;
hwmgr = pp_handle->hwmgr;
if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
pr_info("%s was not implemented.\n", __func__);
- return 0;
+ return;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
+ hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
}
-static int pp_dpm_get_fan_control_mode(void *handle)
+static uint32_t pp_dpm_get_fan_control_mode(void *handle)
{
struct pp_hwmgr *hwmgr;
struct pp_instance *pp_handle = (struct pp_instance *)handle;
int ret = 0;
+ uint32_t mode = 0;
ret = pp_check(pp_handle);
@@ -603,9 +563,9 @@ static int pp_dpm_get_fan_control_mode(void *handle)
return 0;
}
mutex_lock(&pp_handle->pp_lock);
- ret = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+ mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
mutex_unlock(&pp_handle->pp_lock);
- return ret;
+ return mode;
}
static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
@@ -1128,7 +1088,7 @@ static int pp_dpm_switch_power_profile(void *handle,
return 0;
}
-const struct amd_powerplay_funcs pp_dpm_funcs = {
+const struct amd_pm_funcs pp_dpm_funcs = {
.get_temperature = pp_dpm_get_temperature,
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1189,15 +1149,9 @@ int amd_powerplay_destroy(void *handle)
{
struct pp_instance *instance = (struct pp_instance *)handle;
- if (instance->pm_en) {
- kfree(instance->eventmgr);
- kfree(instance->hwmgr);
- instance->hwmgr = NULL;
- instance->eventmgr = NULL;
- }
+ kfree(instance->hwmgr);
+ instance->hwmgr = NULL;
- kfree(instance->smu_mgr);
- instance->smu_mgr = NULL;
kfree(instance);
instance = NULL;
return 0;
@@ -1206,18 +1160,16 @@ int amd_powerplay_destroy(void *handle)
int amd_powerplay_reset(void *handle)
{
struct pp_instance *instance = (struct pp_instance *)handle;
- struct pp_eventmgr *eventmgr;
- struct pem_event_data event_data = { {0} };
int ret;
- if (cgs_is_virtualization_enabled(instance->smu_mgr->device))
+ if (cgs_is_virtualization_enabled(instance->hwmgr->device))
return PP_DPM_DISABLED;
ret = pp_check(instance);
if (ret != 0)
return ret;
- ret = pp_hw_fini(handle);
+ ret = pp_hw_fini(instance);
if (ret)
return ret;
@@ -1225,16 +1177,7 @@ int amd_powerplay_reset(void *handle)
if (ret)
return PP_DPM_DISABLED;
- eventmgr = instance->eventmgr;
-
- if (eventmgr->pp_eventmgr_init == NULL)
- return PP_DPM_DISABLED;
-
- ret = eventmgr->pp_eventmgr_init(eventmgr);
- if (ret)
- return ret;
-
- return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+ return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
}
/* export this function to DAL */
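
With the event manager gone, every former event dispatch above collapses into hwmgr_handle_task(). A hedged sketch of the new flow, using only task IDs and calls visible in this patch; the wrapper function itself is illustrative, not something the patch adds:

	/* Illustrative only: the suspend/resume/reset paths above now call
	 * hwmgr_hw_suspend()/hwmgr_hw_resume()/hwmgr_handle_task() directly
	 * instead of building a pem_event_data and walking an action chain.
	 */
	static int example_complete_init(struct pp_instance *pp_handle)
	{
		/* Same call amd_powerplay_reset() ends with; input/output
		 * are unused for this task.
		 */
		return hwmgr_handle_task(pp_handle, AMD_PP_TASK_COMPLETE_INIT,
					 NULL, NULL);
	}
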
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile b/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
deleted file mode 100644
index 7509e3850087..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the 'event manager' sub-component of powerplay.
-# It provides the event management services for the driver.
-
-EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \
- eventactionchains.o eventsubchains.o eventtasks.o psm.o
-
-AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR))
-
-AMD_POWERPLAY_FILES += $(AMD_PP_EVENT)
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
deleted file mode 100644
index 8cee4e0f9fde..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmgr.h"
-#include "eventactionchains.h"
-#include "eventsubchains.h"
-
-static const pem_event_action * const initialize_event[] = {
- block_adjust_power_state_tasks,
- power_budget_tasks,
- system_config_tasks,
- setup_asic_tasks,
- enable_dynamic_state_management_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- initialize_thermal_controller_tasks,
- conditionally_force_3d_performance_state_tasks,
- process_vbios_eventinfo_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain initialize_action_chain = {
- "Initialize",
- initialize_event
-};
-
-static const pem_event_action * const uninitialize_event[] = {
- ungate_all_display_phys_tasks,
- uninitialize_display_phy_access_tasks,
- disable_gfx_voltage_island_power_gating_tasks,
- disable_gfx_clock_gating_tasks,
- uninitialize_thermal_controller_tasks,
- set_boot_state_tasks,
- adjust_power_state_tasks,
- disable_dynamic_state_management_tasks,
- disable_clock_power_gatings_tasks,
- cleanup_asic_tasks,
- prepare_for_pnp_stop_tasks,
- NULL
-};
-
-const struct action_chain uninitialize_action_chain = {
- "Uninitialize",
- uninitialize_event
-};
-
-static const pem_event_action * const power_source_change_event_pp_enabled[] = {
- set_power_source_tasks,
- set_power_saving_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- set_nbmcu_state_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain power_source_change_action_chain_pp_enabled = {
- "Power source change - PowerPlay enabled",
- power_source_change_event_pp_enabled
-};
-
-static const pem_event_action * const power_source_change_event_pp_disabled[] = {
- set_power_source_tasks,
- set_nbmcu_state_tasks,
- NULL
-};
-
-const struct action_chain power_source_changes_action_chain_pp_disabled = {
- "Power source change - PowerPlay disabled",
- power_source_change_event_pp_disabled
-};
-
-static const pem_event_action * const power_source_change_event_hardware_dc[] = {
- set_power_source_tasks,
- set_power_saving_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- reset_hardware_dc_notification_tasks,
- set_nbmcu_state_tasks,
- broadcast_power_policy_tasks,
- NULL
-};
-
-const struct action_chain power_source_change_action_chain_hardware_dc = {
- "Power source change - with Hardware DC switching",
- power_source_change_event_hardware_dc
-};
-
-static const pem_event_action * const suspend_event[] = {
- reset_display_phy_access_tasks,
- unregister_interrupt_tasks,
- disable_gfx_voltage_island_power_gating_tasks,
- disable_gfx_clock_gating_tasks,
- notify_smu_suspend_tasks,
- disable_smc_firmware_ctf_tasks,
- set_boot_state_tasks,
- adjust_power_state_tasks,
- disable_fps_tasks,
- vari_bright_suspend_tasks,
- reset_fan_speed_to_default_tasks,
- power_down_asic_tasks,
- disable_stutter_mode_tasks,
- set_connected_standby_tasks,
- block_hw_access_tasks,
- NULL
-};
-
-const struct action_chain suspend_action_chain = {
- "Suspend",
- suspend_event
-};
-
-static const pem_event_action * const resume_event[] = {
- unblock_hw_access_tasks,
- resume_connected_standby_tasks,
- notify_smu_resume_tasks,
- reset_display_configCounter_tasks,
- update_dal_configuration_tasks,
- vari_bright_resume_tasks,
- setup_asic_tasks,
- enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
- enable_dynamic_state_management_tasks,
- enable_disable_bapm_tasks,
- initialize_thermal_controller_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- adjust_power_state_tasks,
- enable_disable_fps_tasks,
- notify_hw_power_source_tasks,
- process_vbios_event_info_tasks,
- enable_gfx_clock_gating_tasks,
- enable_gfx_voltage_island_power_gating_tasks,
- reset_clock_gating_tasks,
- notify_smu_vpu_recovery_end_tasks,
- disable_vpu_cap_tasks,
- execute_escape_sequence_tasks,
- NULL
-};
-
-
-const struct action_chain resume_action_chain = {
- "resume",
- resume_event
-};
-
-static const pem_event_action * const complete_init_event[] = {
- unblock_adjust_power_state_tasks,
- adjust_power_state_tasks,
- enable_gfx_clock_gating_tasks,
- enable_gfx_voltage_island_power_gating_tasks,
- notify_power_state_change_tasks,
- NULL
-};
-
-const struct action_chain complete_init_action_chain = {
- "complete init",
- complete_init_event
-};
-
-static const pem_event_action * const enable_gfx_clock_gating_event[] = {
- enable_gfx_clock_gating_tasks,
- NULL
-};
-
-const struct action_chain enable_gfx_clock_gating_action_chain = {
- "enable gfx clock gate",
- enable_gfx_clock_gating_event
-};
-
-static const pem_event_action * const disable_gfx_clock_gating_event[] = {
- disable_gfx_clock_gating_tasks,
- NULL
-};
-
-const struct action_chain disable_gfx_clock_gating_action_chain = {
- "disable gfx clock gate",
- disable_gfx_clock_gating_event
-};
-
-static const pem_event_action * const enable_cgpg_event[] = {
- enable_cgpg_tasks,
- NULL
-};
-
-const struct action_chain enable_cgpg_action_chain = {
- "eable cg pg",
- enable_cgpg_event
-};
-
-static const pem_event_action * const disable_cgpg_event[] = {
- disable_cgpg_tasks,
- NULL
-};
-
-const struct action_chain disable_cgpg_action_chain = {
- "disable cg pg",
- disable_cgpg_event
-};
-
-
-/* Enable user _2d performance and activate */
-
-static const pem_event_action * const enable_user_state_event[] = {
- create_new_user_performance_state_tasks,
- adjust_power_state_tasks,
- NULL
-};
-
-const struct action_chain enable_user_state_action_chain = {
- "Enable user state",
- enable_user_state_event
-};
-
-static const pem_event_action * const enable_user_2d_performance_event[] = {
- enable_user_2d_performance_tasks,
- add_user_2d_performance_state_tasks,
- set_performance_state_tasks,
- adjust_power_state_tasks,
- delete_user_2d_performance_state_tasks,
- NULL
-};
-
-const struct action_chain enable_user_2d_performance_action_chain = {
- "enable_user_2d_performance_event_activate",
- enable_user_2d_performance_event
-};
-
-
-static const pem_event_action * const disable_user_2d_performance_event[] = {
- disable_user_2d_performance_tasks,
- delete_user_2d_performance_state_tasks,
- NULL
-};
-
-const struct action_chain disable_user_2d_performance_action_chain = {
- "disable_user_2d_performance_event",
- disable_user_2d_performance_event
-};
-
-
-static const pem_event_action * const display_config_change_event[] = {
- /* countDisplayConfigurationChangeEventTasks, */
- unblock_adjust_power_state_tasks,
- set_cpu_power_state,
- notify_hw_power_source_tasks,
- get_2d_performance_state_tasks,
- set_performance_state_tasks,
- /* updateDALConfigurationTasks,
- variBrightDisplayConfigurationChangeTasks, */
- adjust_power_state_tasks,
- /*enableDisableFPSTasks,
- setNBMCUStateTasks,
- notifyPCIEDeviceReadyTasks,*/
- NULL
-};
-
-const struct action_chain display_config_change_action_chain = {
- "Display configuration change",
- display_config_change_event
-};
-
-static const pem_event_action * const readjust_power_state_event[] = {
- adjust_power_state_tasks,
- NULL
-};
-
-const struct action_chain readjust_power_state_action_chain = {
- "re-adjust power state",
- readjust_power_state_event
-};
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
deleted file mode 100644
index f181e53cdcda..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_ACTION_CHAINS_H_
-#define _EVENT_ACTION_CHAINS_H_
-#include "eventmgr.h"
-
-extern const struct action_chain initialize_action_chain;
-
-extern const struct action_chain uninitialize_action_chain;
-
-extern const struct action_chain power_source_change_action_chain_pp_enabled;
-
-extern const struct action_chain power_source_changes_action_chain_pp_disabled;
-
-extern const struct action_chain power_source_change_action_chain_hardware_dc;
-
-extern const struct action_chain suspend_action_chain;
-
-extern const struct action_chain resume_action_chain;
-
-extern const struct action_chain complete_init_action_chain;
-
-extern const struct action_chain enable_gfx_clock_gating_action_chain;
-
-extern const struct action_chain disable_gfx_clock_gating_action_chain;
-
-extern const struct action_chain enable_cgpg_action_chain;
-
-extern const struct action_chain disable_cgpg_action_chain;
-
-extern const struct action_chain enable_user_2d_performance_action_chain;
-
-extern const struct action_chain disable_user_2d_performance_action_chain;
-
-extern const struct action_chain enable_user_state_action_chain;
-
-extern const struct action_chain readjust_power_state_action_chain;
-
-extern const struct action_chain display_config_change_action_chain;
-
-#endif /*_EVENT_ACTION_CHAINS_H_*/
-
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
deleted file mode 100644
index a3cd230d636d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "ppinterrupt.h"
-#include "hardwaremanager.h"
-
-void pem_init_feature_info(struct pp_eventmgr *eventmgr)
-{
-
- /* PowerPlay info */
- eventmgr->ui_state_info[PP_PowerSource_AC].default_ui_lable =
- PP_StateUILabel_Performance;
-
- eventmgr->ui_state_info[PP_PowerSource_AC].current_ui_label =
- PP_StateUILabel_Performance;
-
- eventmgr->ui_state_info[PP_PowerSource_DC].default_ui_lable =
- PP_StateUILabel_Battery;
-
- eventmgr->ui_state_info[PP_PowerSource_DC].current_ui_label =
- PP_StateUILabel_Battery;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_PowerPlaySupport)) {
- eventmgr->features[PP_Feature_PowerPlay].supported = true;
- eventmgr->features[PP_Feature_PowerPlay].version = PEM_CURRENT_POWERPLAY_FEATURE_VERSION;
- eventmgr->features[PP_Feature_PowerPlay].enabled_default = true;
- eventmgr->features[PP_Feature_PowerPlay].enabled = true;
- } else {
- eventmgr->features[PP_Feature_PowerPlay].supported = false;
- eventmgr->features[PP_Feature_PowerPlay].enabled = false;
- eventmgr->features[PP_Feature_PowerPlay].enabled_default = false;
- }
-
- eventmgr->features[PP_Feature_Force3DClock].supported = true;
- eventmgr->features[PP_Feature_Force3DClock].enabled = false;
- eventmgr->features[PP_Feature_Force3DClock].enabled_default = false;
- eventmgr->features[PP_Feature_Force3DClock].version = 1;
-
- /* over drive*/
- eventmgr->features[PP_Feature_User2DPerformance].version = 4;
- eventmgr->features[PP_Feature_User3DPerformance].version = 4;
- eventmgr->features[PP_Feature_OverdriveTest].version = 4;
-
- eventmgr->features[PP_Feature_OverDrive].version = 4;
- eventmgr->features[PP_Feature_OverDrive].enabled = false;
- eventmgr->features[PP_Feature_OverDrive].enabled_default = false;
-
- eventmgr->features[PP_Feature_User2DPerformance].supported = false;
- eventmgr->features[PP_Feature_User2DPerformance].enabled = false;
- eventmgr->features[PP_Feature_User2DPerformance].enabled_default = false;
-
- eventmgr->features[PP_Feature_User3DPerformance].supported = false;
- eventmgr->features[PP_Feature_User3DPerformance].enabled = false;
- eventmgr->features[PP_Feature_User3DPerformance].enabled_default = false;
-
- eventmgr->features[PP_Feature_OverdriveTest].supported = false;
- eventmgr->features[PP_Feature_OverdriveTest].enabled = false;
- eventmgr->features[PP_Feature_OverdriveTest].enabled_default = false;
-
- eventmgr->features[PP_Feature_OverDrive].supported = false;
-
- eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled_default = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].version = 1;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled = false;
-
- /* Multi UVD States support */
- eventmgr->features[PP_Feature_MultiUVDState].supported = false;
- eventmgr->features[PP_Feature_MultiUVDState].enabled = false;
- eventmgr->features[PP_Feature_MultiUVDState].enabled_default = false;
-
- /* Dynamic UVD States support */
- eventmgr->features[PP_Feature_DynamicUVDState].supported = false;
- eventmgr->features[PP_Feature_DynamicUVDState].enabled = false;
- eventmgr->features[PP_Feature_DynamicUVDState].enabled_default = false;
-
- /* VCE DPM support */
- eventmgr->features[PP_Feature_VCEDPM].supported = false;
- eventmgr->features[PP_Feature_VCEDPM].enabled = false;
- eventmgr->features[PP_Feature_VCEDPM].enabled_default = false;
-
- /* ACP PowerGating support */
- eventmgr->features[PP_Feature_ACP_POWERGATING].supported = false;
- eventmgr->features[PP_Feature_ACP_POWERGATING].enabled = false;
- eventmgr->features[PP_Feature_ACP_POWERGATING].enabled_default = false;
-
- /* PPM support */
- eventmgr->features[PP_Feature_PPM].version = 1;
- eventmgr->features[PP_Feature_PPM].supported = false;
- eventmgr->features[PP_Feature_PPM].enabled = false;
-
- /* FFC support (enables fan and temp settings, Gemini needs temp settings) */
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport) ||
- phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_GeminiRegulatorFanControlSupport)) {
- eventmgr->features[PP_Feature_FFC].version = 1;
- eventmgr->features[PP_Feature_FFC].supported = true;
- eventmgr->features[PP_Feature_FFC].enabled = true;
- eventmgr->features[PP_Feature_FFC].enabled_default = true;
- } else {
- eventmgr->features[PP_Feature_FFC].supported = false;
- eventmgr->features[PP_Feature_FFC].enabled = false;
- eventmgr->features[PP_Feature_FFC].enabled_default = false;
- }
-
- eventmgr->features[PP_Feature_VariBright].supported = false;
- eventmgr->features[PP_Feature_VariBright].enabled = false;
- eventmgr->features[PP_Feature_VariBright].enabled_default = false;
-
- eventmgr->features[PP_Feature_BACO].supported = false;
- eventmgr->features[PP_Feature_BACO].supported = false;
- eventmgr->features[PP_Feature_BACO].enabled_default = false;
-
- /* PowerDown feature support */
- eventmgr->features[PP_Feature_PowerDown].supported = false;
- eventmgr->features[PP_Feature_PowerDown].enabled = false;
- eventmgr->features[PP_Feature_PowerDown].enabled_default = false;
-
- eventmgr->features[PP_Feature_FPS].version = 1;
- eventmgr->features[PP_Feature_FPS].supported = false;
- eventmgr->features[PP_Feature_FPS].enabled_default = false;
- eventmgr->features[PP_Feature_FPS].enabled = false;
-
- eventmgr->features[PP_Feature_ViPG].version = 1;
- eventmgr->features[PP_Feature_ViPG].supported = false;
- eventmgr->features[PP_Feature_ViPG].enabled_default = false;
- eventmgr->features[PP_Feature_ViPG].enabled = false;
-}
-
-static int thermal_interrupt_callback(void *private_data,
- unsigned src_id, const uint32_t *iv_entry)
-{
- /* TO DO hanle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data*/
- pr_info("current thermal is out of range \n");
- return 0;
-}
-
-int pem_register_interrupts(struct pp_eventmgr *eventmgr)
-{
- int result = 0;
- struct pp_interrupt_registration_info info;
-
- info.call_back = thermal_interrupt_callback;
- info.context = eventmgr;
-
- result = phm_register_thermal_interrupt(eventmgr->hwmgr, &info);
-
- /* TODO:
- * 2. Register CTF event interrupt
- * 3. Register for vbios events interrupt
- * 4. Register External Throttle Interrupt
- * 5. Register Smc To Host Interrupt
- * */
- return result;
-}
-
-
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr)
-{
- return 0;
-}
-
-
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr)
-{
- eventmgr->features[PP_Feature_MultiUVDState].supported = false;
- eventmgr->features[PP_Feature_VariBright].supported = false;
- eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
- eventmgr->features[PP_Feature_OverDrive].supported = false;
- eventmgr->features[PP_Feature_OverdriveTest].supported = false;
- eventmgr->features[PP_Feature_User3DPerformance].supported = false;
- eventmgr->features[PP_Feature_User2DPerformance].supported = false;
- eventmgr->features[PP_Feature_PowerPlay].supported = false;
- eventmgr->features[PP_Feature_Force3DClock].supported = false;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
deleted file mode 100644
index cd1ca07ef7f7..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmanagement.h"
-#include "eventmgr.h"
-#include "eventactionchains.h"
-
-int pem_init_event_action_chains(struct pp_eventmgr *eventmgr)
-{
- int i;
-
- for (i = 0; i < AMD_PP_EVENT_MAX; i++)
- eventmgr->event_chain[i] = NULL;
-
- eventmgr->event_chain[AMD_PP_EVENT_SUSPEND] = pem_get_suspend_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_INITIALIZE] = pem_get_initialize_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_UNINITIALIZE] = pem_get_uninitialize_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_POWER_SOURCE_CHANGE] = pem_get_power_source_change_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_HIBERNATE] = pem_get_hibernate_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_RESUME] = pem_get_resume_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_THERMAL_NOTIFICATION] = pem_get_thermal_notification_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_VBIOS_NOTIFICATION] = pem_get_vbios_notification_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENTER_THERMAL_STATE] = pem_get_enter_thermal_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_EXIT_THERMAL_STATE] = pem_get_exit_thermal_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_POWER_PLAY] = pem_get_enable_powerplay_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_POWER_PLAY] = pem_get_disable_powerplay_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST] = pem_get_enable_overdrive_test_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST] = pem_get_disable_overdrive_test_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING] = pem_get_enable_gfx_clock_gating_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING] = pem_get_disable_gfx_clock_gating_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_CGPG] = pem_get_enable_cgpg_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISABLE_CGPG] = pem_get_disable_cgpg_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_COMPLETE_INIT] = pem_get_complete_init_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_SCREEN_ON] = pem_get_screen_on_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_SCREEN_OFF] = pem_get_screen_off_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_PRE_SUSPEND] = pem_get_pre_suspend_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_PRE_RESUME] = pem_get_pre_resume_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_ENABLE_USER_STATE] = pem_enable_user_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_READJUST_POWER_STATE] = pem_readjust_power_state_action_chain(eventmgr);
- eventmgr->event_chain[AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE] = pem_display_config_change_action_chain(eventmgr);
- return 0;
-}
-
-int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data)
-{
- const pem_event_action * const *paction_chain;
- const pem_event_action *psub_chain;
- int tmp_result = 0;
- int result = 0;
-
- if (eventmgr == NULL || event_chain == NULL || event_data == NULL)
- return -EINVAL;
-
- for (paction_chain = event_chain->action_chain; NULL != *paction_chain; paction_chain++) {
- if (0 != result)
- return result;
-
- for (psub_chain = *paction_chain; NULL != *psub_chain; psub_chain++) {
- tmp_result = (*psub_chain)(eventmgr, event_data);
- if (0 == result)
- result = tmp_result;
- }
- }
-
- return result;
-}
-
-const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &suspend_action_chain;
-}
-
-const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &initialize_action_chain;
-}
-
-const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &uninitialize_action_chain;
-}
-
-const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &power_source_change_action_chain_pp_enabled; /* other case base on feature info*/
-}
-
-const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &resume_action_chain;
-}
-
-const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_gfx_clock_gating_action_chain;
-}
-
-const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &disable_gfx_clock_gating_action_chain;
-}
-
-const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_cgpg_action_chain;
-}
-
-const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &disable_cgpg_action_chain;
-}
-
-const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &complete_init_action_chain;
-}
-
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr)
-{
- return NULL;
-}
-
-const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &enable_user_state_action_chain;
-}
-
-const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &readjust_power_state_action_chain;
-}
-
-const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr)
-{
- return &display_config_change_action_chain;
-}
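
The file above is the heart of what this patch deletes: an event maps to a NULL-terminated array of sub-chains, each itself a NULL-terminated array of task callbacks. A simplified model of that double loop, mirroring pem_excute_event_chain() above; the typedef and names here are illustrative stand-ins for pem_event_action and the (eventmgr, event_data) pair:

	/* Simplified model of the removed action-chain dispatch. */
	typedef int (*task_fn)(void *ctx);

	static int run_chain(const task_fn * const *chain, void *ctx)
	{
		const task_fn *sub;
		int result = 0;

		for (; *chain; chain++) {
			/* stop once a whole sub-chain has failed */
			if (result)
				return result;
			for (sub = *chain; *sub; sub++) {
				int tmp = (*sub)(ctx);

				if (!result)
					result = tmp;	/* keep first error */
			}
		}
		return result;
	}
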
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
deleted file mode 100644
index 383d4b295aa9..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_MANAGEMENT_H_
-#define _EVENT_MANAGEMENT_H_
-
-#include "eventmgr.h"
-
-int pem_init_event_action_chains(struct pp_eventmgr *eventmgr);
-int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data);
-const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr);
-
-extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr);
-extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr);
-
-
-#endif /* _EVENT_MANAGEMENT_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
deleted file mode 100644
index 3e3ca03bd344..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "eventmgr.h"
-#include "hwmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-
-static int pem_init(struct pp_eventmgr *eventmgr)
-{
- int result = 0;
- struct pem_event_data event_data = { {0} };
-
- /* Initialize PowerPlay feature info */
- pem_init_feature_info(eventmgr);
-
- /* Initialize event action chains */
- pem_init_event_action_chains(eventmgr);
-
- /* Call initialization event */
- result = pem_handle_event(eventmgr, AMD_PP_EVENT_INITIALIZE, &event_data);
-
- /* if (0 != result)
- return result; */
-
- /* Register interrupt callback functions */
- result = pem_register_interrupts(eventmgr);
- return 0;
-}
-
-static void pem_fini(struct pp_eventmgr *eventmgr)
-{
- struct pem_event_data event_data = { {0} };
-
- pem_uninit_featureInfo(eventmgr);
- pem_unregister_interrupts(eventmgr);
-
- pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-}
-
-int eventmgr_early_init(struct pp_instance *handle)
-{
- struct pp_eventmgr *eventmgr;
-
- if (handle == NULL)
- return -EINVAL;
-
- eventmgr = kzalloc(sizeof(struct pp_eventmgr), GFP_KERNEL);
- if (eventmgr == NULL)
- return -ENOMEM;
-
- eventmgr->hwmgr = handle->hwmgr;
- handle->eventmgr = eventmgr;
-
- eventmgr->platform_descriptor = &(eventmgr->hwmgr->platform_descriptor);
- eventmgr->pp_eventmgr_init = pem_init;
- eventmgr->pp_eventmgr_fini = pem_fini;
-
- return 0;
-}
-
-static int pem_handle_event_unlocked(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *data)
-{
- if (eventmgr == NULL || event >= AMD_PP_EVENT_MAX || data == NULL)
- return -EINVAL;
-
- return pem_excute_event_chain(eventmgr, eventmgr->event_chain[event], data);
-}
-
-int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *event_data)
-{
- int r = 0;
-
- r = pem_handle_event_unlocked(eventmgr, event, event_data);
-
- return r;
-}
-
-bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr)
-{
- return (eventmgr->block_adjust_power_state || phm_is_hw_access_blocked(eventmgr->hwmgr));
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
deleted file mode 100644
index b82c43af59ab..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.c
+++ /dev/null
@@ -1,410 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "eventmgr.h"
-#include "eventsubchains.h"
-#include "eventtasks.h"
-#include "hardwaremanager.h"
-
-const pem_event_action reset_display_phy_access_tasks[] = {
- pem_task_reset_display_phys_access,
- NULL
-};
-
-const pem_event_action broadcast_power_policy_tasks[] = {
- /* PEM_Task_BroadcastPowerPolicyChange, */
- NULL
-};
-
-const pem_event_action unregister_interrupt_tasks[] = {
- pem_task_unregister_interrupts,
- NULL
-};
-
-/* Disable GFX Voltage Islands Power Gating */
-const pem_event_action disable_gfx_voltage_island_powergating_tasks[] = {
- pem_task_disable_voltage_island_power_gating,
- NULL
-};
-
-const pem_event_action disable_gfx_clockgating_tasks[] = {
- pem_task_disable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action block_adjust_power_state_tasks[] = {
- pem_task_block_adjust_power_state,
- NULL
-};
-
-
-const pem_event_action unblock_adjust_power_state_tasks[] = {
- pem_task_unblock_adjust_power_state,
- NULL
-};
-
-const pem_event_action set_performance_state_tasks[] = {
- pem_task_set_performance_state,
- NULL
-};
-
-const pem_event_action get_2d_performance_state_tasks[] = {
- pem_task_get_2D_performance_state_id,
- NULL
-};
-
-const pem_event_action conditionally_force3D_performance_state_tasks[] = {
- pem_task_conditionally_force_3d_performance_state,
- NULL
-};
-
-const pem_event_action process_vbios_eventinfo_tasks[] = {
- /* PEM_Task_ProcessVbiosEventInfo,*/
- NULL
-};
-
-const pem_event_action enable_dynamic_state_management_tasks[] = {
- /* PEM_Task_ResetBAPMPolicyChangedFlag,*/
- pem_task_get_boot_state_id,
- pem_task_enable_dynamic_state_management,
- pem_task_register_interrupts,
- NULL
-};
-
-const pem_event_action enable_clock_power_gatings_tasks[] = {
- pem_task_enable_clock_power_gatings_tasks,
- pem_task_powerdown_uvd_tasks,
- pem_task_powerdown_vce_tasks,
- NULL
-};
-
-const pem_event_action setup_asic_tasks[] = {
- pem_task_setup_asic,
- NULL
-};
-
-const pem_event_action power_budget_tasks[] = {
- /* TODO
- * PEM_Task_PowerBudgetWaiverAvailable,
- * PEM_Task_PowerBudgetWarningMessage,
- * PEM_Task_PruneStatesBasedOnPowerBudget,
- */
- NULL
-};
-
-const pem_event_action system_config_tasks[] = {
- /* PEM_Task_PruneStatesBasedOnSystemConfig,*/
- NULL
-};
-
-
-const pem_event_action conditionally_force_3d_performance_state_tasks[] = {
- pem_task_conditionally_force_3d_performance_state,
- NULL
-};
-
-const pem_event_action ungate_all_display_phys_tasks[] = {
- /* PEM_Task_GetDisplayPhyAccessInfo */
- NULL
-};
-
-const pem_event_action uninitialize_display_phy_access_tasks[] = {
- /* PEM_Task_UninitializeDisplayPhysAccess, */
- NULL
-};
-
-const pem_event_action disable_gfx_voltage_island_power_gating_tasks[] = {
- /* PEM_Task_DisableVoltageIslandPowerGating, */
- NULL
-};
-
-const pem_event_action disable_gfx_clock_gating_tasks[] = {
- pem_task_disable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action set_boot_state_tasks[] = {
- pem_task_get_boot_state_id,
- pem_task_set_boot_state,
- NULL
-};
-
-const pem_event_action adjust_power_state_tasks[] = {
- pem_task_notify_hw_mgr_display_configuration_change,
- pem_task_adjust_power_state,
- pem_task_notify_smc_display_config_after_power_state_adjustment,
- pem_task_update_allowed_performance_levels,
- /* to do pem_task_Enable_disable_bapm, */
- NULL
-};
-
-const pem_event_action disable_dynamic_state_management_tasks[] = {
- pem_task_unregister_interrupts,
- pem_task_get_boot_state_id,
- pem_task_disable_dynamic_state_management,
- NULL
-};
-
-const pem_event_action disable_clock_power_gatings_tasks[] = {
- pem_task_disable_clock_power_gatings_tasks,
- NULL
-};
-
-const pem_event_action cleanup_asic_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- pem_task_cleanup_asic,
- NULL
-};
-
-const pem_event_action prepare_for_pnp_stop_tasks[] = {
- /* PEM_Task_PrepareForPnpStop,*/
- NULL
-};
-
-const pem_event_action set_power_source_tasks[] = {
- pem_task_set_power_source,
- pem_task_notify_hw_of_power_source,
- NULL
-};
-
-const pem_event_action set_power_saving_state_tasks[] = {
- pem_task_reset_power_saving_state,
- pem_task_get_power_saving_state,
- pem_task_set_power_saving_state,
- /* PEM_Task_ResetODDCState,
- * PEM_Task_GetODDCState,
- * PEM_Task_SetODDCState,*/
- NULL
-};
-
-const pem_event_action enable_disable_fps_tasks[] = {
- /* PEM_Task_EnableDisableFPS,*/
- NULL
-};
-
-const pem_event_action set_nbmcu_state_tasks[] = {
- /* PEM_Task_NBMCUStateChange,*/
- NULL
-};
-
-const pem_event_action reset_hardware_dc_notification_tasks[] = {
- /* PEM_Task_ResetHardwareDCNotification,*/
- NULL
-};
-
-
-const pem_event_action notify_smu_suspend_tasks[] = {
- /* PEM_Task_NotifySMUSuspend,*/
- NULL
-};
-
-const pem_event_action disable_smc_firmware_ctf_tasks[] = {
- pem_task_disable_smc_firmware_ctf,
- NULL
-};
-
-const pem_event_action disable_fps_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- NULL
-};
-
-const pem_event_action vari_bright_suspend_tasks[] = {
- /* PEM_Task_VariBright_Suspend,*/
- NULL
-};
-
-const pem_event_action reset_fan_speed_to_default_tasks[] = {
- /* PEM_Task_ResetFanSpeedToDefault,*/
- NULL
-};
-
-const pem_event_action power_down_asic_tasks[] = {
- /* PEM_Task_DisableFPS,*/
- pem_task_power_down_asic,
- NULL
-};
-
-const pem_event_action disable_stutter_mode_tasks[] = {
- /* PEM_Task_DisableStutterMode,*/
- NULL
-};
-
-const pem_event_action set_connected_standby_tasks[] = {
- /* PEM_Task_SetConnectedStandby,*/
- NULL
-};
-
-const pem_event_action block_hw_access_tasks[] = {
- pem_task_block_hw_access,
- NULL
-};
-
-const pem_event_action unblock_hw_access_tasks[] = {
- pem_task_un_block_hw_access,
- NULL
-};
-
-const pem_event_action resume_connected_standby_tasks[] = {
- /* PEM_Task_ResumeConnectedStandby,*/
- NULL
-};
-
-const pem_event_action notify_smu_resume_tasks[] = {
- /* PEM_Task_NotifySMUResume,*/
- NULL
-};
-
-const pem_event_action reset_display_configCounter_tasks[] = {
- pem_task_reset_display_phys_access,
- NULL
-};
-
-const pem_event_action update_dal_configuration_tasks[] = {
- /* PEM_Task_CheckVBlankTime,*/
- NULL
-};
-
-const pem_event_action vari_bright_resume_tasks[] = {
- /* PEM_Task_VariBright_Resume,*/
- NULL
-};
-
-const pem_event_action notify_hw_power_source_tasks[] = {
- pem_task_notify_hw_of_power_source,
- NULL
-};
-
-const pem_event_action process_vbios_event_info_tasks[] = {
- /* PEM_Task_ProcessVbiosEventInfo,*/
- NULL
-};
-
-const pem_event_action enable_gfx_clock_gating_tasks[] = {
- pem_task_enable_gfx_clock_gating,
- NULL
-};
-
-const pem_event_action enable_gfx_voltage_island_power_gating_tasks[] = {
- pem_task_enable_voltage_island_power_gating,
- NULL
-};
-
-const pem_event_action reset_clock_gating_tasks[] = {
- /* PEM_Task_ResetClockGating*/
- NULL
-};
-
-const pem_event_action notify_smu_vpu_recovery_end_tasks[] = {
- /* PEM_Task_NotifySmuVPURecoveryEnd,*/
- NULL
-};
-
-const pem_event_action disable_vpu_cap_tasks[] = {
- /* PEM_Task_DisableVPUCap,*/
- NULL
-};
-
-const pem_event_action execute_escape_sequence_tasks[] = {
- /* PEM_Task_ExecuteEscapesequence,*/
- NULL
-};
-
-const pem_event_action notify_power_state_change_tasks[] = {
- pem_task_notify_power_state_change,
- NULL
-};
-
-const pem_event_action enable_cgpg_tasks[] = {
- pem_task_enable_cgpg,
- NULL
-};
-
-const pem_event_action disable_cgpg_tasks[] = {
- pem_task_disable_cgpg,
- NULL
-};
-
-const pem_event_action enable_user_2d_performance_tasks[] = {
- /* PEM_Task_SetUser2DPerformanceFlag,*/
- /* PEM_Task_UpdateUser2DPerformanceEnableEvents,*/
- NULL
-};
-
-const pem_event_action add_user_2d_performance_state_tasks[] = {
- /* PEM_Task_Get2DPerformanceTemplate,*/
- /* PEM_Task_AllocateNewPowerStateMemory,*/
- /* PEM_Task_CopyNewPowerStateInfo,*/
- /* PEM_Task_UpdateNewPowerStateClocks,*/
- /* PEM_Task_UpdateNewPowerStateUser2DPerformanceFlag,*/
- /* PEM_Task_AddPowerState,*/
- /* PEM_Task_ReleaseNewPowerStateMemory,*/
- NULL
-};
-
-const pem_event_action delete_user_2d_performance_state_tasks[] = {
- /* PEM_Task_GetCurrentUser2DPerformanceStateID,*/
- /* PEM_Task_DeletePowerState,*/
- /* PEM_Task_SetCurrentUser2DPerformanceStateID,*/
- NULL
-};
-
-const pem_event_action disable_user_2d_performance_tasks[] = {
- /* PEM_Task_ResetUser2DPerformanceFlag,*/
- /* PEM_Task_UpdateUser2DPerformanceDisableEvents,*/
- NULL
-};
-
-const pem_event_action enable_stutter_mode_tasks[] = {
- pem_task_enable_stutter_mode,
- NULL
-};
-
-const pem_event_action enable_disable_bapm_tasks[] = {
- /*PEM_Task_EnableDisableBAPM,*/
- NULL
-};
-
-const pem_event_action reset_boot_state_tasks[] = {
- pem_task_reset_boot_state,
- NULL
-};
-
-const pem_event_action create_new_user_performance_state_tasks[] = {
- pem_task_create_user_performance_state,
- NULL
-};
-
-const pem_event_action initialize_thermal_controller_tasks[] = {
- pem_task_initialize_thermal_controller,
- NULL
-};
-
-const pem_event_action uninitialize_thermal_controller_tasks[] = {
- pem_task_uninitialize_thermal_controller,
- NULL
-};
-
-const pem_event_action set_cpu_power_state[] = {
- pem_task_set_cpu_power_state,
- NULL
-};
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h
deleted file mode 100644
index 7714cb927428..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventsubchains.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENT_SUB_CHAINS_H_
-#define _EVENT_SUB_CHAINS_H_
-
-#include "eventmgr.h"
-
-extern const pem_event_action reset_display_phy_access_tasks[];
-extern const pem_event_action broadcast_power_policy_tasks[];
-extern const pem_event_action unregister_interrupt_tasks[];
-extern const pem_event_action disable_GFX_voltage_island_powergating_tasks[];
-extern const pem_event_action disable_GFX_clockgating_tasks[];
-extern const pem_event_action block_adjust_power_state_tasks[];
-extern const pem_event_action unblock_adjust_power_state_tasks[];
-extern const pem_event_action set_performance_state_tasks[];
-extern const pem_event_action get_2D_performance_state_tasks[];
-extern const pem_event_action conditionally_force3D_performance_state_tasks[];
-extern const pem_event_action process_vbios_eventinfo_tasks[];
-extern const pem_event_action enable_dynamic_state_management_tasks[];
-extern const pem_event_action enable_clock_power_gatings_tasks[];
-extern const pem_event_action conditionally_force3D_performance_state_tasks[];
-extern const pem_event_action setup_asic_tasks[];
-extern const pem_event_action power_budget_tasks[];
-extern const pem_event_action system_config_tasks[];
-extern const pem_event_action get_2d_performance_state_tasks[];
-extern const pem_event_action conditionally_force_3d_performance_state_tasks[];
-extern const pem_event_action ungate_all_display_phys_tasks[];
-extern const pem_event_action uninitialize_display_phy_access_tasks[];
-extern const pem_event_action disable_gfx_voltage_island_power_gating_tasks[];
-extern const pem_event_action disable_gfx_clock_gating_tasks[];
-extern const pem_event_action set_boot_state_tasks[];
-extern const pem_event_action adjust_power_state_tasks[];
-extern const pem_event_action disable_dynamic_state_management_tasks[];
-extern const pem_event_action disable_clock_power_gatings_tasks[];
-extern const pem_event_action cleanup_asic_tasks[];
-extern const pem_event_action prepare_for_pnp_stop_tasks[];
-extern const pem_event_action set_power_source_tasks[];
-extern const pem_event_action set_power_saving_state_tasks[];
-extern const pem_event_action enable_disable_fps_tasks[];
-extern const pem_event_action set_nbmcu_state_tasks[];
-extern const pem_event_action reset_hardware_dc_notification_tasks[];
-extern const pem_event_action notify_smu_suspend_tasks[];
-extern const pem_event_action disable_smc_firmware_ctf_tasks[];
-extern const pem_event_action disable_fps_tasks[];
-extern const pem_event_action vari_bright_suspend_tasks[];
-extern const pem_event_action reset_fan_speed_to_default_tasks[];
-extern const pem_event_action power_down_asic_tasks[];
-extern const pem_event_action disable_stutter_mode_tasks[];
-extern const pem_event_action set_connected_standby_tasks[];
-extern const pem_event_action block_hw_access_tasks[];
-extern const pem_event_action unblock_hw_access_tasks[];
-extern const pem_event_action resume_connected_standby_tasks[];
-extern const pem_event_action notify_smu_resume_tasks[];
-extern const pem_event_action reset_display_configCounter_tasks[];
-extern const pem_event_action update_dal_configuration_tasks[];
-extern const pem_event_action vari_bright_resume_tasks[];
-extern const pem_event_action notify_hw_power_source_tasks[];
-extern const pem_event_action process_vbios_event_info_tasks[];
-extern const pem_event_action enable_gfx_clock_gating_tasks[];
-extern const pem_event_action enable_gfx_voltage_island_power_gating_tasks[];
-extern const pem_event_action reset_clock_gating_tasks[];
-extern const pem_event_action notify_smu_vpu_recovery_end_tasks[];
-extern const pem_event_action disable_vpu_cap_tasks[];
-extern const pem_event_action execute_escape_sequence_tasks[];
-extern const pem_event_action notify_power_state_change_tasks[];
-extern const pem_event_action enable_cgpg_tasks[];
-extern const pem_event_action disable_cgpg_tasks[];
-extern const pem_event_action enable_user_2d_performance_tasks[];
-extern const pem_event_action add_user_2d_performance_state_tasks[];
-extern const pem_event_action delete_user_2d_performance_state_tasks[];
-extern const pem_event_action disable_user_2d_performance_tasks[];
-extern const pem_event_action enable_stutter_mode_tasks[];
-extern const pem_event_action enable_disable_bapm_tasks[];
-extern const pem_event_action reset_boot_state_tasks[];
-extern const pem_event_action create_new_user_performance_state_tasks[];
-extern const pem_event_action initialize_thermal_controller_tasks[];
-extern const pem_event_action uninitialize_thermal_controller_tasks[];
-extern const pem_event_action set_cpu_power_state[];
-#endif /* _EVENT_SUB_CHAINS_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
deleted file mode 100644
index 8c4ebaae1e0c..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-#include "eventmanager.h"
-#include "hardwaremanager.h"
-#include "eventtasks.h"
-#include "power_state.h"
-#include "hwmgr.h"
-#include "amd_powerplay.h"
-#include "psm.h"
-
-#define TEMP_RANGE_MIN (90 * 1000)
-#define TEMP_RANGE_MAX (120 * 1000)
-
-int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
-
- if (eventmgr == NULL || eventmgr->hwmgr == NULL)
- return -EINVAL;
-
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- phm_force_dpm_levels(eventmgr->hwmgr, eventmgr->hwmgr->dpm_level);
-
- return 0;
-}
-
-/* eventtasks_generic.c */
-int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct pp_hwmgr *hwmgr;
-
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- hwmgr = eventmgr->hwmgr;
- if (event_data->pnew_power_state != NULL)
- hwmgr->request_ps = event_data->pnew_power_state;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
- psm_adjust_power_state_dynamic(eventmgr, event_data->skip_state_adjust_rules);
- else
- psm_adjust_power_state_static(eventmgr, event_data->skip_state_adjust_rules);
-
- return 0;
-}
-
-int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_power_down_asic(eventmgr->hwmgr);
-}
-
-int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID))
- return psm_set_states(eventmgr, &(event_data->requested_state_id));
-
- return 0;
-}
-
-int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return pem_unregister_interrupts(eventmgr);
-}
-
-int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- int result;
-
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_Boot,
- &(event_data->requested_state_id)
- );
-
- if (0 == result)
- pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
- else
- pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
-
- return result;
-}
-
-int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_enable_dynamic_state_management(eventmgr->hwmgr);
-}
-
-int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_disable_dynamic_state_management(eventmgr->hwmgr);
-}
-
-int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_enable_clock_power_gatings(eventmgr->hwmgr);
-}
-
-int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_powerdown_uvd(eventmgr->hwmgr);
-}
-
-int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- phm_powergate_uvd(eventmgr->hwmgr, true);
- phm_powergate_vce(eventmgr->hwmgr, true);
- return 0;
-}
-
-int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- phm_disable_clock_power_gatings(eventmgr->hwmgr);
- return 0;
-}
-
-int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_disable_smc_firmware_ctf(eventmgr->hwmgr);
-}
-
-int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_setup_asic(eventmgr->hwmgr);
-}
-
-int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_store_dal_configuration(struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config)
-{
- /* TODO */
- return 0;
- /*phm_store_dal_configuration_data(eventmgr->hwmgr, display_config) */
-}
-
-int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- return phm_display_configuration_changed(eventmgr->hwmgr);
-}
-
-int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return 0;
-}
-
-int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_hw_access_blocked(eventmgr))
- return 0;
-
- return phm_notify_smc_display_config_after_ps_adjustment(eventmgr->hwmgr);
-}
-
-int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- eventmgr->block_adjust_power_state = true;
- /* to do PHM_ResetIPSCounter(pEventMgr->pHwMgr);*/
- return 0;
-}
-
-int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- eventmgr->block_adjust_power_state = false;
- return 0;
-}
-
-int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_set_cpu_power_state(eventmgr->hwmgr);
-}
-
-/*powersaving*/
-
-int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_clock_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-
-int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-
-/* performance */
-int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- if (pem_is_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID))
- return psm_set_states(eventmgr, &(event_data->requested_state_id));
-
- return 0;
-}
-
-int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- /* TODO */
- return 0;
-}
-
-int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- int result;
-
- if (eventmgr->features[PP_Feature_PowerPlay].supported &&
- !(eventmgr->features[PP_Feature_PowerPlay].enabled))
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_Boot,
- &(event_data->requested_state_id));
- else if (eventmgr->features[PP_Feature_User2DPerformance].enabled)
- result = psm_get_state_by_classification(eventmgr,
- PP_StateClassificationFlag_User2DPerformance,
- &(event_data->requested_state_id));
- else
- result = psm_get_ui_state(eventmgr, PP_StateUILabel_Performance,
- &(event_data->requested_state_id));
-
- if (0 == result)
- pem_set_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
- else
- pem_unset_event_data_valid(event_data->valid_fields, PEM_EventDataValid_RequestedStateID);
-
- return result;
-}
-
-int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
-restart_search:
- for (i = 0; i < table_entries; i++) {
- if (state->classification.ui_label & event_data->requested_ui_label) {
- event_data->pnew_power_state = state;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
-
- switch (event_data->requested_ui_label) {
- case PP_StateUILabel_Battery:
- case PP_StateUILabel_Balanced:
- event_data->requested_ui_label = PP_StateUILabel_Performance;
- goto restart_search;
- default:
- break;
- }
- return -1;
-}
-
-int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- struct PP_TemperatureRange range;
-
- range.max = TEMP_RANGE_MAX;
- range.min = TEMP_RANGE_MIN;
-
- if (eventmgr == NULL || eventmgr->platform_descriptor == NULL)
- return -EINVAL;
-
- if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ThermalController))
- return phm_start_thermal_controller(eventmgr->hwmgr, &range);
-
- return 0;
-}
-
-int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
-{
- return phm_stop_thermal_controller(eventmgr->hwmgr);
-}
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
deleted file mode 100644
index 37e7ca5a58e0..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENT_TASKS_H_
-#define _EVENT_TASKS_H_
-#include "eventmgr.h"
-
-struct amd_display_configuration;
-
-/* eventtasks_generic.c */
-int pem_task_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_power_down_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_boot_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_boot_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_update_new_power_state_clocks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_system_shutdown(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_register_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_unregister_interrupts(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_powerdown_uvd_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_powerdown_vce_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_start_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_stop_asic_block_usage(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_setup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_cleanup_asic(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_store_dal_configuration (struct pp_eventmgr *eventmgr, const struct amd_display_configuration *display_config);
-int pem_task_notify_hw_mgr_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_hw_mgr_pre_display_configuration_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_block_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_unblock_adjust_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_power_state_change(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_un_block_hw_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_display_phys_access(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_cpu_power_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_smc_display_config_after_power_state_adjustment(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-/*powersaving*/
-
-int pem_task_set_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_notify_hw_of_power_source(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_reset_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_power_saving_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_screen_state_on(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_set_screen_state_off(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_voltage_island_power_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_cgpg(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_gfx_clock_gating(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_enable_stutter_mode(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-
-/* performance */
-int pem_task_set_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_conditionally_force_3d_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_get_2D_performance_state_id(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_create_user_performance_state(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_update_allowed_performance_levels(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-/*thermal */
-int pem_task_initialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_uninitialize_thermal_controller(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-int pem_task_disable_smc_firmware_ctf(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data);
-
-#endif /* _EVENT_TASKS_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c b/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
deleted file mode 100644
index 489908887e9c..000000000000
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "psm.h"
-
-int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->classification.ui_label & ui_label) {
- *state_id = state->id;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->classification.flags & flag) {
- *state_id = state->id;
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id)
-{
- struct pp_power_state *state;
- int table_entries;
- struct pp_hwmgr *hwmgr = eventmgr->hwmgr;
- int i;
-
- table_entries = hwmgr->num_ps;
-
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- if (state->id == *state_id) {
- memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
- return 0;
- }
- state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
- }
- return -1;
-}
-
-int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip)
-{
-
- struct pp_power_state *pcurrent;
- struct pp_power_state *requested;
- struct pp_hwmgr *hwmgr;
- bool equal;
-
- if (skip)
- return 0;
-
- hwmgr = eventmgr->hwmgr;
- pcurrent = hwmgr->current_ps;
- requested = hwmgr->request_ps;
-
- if (requested == NULL)
- return 0;
-
- phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
-
- if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr, &pcurrent->hardware, &requested->hardware, &equal)))
- equal = false;
-
- if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
- phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
- memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
- }
- return 0;
-}
-
-int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip)
-{
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index f0277c16c2bf..dc4bbcfe1243 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -2,14 +2,15 @@
# Makefile for the 'hw manager' sub-component of powerplay.
# It provides the hardware management services for the driver.
-HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \
+HARDWARE_MGR = hwmgr.o processpptables.o \
hardwaremanager.o pp_acpi.o cz_hwmgr.o \
cz_clockpowergating.o pppcielanes.o\
process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \
smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \
smu7_clockpowergating.o \
vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \
- vega10_thermal.o pp_overdriver.o rv_hwmgr.o
+ vega10_thermal.o rv_hwmgr.o pp_psm.o\
+ pp_overdriver.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b33935fcf428..44de0874629f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -103,16 +103,6 @@ int cz_phm_ungate_all_display_phys(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_tf_uvd_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
-{
- return 0;
-}
-
-static int cz_tf_vce_power_gating_initialize(struct pp_hwmgr *hwmgr, void *pInput, void *pOutput, void *pStorage, int Result)
-{
- return 0;
-}
-
int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -123,12 +113,12 @@ int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
PHM_PlatformCaps_UVDDPM)) {
cz_hwmgr->dpm_flags |= DPMFlags_UVD_Enabled;
dpm_features |= UVD_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
} else {
dpm_features |= UVD_DPM_MASK;
cz_hwmgr->dpm_flags &= ~DPMFlags_UVD_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
}
return 0;
@@ -144,12 +134,12 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
PHM_PlatformCaps_VCEDPM)) {
cz_hwmgr->dpm_flags |= DPMFlags_VCE_Enabled;
dpm_features |= VCE_DPM_MASK;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
} else {
dpm_features |= VCE_DPM_MASK;
cz_hwmgr->dpm_flags &= ~DPMFlags_VCE_Enabled;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
}
@@ -157,7 +147,7 @@ int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
}
-int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -183,10 +173,9 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
cz_dpm_update_uvd_dpm(hwmgr, false);
}
- return 0;
}
-int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -215,29 +204,6 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
AMD_CG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
- return 0;
}
-
- return 0;
}
-
-static const struct phm_master_table_item cz_enable_clock_power_gatings_list[] = {
- /*we don't need an exit table here, because there is only D3 cold on Kv*/
- {
- .isFunctionNeededInRuntimeTable = phm_cf_want_uvd_power_gating,
- .tableFunction = cz_tf_uvd_power_gating_initialize
- },
- {
- .isFunctionNeededInRuntimeTable = phm_cf_want_vce_power_gating,
- .tableFunction = cz_tf_vce_power_gating_initialize
- },
- /* to do { NULL, cz_tf_xdma_power_gating_enable }, */
- { }
-};
-
-const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_enable_clock_power_gatings_list
-};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
index 1954ceaed439..92f707bc46e7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.h
@@ -29,8 +29,8 @@
extern int cz_phm_set_asic_block_gating(struct pp_hwmgr *hwmgr, enum PHM_AsicBlock block, enum PHM_ClockGateSetting gating);
extern const struct phm_master_table_header cz_phm_enable_clock_power_gatings_master;
-extern int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-extern int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+extern void cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+extern void cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
extern int cz_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
extern int cz_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable);
#endif /* _CZ_CLOCK_POWER_GATING_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index bc839ff0bdd0..73bb99d62a44 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -162,8 +162,8 @@ static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
if (cz_hwmgr->max_sclk_level == 0) {
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxSclkLevel);
- cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr->smumgr) + 1;
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
+ cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1;
}
return cz_hwmgr->max_sclk_level;
@@ -440,14 +440,7 @@ static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_tf_reset_active_process_mask(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
-{
- return 0;
-}
-
-static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
struct SMU8_Fusion_ClkTable *clock_table;
int ret;
@@ -469,7 +462,7 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
if (!hwmgr->need_pp_table_upload)
return 0;
- ret = smum_download_powerplay_table(hwmgr->smumgr, &table);
+ ret = smum_download_powerplay_table(hwmgr, &table);
PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
"Fail to get clock table from SMU!", return -EINVAL;);
@@ -561,13 +554,12 @@ static int cz_tf_upload_pptable_to_smu(struct pp_hwmgr *hwmgr, void *input,
(uint8_t)dividers.pll_post_divider;
}
- ret = smum_upload_powerplay_table(hwmgr->smumgr);
+ ret = smum_upload_powerplay_table(hwmgr);
return ret;
}
-static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -593,8 +585,7 @@ static int cz_tf_init_sclk_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_uvd_clock_voltage_dependency_table *table =
@@ -607,8 +598,8 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->uvd_dpm.soft_min_clk = 0;
cz_hwmgr->uvd_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxUvdLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].vclk;
@@ -621,8 +612,7 @@ static int cz_tf_init_uvd_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_vce_clock_voltage_dependency_table *table =
@@ -635,8 +625,8 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->vce_dpm.soft_min_clk = 0;
cz_hwmgr->vce_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxEclkLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].ecclk;
@@ -649,8 +639,7 @@ static int cz_tf_init_vce_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_acp_clock_voltage_dependency_table *table =
@@ -663,8 +652,8 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->acp_dpm.soft_min_clk = 0;
cz_hwmgr->acp_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetMaxAclkLevel);
- level = smum_get_argument(hwmgr->smumgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
+ level = smum_get_argument(hwmgr);
if (level < table->count)
clock = table->entries[level].acpclk;
@@ -676,8 +665,7 @@ static int cz_tf_init_acp_limit(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
@@ -686,22 +674,16 @@ static int cz_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
cz_hwmgr->samu_power_gated = false;
cz_hwmgr->acp_power_gated = false;
cz_hwmgr->pgacpinit = true;
-
- return 0;
}
-static int cz_tf_init_sclk_threshold(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->low_sclk_interrupt_threshold = 0;
-
- return 0;
}
-static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+
+static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
struct phm_clock_voltage_dependency_table *table =
@@ -727,7 +709,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
cz_hwmgr->sclk_dpm.hard_min_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkHardMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.hard_min_clk,
@@ -753,7 +735,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
cz_hwmgr->sclk_dpm.soft_min_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
@@ -764,7 +746,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
PHM_PlatformCaps_StablePState) &&
cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
cz_hwmgr->sclk_dpm.soft_max_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -774,9 +756,7 @@ static int cz_tf_update_sclk_limit(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
@@ -786,7 +766,7 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepSclk,
clks);
}
@@ -794,77 +774,84 @@ static int cz_tf_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr,
return 0;
}
-static int cz_tf_set_watermark_threshold(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr =
(struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
cz_hwmgr->sclk_dpm.soft_max_clk);
return 0;
}
-static int cz_tf_set_enabled_levels(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
+ struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
+ if (hw_data->is_nb_dpm_enabled) {
+ if (enable) {
+ PP_DBG_LOG("enable Low Memory PState.\n");
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableLowMemoryPstate,
+ (lock ? 1 : 0));
+ } else {
+ PP_DBG_LOG("disable Low Memory PState.\n");
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableLowMemoryPstate,
+ (lock ? 1 : 0));
+ }
+ }
+
return 0;
}
-
-static int cz_tf_enable_nb_dpm(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr)
{
int ret = 0;
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
unsigned long dpm_features = 0;
- if (!cz_hwmgr->is_nb_dpm_enabled) {
- PP_DBG_LOG("enabling ALL SMU features.\n");
+ if (cz_hwmgr->is_nb_dpm_enabled) {
+ cz_nbdpm_pstate_enable_disable(hwmgr, true, true);
dpm_features |= NB_DPM_MASK;
ret = smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
- PPSMC_MSG_EnableAllSmuFeatures,
+ hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures,
dpm_features);
if (ret == 0)
- cz_hwmgr->is_nb_dpm_enabled = true;
+ cz_hwmgr->is_nb_dpm_enabled = false;
}
return ret;
}
-static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
+static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
{
- struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
-
- if (hw_data->is_nb_dpm_enabled) {
- if (enable) {
- PP_DBG_LOG("enable Low Memory PState.\n");
+ int ret = 0;
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_EnableLowMemoryPstate,
- (lock ? 1 : 0));
- } else {
- PP_DBG_LOG("disable Low Memory PState.\n");
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ unsigned long dpm_features = 0;
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_DisableLowMemoryPstate,
- (lock ? 1 : 0));
- }
+ if (!cz_hwmgr->is_nb_dpm_enabled) {
+ PP_DBG_LOG("enabling ALL SMU features.\n");
+ dpm_features |= NB_DPM_MASK;
+ ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr,
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features);
+ if (ret == 0)
+ cz_hwmgr->is_nb_dpm_enabled = true;
}
- return 0;
+ return ret;
}
-static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
{
bool disable_switch;
bool enable_low_mem_state;
@@ -886,64 +873,64 @@ static int cz_tf_update_low_mem_pstate(struct pp_hwmgr *hwmgr,
return 0;
}
-static const struct phm_master_table_item cz_set_power_state_list[] = {
- { .tableFunction = cz_tf_update_sclk_limit },
- { .tableFunction = cz_tf_set_deep_sleep_sclk_threshold },
- { .tableFunction = cz_tf_set_watermark_threshold },
- { .tableFunction = cz_tf_set_enabled_levels },
- { .tableFunction = cz_tf_enable_nb_dpm },
- { .tableFunction = cz_tf_update_low_mem_pstate },
- { }
-};
+static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ int ret = 0;
-static const struct phm_master_table_header cz_set_power_state_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_set_power_state_list
-};
+ cz_update_sclk_limit(hwmgr);
+ cz_set_deep_sleep_sclk_threshold(hwmgr);
+ cz_set_watermark_threshold(hwmgr);
+ ret = cz_enable_nb_dpm(hwmgr);
+ if (ret)
+ return ret;
+ cz_update_low_mem_pstate(hwmgr, input);
-static const struct phm_master_table_item cz_setup_asic_list[] = {
- { .tableFunction = cz_tf_reset_active_process_mask },
- { .tableFunction = cz_tf_upload_pptable_to_smu },
- { .tableFunction = cz_tf_init_sclk_limit },
- { .tableFunction = cz_tf_init_uvd_limit },
- { .tableFunction = cz_tf_init_vce_limit },
- { .tableFunction = cz_tf_init_acp_limit },
- { .tableFunction = cz_tf_init_power_gate_state },
- { .tableFunction = cz_tf_init_sclk_threshold },
- { }
+ return 0;
};
-static const struct phm_master_table_header cz_setup_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_setup_asic_list
-};
-static int cz_tf_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int cz_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ int ret;
+
+ ret = cz_upload_pptable_to_smu(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_sclk_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_uvd_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_vce_limit(hwmgr);
+ if (ret)
+ return ret;
+ ret = cz_init_acp_limit(hwmgr);
+ if (ret)
+ return ret;
+
+ cz_init_power_gate_state(hwmgr);
+ cz_init_sclk_threshold(hwmgr);
+
+ return 0;
+}
+
+static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
+
hw_data->disp_clk_bypass_pending = false;
hw_data->disp_clk_bypass = false;
-
- return 0;
}
-static int cz_tf_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
- hw_data->is_nb_dpm_enabled = false;
- return 0;
+ hw_data->is_nb_dpm_enabled = false;
}
-static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
@@ -951,63 +938,73 @@ static int cz_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
hw_data->cc6_settings.cpu_pstate_separation_time = 0;
hw_data->cc6_settings.cpu_cc6_disable = false;
hw_data->cc6_settings.cpu_pstate_disable = false;
-
- return 0;
}
-static const struct phm_master_table_item cz_power_down_asic_list[] = {
- { .tableFunction = cz_tf_power_up_display_clock_sys_pll },
- { .tableFunction = cz_tf_clear_nb_dpm_flag },
- { .tableFunction = cz_tf_reset_cc6_data },
- { }
-};
-
-static const struct phm_master_table_header cz_power_down_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_power_down_asic_list
+static int cz_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ cz_power_up_display_clock_sys_pll(hwmgr);
+ cz_clear_nb_dpm_flag(hwmgr);
+ cz_reset_cc6_data(hwmgr);
+ return 0;
};
-static int cz_tf_program_voting_clients(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_program_voting_clients(struct pp_hwmgr *hwmgr)
{
PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
- return 0;
}
-static int cz_tf_start_dpm(struct pp_hwmgr *hwmgr, void *input, void *output,
- void *storage, int result)
+static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
+{
+ PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0);
+}
+
+static int cz_start_dpm(struct pp_hwmgr *hwmgr)
{
- int res = 0xff;
+ int ret = 0;
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
unsigned long dpm_features = 0;
cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
dpm_features |= SCLK_DPM_MASK;
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
dpm_features);
- return res;
+ return ret;
}
-static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+ struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
+ unsigned long dpm_features = 0;
+
+ if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) {
+ dpm_features |= SCLK_DPM_MASK;
+ cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled;
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features);
+ }
+ return ret;
+}
+
+static int cz_program_bootup_state(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1016,13 +1013,11 @@ static int cz_tf_program_bootup_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static int cz_tf_reset_acp_boot_level(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
cz_hwmgr->acp_boot_level = 0xff;
- return 0;
}
static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
@@ -1031,67 +1026,52 @@ static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
int result;
unsigned long features;
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetFeatureStatus, 0);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
if (result == 0) {
- features = smum_get_argument(hwmgr->smumgr);
+ features = smum_get_argument(hwmgr);
if (features & check_feature)
return true;
}
- return result;
+ return false;
}
-static int cz_tf_check_for_dpm_disabled(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static bool cz_check_for_dpm_enabled(struct pp_hwmgr *hwmgr)
{
if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return PP_Result_TableImmediateExit;
- return 0;
+ return true;
+ return false;
}
-static int cz_tf_enable_didt(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
- /* TO DO */
- return 0;
-}
+ if (!cz_check_for_dpm_enabled(hwmgr)) {
+ pr_info("dpm has been disabled\n");
+ return 0;
+ }
+ cz_disable_nb_dpm(hwmgr);
-static int cz_tf_check_for_dpm_enabled(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
-{
- if (!cz_dpm_check_smu_features(hwmgr,
- SMU_EnabledFeatureScoreboard_SclkDpmOn))
- return PP_Result_TableImmediateExit;
- return 0;
-}
+ cz_clear_voting_clients(hwmgr);
+ if (cz_stop_dpm(hwmgr))
+ return -EINVAL;
-static const struct phm_master_table_item cz_disable_dpm_list[] = {
- { .tableFunction = cz_tf_check_for_dpm_enabled },
- { },
+ return 0;
};
+static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ if (cz_check_for_dpm_enabled(hwmgr)) {
+ pr_info("dpm has been enabled\n");
+ return 0;
+ }
-static const struct phm_master_table_header cz_disable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_disable_dpm_list
-};
-
-static const struct phm_master_table_item cz_enable_dpm_list[] = {
- { .tableFunction = cz_tf_check_for_dpm_disabled },
- { .tableFunction = cz_tf_program_voting_clients },
- { .tableFunction = cz_tf_start_dpm },
- { .tableFunction = cz_tf_program_bootup_state },
- { .tableFunction = cz_tf_enable_didt },
- { .tableFunction = cz_tf_reset_acp_boot_level },
- { },
-};
+ cz_program_voting_clients(hwmgr);
+ if (cz_start_dpm(hwmgr))
+ return -EINVAL;
+ cz_program_bootup_state(hwmgr);
+ cz_reset_acp_boot_level(hwmgr);
-static const struct phm_master_table_header cz_enable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- cz_enable_dpm_list
+ return 0;
};
static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
@@ -1138,7 +1118,11 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
cz_ps->action = cz_current_ps->action;
- if (!force_high && (cz_ps->action == FORCE_HIGH))
+ if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ cz_nbdpm_pstate_enable_disable(hwmgr, false, false);
+ else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
+ cz_nbdpm_pstate_enable_disable(hwmgr, false, true);
+ else if (!force_high && (cz_ps->action == FORCE_HIGH))
cz_ps->action = CANCEL_FORCE_HIGH;
else if (force_high && (cz_ps->action != FORCE_HIGH))
cz_ps->action = FORCE_HIGH;
@@ -1173,62 +1157,16 @@ static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
cz_construct_boot_state(hwmgr);
- result = phm_construct_table(hwmgr, &cz_setup_asic_master,
- &(hwmgr->setup_asic));
- if (result != 0) {
- pr_err("Fail to construct setup ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &cz_power_down_asic_master,
- &(hwmgr->power_down_asic));
- if (result != 0) {
- pr_err("Fail to construct power down ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &cz_disable_dpm_master,
- &(hwmgr->disable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to disable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &cz_enable_dpm_master,
- &(hwmgr->enable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to enable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &cz_set_power_state_master,
- &(hwmgr->set_power_state));
- if (result != 0) {
- pr_err("Fail to construct set_power_state\n");
- return result;
- }
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
- result = phm_construct_table(hwmgr, &cz_phm_enable_clock_power_gatings_master, &(hwmgr->enable_clock_power_gatings));
- if (result != 0) {
- pr_err("Fail to construct enable_clock_power_gatings\n");
- return result;
- }
return result;
}
static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
if (hwmgr != NULL) {
- phm_destroy_table(hwmgr, &(hwmgr->enable_clock_power_gatings));
- phm_destroy_table(hwmgr, &(hwmgr->set_power_state));
- phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->power_down_asic));
- phm_destroy_table(hwmgr, &(hwmgr->setup_asic));
-
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
@@ -1240,13 +1178,13 @@ static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1278,13 +1216,13 @@ static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
cz_hwmgr->sclk_dpm.soft_max_clk = clock;
cz_hwmgr->sclk_dpm.hard_max_clk = clock;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMin));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_max_clk,
@@ -1297,13 +1235,13 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
PPSMC_MSG_SetSclkSoftMax));
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
cz_get_sclk_level(hwmgr,
cz_hwmgr->sclk_dpm.soft_min_clk,
@@ -1312,106 +1250,25 @@ static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
return 0;
}
-static int cz_phm_force_dpm_sclk(struct pp_hwmgr *hwmgr, uint32_t sclk)
-{
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMin,
- cz_get_sclk_level(hwmgr,
- sclk,
- PPSMC_MSG_SetSclkSoftMin));
-
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetSclkSoftMax,
- cz_get_sclk_level(hwmgr,
- sclk,
- PPSMC_MSG_SetSclkSoftMax));
- return 0;
-}
-
-static int cz_get_profiling_clk(struct pp_hwmgr *hwmgr, uint32_t *sclk)
-{
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
- int32_t tmp_sclk;
- int32_t count;
-
- tmp_sclk = table->entries[table->count-1].clk * 70 / 100;
-
- for (count = table->count-1; count >= 0; count--) {
- if (tmp_sclk >= table->entries[count].clk) {
- tmp_sclk = table->entries[count].clk;
- *sclk = tmp_sclk;
- break;
- }
- }
- if (count < 0)
- *sclk = table->entries[0].clk;
-
- return 0;
-}
-
static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
- uint32_t sclk = 0;
int ret = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
ret = cz_phm_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
ret = cz_phm_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = cz_phm_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
- break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = cz_get_profiling_clk(hwmgr, &sclk);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
- cz_phm_force_dpm_sclk(hwmgr, sclk);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -1424,7 +1281,7 @@ int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_UVDPowerOFF);
return 0;
}
@@ -1436,11 +1293,11 @@ int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
return smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_UVDPowerON, 1);
} else {
return smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_UVDPowerON, 0);
}
}
@@ -1457,11 +1314,12 @@ int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
if (!bgate) {
/* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ PHM_PlatformCaps_StablePState)
+ || hwmgr->en_umd_pstate) {
cz_hwmgr->uvd_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].vclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUvdHardMin,
cz_get_uvd_level(hwmgr,
cz_hwmgr->uvd_dpm.hard_min_clk,
@@ -1486,11 +1344,12 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ PHM_PlatformCaps_StablePState)
+ || hwmgr->en_umd_pstate) {
cz_hwmgr->vce_dpm.hard_min_clk =
ptable->entries[ptable->count - 1].ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
@@ -1498,15 +1357,15 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
} else {
/*Program HardMin based on the vce_arbiter.ecclk */
if (hwmgr->vce_arbiter.ecclk == 0) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkSoftMin, 1);
} else {
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
@@ -1520,7 +1379,7 @@ int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
@@ -1529,19 +1388,19 @@ int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
-static int cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
return cz_hwmgr->sys_info.bootup_uma_clock;
}
-static int cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct cz_power_state *cz_ps;
@@ -1679,7 +1538,7 @@ static void cz_hw_print_display_cfg(
PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
data);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplaySizePowerParams,
data);
}
@@ -1744,10 +1603,10 @@ static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
mask);
break;
@@ -1989,7 +1848,7 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
if (0 == result) {
activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
activity_percent = activity_percent > 100 ? 100 : activity_percent;
@@ -2015,7 +1874,6 @@ static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.backend_init = cz_hwmgr_backend_init,
.backend_fini = cz_hwmgr_backend_fini,
- .asic_setup = NULL,
.apply_state_adjust_rules = cz_apply_state_adjust_rules,
.force_dpm_level = cz_dpm_force_dpm_level,
.get_power_state_size = cz_get_power_state_size,
@@ -2037,6 +1895,11 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
.get_clock_by_type = cz_get_clock_by_type,
.get_max_high_clocks = cz_get_max_high_clocks,
.read_sensor = cz_read_sensor,
+ .power_off_asic = cz_power_off_asic,
+ .asic_setup = cz_setup_asic_task,
+ .dynamic_state_management_enable = cz_enable_dpm_tasks,
+ .power_state_set = cz_set_power_state_tasks,
+ .dynamic_state_management_disable = cz_disable_dpm_tasks,
};
int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
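
Every cz_hwmgr.c hunk above applies the same conversion: a table entry with the generic five-argument phm signature becomes a plain helper taking only the hwmgr, and an explicit sequencing function replaces the master-table array, so failures propagate through ordinary return codes instead of the table dispatcher. A minimal sketch of the before/after shape (illustrative only; the cz_example names are hypothetical and assume the powerplay hwmgr definitions):

    /* before: invoked indirectly through the phm runtime table */
    static int cz_tf_example(struct pp_hwmgr *hwmgr,
                             void *input, void *output,
                             void *storage, int result);

    /* after: a plain helper plus an explicit sequencing function */
    static int cz_example(struct pp_hwmgr *hwmgr);

    static int cz_example_tasks(struct pp_hwmgr *hwmgr)
    {
            int ret;

            ret = cz_example(hwmgr); /* errors now return directly */
            if (ret)
                    return ret;

            return 0;
    }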
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
deleted file mode 100644
index bc7d8bd7e7cb..000000000000
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "hwmgr.h"
-
-static int phm_run_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input,
- void *output,
- void *temp_storage)
-{
- int result = 0;
- phm_table_function *function;
-
- if (rt_table->function_list == NULL) {
- pr_debug("this function not implement!\n");
- return 0;
- }
-
- for (function = rt_table->function_list; NULL != *function; function++) {
- int tmp = (*function)(hwmgr, input, output, temp_storage, result);
-
- if (tmp == PP_Result_TableImmediateExit)
- break;
- if (tmp) {
- if (0 == result)
- result = tmp;
- if (rt_table->exit_error)
- break;
- }
- }
-
- return result;
-}
-
-int phm_dispatch_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input, void *output)
-{
- int result;
- void *temp_storage;
-
- if (hwmgr == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter!\n");
- return -EINVAL;
- }
-
- if (0 != rt_table->storage_size) {
- temp_storage = kzalloc(rt_table->storage_size, GFP_KERNEL);
- if (temp_storage == NULL) {
- pr_err("Could not allocate table temporary storage\n");
- return -ENOMEM;
- }
- } else {
- temp_storage = NULL;
- }
-
- result = phm_run_table(hwmgr, rt_table, input, output, temp_storage);
-
- kfree(temp_storage);
-
- return result;
-}
-
-int phm_construct_table(struct pp_hwmgr *hwmgr,
- const struct phm_master_table_header *master_table,
- struct phm_runtime_table_header *rt_table)
-{
- uint32_t function_count = 0;
- const struct phm_master_table_item *table_item;
- uint32_t size;
- phm_table_function *run_time_list;
- phm_table_function *rtf;
-
- if (hwmgr == NULL || master_table == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter!\n");
- return -EINVAL;
- }
-
- for (table_item = master_table->master_list;
- NULL != table_item->tableFunction; table_item++) {
- if ((NULL == table_item->isFunctionNeededInRuntimeTable) ||
- (table_item->isFunctionNeededInRuntimeTable(hwmgr)))
- function_count++;
- }
-
- size = (function_count + 1) * sizeof(phm_table_function);
- run_time_list = kzalloc(size, GFP_KERNEL);
-
- if (NULL == run_time_list)
- return -ENOMEM;
-
- rtf = run_time_list;
- for (table_item = master_table->master_list;
- NULL != table_item->tableFunction; table_item++) {
- if ((rtf - run_time_list) > function_count) {
- pr_err("Check function results have changed\n");
- kfree(run_time_list);
- return -EINVAL;
- }
-
- if ((NULL == table_item->isFunctionNeededInRuntimeTable) ||
- (table_item->isFunctionNeededInRuntimeTable(hwmgr))) {
- *(rtf++) = table_item->tableFunction;
- }
- }
-
- if ((rtf - run_time_list) > function_count) {
- pr_err("Check function results have changed\n");
- kfree(run_time_list);
- return -EINVAL;
- }
-
- *rtf = NULL;
- rt_table->function_list = run_time_list;
- rt_table->exit_error = (0 != (master_table->flags & PHM_MasterTableFlag_ExitOnError));
- rt_table->storage_size = master_table->storage_size;
- return 0;
-}
-
-int phm_destroy_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table)
-{
- if (hwmgr == NULL || rt_table == NULL) {
- pr_err("Invalid Parameter\n");
- return -EINVAL;
- }
-
- if (NULL == rt_table->function_list)
- return 0;
-
- kfree(rt_table->function_list);
-
- rt_table->function_list = NULL;
- rt_table->storage_size = 0;
- rt_table->exit_error = false;
-
- return 0;
-}
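
With functiontables.c deleted, nothing builds, dispatches, or tears down phm runtime tables any more; each former phm_dispatch_table() call site now invokes the matching hwmgr_func callback directly, as the hardwaremanager.c hunks below show. A sketch of the call-site change (illustrative, following the phm_setup_asic() shape):

    /* before: walk a runtime-built function list */
    return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic), NULL, NULL);

    /* after: one direct, NULL-checked callback */
    if (NULL != hwmgr->hwmgr_func->asic_setup)
            return hwmgr->hwmgr_func->asic_setup(hwmgr);
    return 0;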
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index fcc722ea7649..623cff90233d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -26,35 +26,22 @@
#include "hardwaremanager.h"
#include "power_state.h"
+
+#define TEMP_RANGE_MIN (0)
+#define TEMP_RANGE_MAX (80 * 1000)
+
#define PHM_FUNC_CHECK(hw) \
do { \
if ((hw) == NULL || (hw)->hwmgr_func == NULL) \
return -EINVAL; \
} while (0)
-bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr)
-{
- return hwmgr->block_hw_access;
-}
-
-int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block)
-{
- hwmgr->block_hw_access = block;
- return 0;
-}
-
int phm_setup_asic(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->asic_setup)
- return hwmgr->hwmgr_func->asic_setup(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->setup_asic),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->asic_setup)
+ return hwmgr->hwmgr_func->asic_setup(hwmgr);
return 0;
}
@@ -63,14 +50,8 @@ int phm_power_down_asic(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->power_off_asic)
- return hwmgr->hwmgr_func->power_off_asic(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->power_down_asic),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->power_off_asic)
+ return hwmgr->hwmgr_func->power_off_asic(hwmgr);
return 0;
}
@@ -86,13 +67,8 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
states.pcurrent_state = pcurrent_state;
states.pnew_state = pnew_power_state;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->power_state_set)
- return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->set_power_state), &states, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->power_state_set)
+ return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);
return 0;
}
@@ -103,15 +79,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
bool enabled;
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
- ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
- } else {
- ret = phm_dispatch_table(hwmgr,
- &(hwmgr->enable_dynamic_state_management),
- NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);
enabled = ret == 0;
@@ -127,15 +96,8 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (hwmgr->hwmgr_func->dynamic_state_management_disable)
- ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
- } else {
- ret = phm_dispatch_table(hwmgr,
- &(hwmgr->disable_dynamic_state_management),
- NULL, NULL);
- }
+ if (hwmgr->hwmgr_func->dynamic_state_management_disable)
+ ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
enabled = ret == 0 ? false : true;
@@ -193,35 +155,13 @@ int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
return 0;
}
-int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powergate_uvd != NULL)
- return hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
- return 0;
-}
-
-int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate)
-{
- PHM_FUNC_CHECK(hwmgr);
-
- if (hwmgr->hwmgr_func->powergate_vce != NULL)
- return hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
- return 0;
-}
-
int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
- return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
- } else {
- return phm_dispatch_table(hwmgr, &(hwmgr->enable_clock_power_gatings), NULL, NULL);
- }
+ if (NULL != hwmgr->hwmgr_func->enable_clock_power_gating)
+ return hwmgr->hwmgr_func->enable_clock_power_gating(hwmgr);
+
return 0;
}
@@ -229,11 +169,9 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
- return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
- }
+ if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
+ return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);
+
return 0;
}
@@ -242,12 +180,9 @@ int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface)) {
- if (NULL != hwmgr->hwmgr_func->display_config_changed)
- hwmgr->hwmgr_func->display_config_changed(hwmgr);
- } else
- return phm_dispatch_table(hwmgr, &hwmgr->display_configuration_changed, NULL, NULL);
+ if (NULL != hwmgr->hwmgr_func->display_config_changed)
+ hwmgr->hwmgr_func->display_config_changed(hwmgr);
+
return 0;
}
@@ -255,9 +190,7 @@ int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
PHM_FUNC_CHECK(hwmgr);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface))
- if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
+ if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);
return 0;
@@ -277,10 +210,10 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
{
PHM_FUNC_CHECK(hwmgr);
- if (hwmgr->hwmgr_func->register_internal_thermal_interrupt == NULL)
- return -EINVAL;
+ if (hwmgr->hwmgr_func->register_internal_thermal_interrupt != NULL)
+ return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info);
- return hwmgr->hwmgr_func->register_internal_thermal_interrupt(hwmgr, info);
+ return 0;
}
/**
@@ -292,7 +225,21 @@ int phm_register_thermal_interrupt(struct pp_hwmgr *hwmgr, const void *info)
*/
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *temperature_range)
{
- return phm_dispatch_table(hwmgr, &(hwmgr->start_thermal_controller), temperature_range, NULL);
+ struct PP_TemperatureRange range;
+
+ if (temperature_range == NULL) {
+ range.max = TEMP_RANGE_MAX;
+ range.min = TEMP_RANGE_MIN;
+ } else {
+ range.max = temperature_range->max;
+ range.min = temperature_range->min;
+ }
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_ThermalController)
+ && hwmgr->hwmgr_func->start_thermal_controller != NULL)
+ return hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);
+
+ return 0;
}
@@ -323,6 +270,9 @@ int phm_check_states_equal(struct pp_hwmgr *hwmgr,
int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
const struct amd_pp_display_configuration *display_config)
{
+ int index = 0;
+ int number_of_active_display = 0;
+
PHM_FUNC_CHECK(hwmgr);
if (display_config == NULL)
@@ -330,6 +280,17 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
hwmgr->display_config = *display_config;
+ if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk)
+ hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, hwmgr->display_config.min_dcef_deep_sleep_set_clk);
+
+ for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) {
+ if (hwmgr->display_config.displays[index].controller_id != 0)
+ number_of_active_display++;
+ }
+
+ if (NULL != hwmgr->hwmgr_func->set_active_display_count)
+ hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);
+
if (hwmgr->hwmgr_func->store_cc6_data == NULL)
return -EINVAL;
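
phm_start_thermal_controller() also gains a default range: a NULL temperature_range now falls back to TEMP_RANGE_MIN..TEMP_RANGE_MAX (80 * 1000, i.e. 80 degrees C if the unit is millidegrees), which is what the hwmgr_hw_init() and hwmgr_hw_resume() paths in the next file rely on when they pass NULL. The two call shapes, sketched (the explicit values here are hypothetical):

    /* caller supplies an explicit range */
    struct PP_TemperatureRange range = { .min = 0, .max = 70 * 1000 };

    phm_start_thermal_controller(hwmgr, &range);

    /* init/resume path: NULL selects the 0..80000 default */
    phm_start_thermal_controller(hwmgr, NULL);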
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 9547f265a8bb..73969f35846c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -26,8 +26,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/pci.h>
#include <drm/amdgpu_drm.h>
-#include "cgs_common.h"
#include "power_state.h"
#include "hwmgr.h"
#include "pppcielanes.h"
@@ -35,21 +35,100 @@
#include "ppsmc.h"
#include "pp_acpi.h"
#include "amd_acpi.h"
+#include "pp_psm.h"
-extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
+extern const struct pp_smumgr_func ci_smu_funcs;
+extern const struct pp_smumgr_func cz_smu_funcs;
+extern const struct pp_smumgr_func iceland_smu_funcs;
+extern const struct pp_smumgr_func tonga_smu_funcs;
+extern const struct pp_smumgr_func fiji_smu_funcs;
+extern const struct pp_smumgr_func polaris10_smu_funcs;
+extern const struct pp_smumgr_func vega10_smu_funcs;
+extern const struct pp_smumgr_func rv_smu_funcs;
+extern int cz_init_function_pointers(struct pp_hwmgr *hwmgr);
static int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr);
static int hwmgr_set_user_specify_caps(struct pp_hwmgr *hwmgr);
static int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr);
static int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr);
+static int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr);
uint8_t convert_to_vid(uint16_t vddc)
{
return (uint8_t) ((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}
+static int phm_get_pci_bus_devfn(struct pp_hwmgr *hwmgr,
+ struct cgs_system_info *sys_info)
+{
+ sys_info->size = sizeof(struct cgs_system_info);
+ sys_info->info_id = CGS_SYSTEM_INFO_PCIE_BUS_DEVFN;
+
+ return cgs_query_system_info(hwmgr->device, sys_info);
+}
+
+static int phm_thermal_l2h_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU over temperature range detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static int phm_thermal_h2l_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU under temperature range detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static int phm_ctf_irq(void *private_data,
+ unsigned src_id, const uint32_t *iv_entry)
+{
+ struct pp_hwmgr *hwmgr = (struct pp_hwmgr *)private_data;
+ struct cgs_system_info sys_info = {0};
+ int result;
+
+ result = phm_get_pci_bus_devfn(hwmgr, &sys_info);
+ if (result)
+ return -EINVAL;
+
+ pr_warn("GPU Critical Temperature Fault detected on PCIe %lld:%lld.%lld!\n",
+ PCI_BUS_NUM(sys_info.value),
+ PCI_SLOT(sys_info.value),
+ PCI_FUNC(sys_info.value));
+ return 0;
+}
+
+static const struct cgs_irq_src_funcs thermal_irq_src[3] = {
+ {NULL, phm_thermal_l2h_irq},
+ {NULL, phm_thermal_h2l_irq},
+ {NULL, phm_ctf_irq}
+};
+
int hwmgr_early_init(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
@@ -62,7 +141,6 @@ int hwmgr_early_init(struct pp_instance *handle)
return -ENOMEM;
handle->hwmgr = hwmgr;
- hwmgr->smumgr = handle->smu_mgr;
hwmgr->device = handle->device;
hwmgr->chip_family = handle->chip_family;
hwmgr->chip_id = handle->chip_id;
@@ -73,24 +151,38 @@ int hwmgr_early_init(struct pp_instance *handle)
hwmgr->dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
hwmgr_init_default_caps(hwmgr);
hwmgr_set_user_specify_caps(hwmgr);
+ hwmgr->fan_ctrl_is_in_default_mode = true;
+ hwmgr->reload_fw = 1;
switch (hwmgr->chip_family) {
+ case AMDGPU_FAMILY_CI:
+ hwmgr->smumgr_funcs = &ci_smu_funcs;
+ ci_set_asic_special_caps(hwmgr);
+ hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
+ PP_ENABLE_GFX_CG_THRU_SMU);
+ hwmgr->pp_table_version = PP_TABLE_V0;
+ smu7_init_function_pointers(hwmgr);
+ break;
case AMDGPU_FAMILY_CZ:
+ hwmgr->smumgr_funcs = &cz_smu_funcs;
cz_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
+ hwmgr->smumgr_funcs = &iceland_smu_funcs;
topaz_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
hwmgr->pp_table_version = PP_TABLE_V0;
break;
case CHIP_TONGA:
+ hwmgr->smumgr_funcs = &tonga_smu_funcs;
tonga_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~PP_VBI_TIME_SUPPORT_MASK;
break;
case CHIP_FIJI:
+ hwmgr->smumgr_funcs = &fiji_smu_funcs;
fiji_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~ (PP_VBI_TIME_SUPPORT_MASK |
PP_ENABLE_GFX_CG_THRU_SMU);
@@ -98,6 +190,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
+ hwmgr->smumgr_funcs = &polaris10_smu_funcs;
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
@@ -109,6 +202,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
break;
default:
@@ -118,6 +212,7 @@ int hwmgr_early_init(struct pp_instance *handle)
case AMDGPU_FAMILY_RV:
switch (hwmgr->chip_id) {
case CHIP_RAVEN:
+ hwmgr->smumgr_funcs = &rv_smu_funcs;
rv_init_function_pointers(hwmgr);
break;
default:
@@ -131,80 +226,6 @@ int hwmgr_early_init(struct pp_instance *handle)
return 0;
}
-static int hw_init_power_state_table(struct pp_hwmgr *hwmgr)
-{
- int result;
- unsigned int i;
- unsigned int table_entries;
- struct pp_power_state *state;
- int size;
-
- if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
- return -EINVAL;
-
- if (hwmgr->hwmgr_func->get_power_state_size == NULL)
- return -EINVAL;
-
- hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
-
- hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
- sizeof(struct pp_power_state);
-
- hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
- if (hwmgr->ps == NULL)
- return -ENOMEM;
-
- hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
- if (hwmgr->request_ps == NULL) {
- kfree(hwmgr->ps);
- hwmgr->ps = NULL;
- return -ENOMEM;
- }
-
- hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
- if (hwmgr->current_ps == NULL) {
- kfree(hwmgr->request_ps);
- kfree(hwmgr->ps);
- hwmgr->request_ps = NULL;
- hwmgr->ps = NULL;
- return -ENOMEM;
- }
-
- state = hwmgr->ps;
-
- for (i = 0; i < table_entries; i++) {
- result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
-
- if (state->classification.flags & PP_StateClassificationFlag_Boot) {
- hwmgr->boot_ps = state;
- memcpy(hwmgr->current_ps, state, size);
- memcpy(hwmgr->request_ps, state, size);
- }
-
- state->id = i + 1; /* assigned unique num for every power state id */
-
- if (state->classification.flags & PP_StateClassificationFlag_Uvd)
- hwmgr->uvd_ps = state;
- state = (struct pp_power_state *)((unsigned long)state + size);
- }
-
- return 0;
-}
-
-static int hw_fini_power_state_table(struct pp_hwmgr *hwmgr)
-{
- if (hwmgr == NULL)
- return -EINVAL;
-
- kfree(hwmgr->current_ps);
- kfree(hwmgr->request_ps);
- kfree(hwmgr->ps);
- hwmgr->request_ps = NULL;
- hwmgr->ps = NULL;
- hwmgr->current_ps = NULL;
- return 0;
-}
-
int hwmgr_hw_init(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
@@ -228,9 +249,26 @@ int hwmgr_hw_init(struct pp_instance *handle)
if (ret)
goto err1;
- ret = hw_init_power_state_table(hwmgr);
+ ret = psm_init_power_state_table(hwmgr);
+ if (ret)
+ goto err2;
+
+ ret = phm_setup_asic(hwmgr);
if (ret)
goto err2;
+
+ ret = phm_enable_dynamic_state_management(hwmgr);
+ if (ret)
+ goto err2;
+ ret = phm_start_thermal_controller(hwmgr, NULL);
+ ret |= psm_set_performance_states(hwmgr);
+ if (ret)
+ goto err2;
+
+ ret = phm_register_thermal_interrupt(hwmgr, &thermal_irq_src);
+ if (ret)
+ goto err2;
+
return 0;
err2:
if (hwmgr->hwmgr_func->backend_fini)
@@ -247,19 +285,138 @@ int hwmgr_hw_fini(struct pp_instance *handle)
{
struct pp_hwmgr *hwmgr;
- if (handle == NULL)
+ if (handle == NULL || handle->hwmgr == NULL)
return -EINVAL;
hwmgr = handle->hwmgr;
+ phm_stop_thermal_controller(hwmgr);
+ psm_set_boot_states(hwmgr);
+ phm_display_configuration_changed(hwmgr);
+ psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ phm_disable_dynamic_state_management(hwmgr);
+ phm_disable_clock_power_gatings(hwmgr);
+
if (hwmgr->hwmgr_func->backend_fini)
hwmgr->hwmgr_func->backend_fini(hwmgr);
if (hwmgr->pptable_func->pptable_fini)
hwmgr->pptable_func->pptable_fini(hwmgr);
- return hw_fini_power_state_table(hwmgr);
+ return psm_fini_power_state_table(hwmgr);
}
+int hwmgr_hw_suspend(struct pp_instance *handle)
+{
+ struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+ phm_disable_smc_firmware_ctf(hwmgr);
+ ret = psm_set_boot_states(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ if (ret)
+ return ret;
+ ret = phm_power_down_asic(hwmgr);
+
+ return ret;
+}
+int hwmgr_hw_resume(struct pp_instance *handle)
+{
+ struct pp_hwmgr *hwmgr;
+ int ret = 0;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+ ret = phm_setup_asic(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = phm_enable_dynamic_state_management(hwmgr);
+ if (ret)
+ return ret;
+ ret = phm_start_thermal_controller(hwmgr, NULL);
+ if (ret)
+ return ret;
+
+ ret |= psm_set_performance_states(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+
+ return ret;
+}
+
+static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
+{
+ switch (state) {
+ case POWER_STATE_TYPE_BATTERY:
+ return PP_StateUILabel_Battery;
+ case POWER_STATE_TYPE_BALANCED:
+ return PP_StateUILabel_Balanced;
+ case POWER_STATE_TYPE_PERFORMANCE:
+ return PP_StateUILabel_Performance;
+ default:
+ return PP_StateUILabel_None;
+ }
+}
+
+int hwmgr_handle_task(struct pp_instance *handle, enum amd_pp_task task_id,
+ void *input, void *output)
+{
+ int ret = 0;
+ struct pp_hwmgr *hwmgr;
+
+ if (handle == NULL || handle->hwmgr == NULL)
+ return -EINVAL;
+
+ hwmgr = handle->hwmgr;
+
+ switch (task_id) {
+ case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = phm_set_cpu_power_state(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_set_performance_states(hwmgr);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ break;
+ case AMD_PP_TASK_ENABLE_USER_STATE:
+ {
+ enum amd_pm_state_type ps;
+ enum PP_StateUILabel requested_ui_label;
+ struct pp_power_state *requested_ps = NULL;
+
+ if (input == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+ ps = *(unsigned long *)input;
+
+ requested_ui_label = power_state_convert(ps);
+ ret = psm_set_user_performance_state(hwmgr, requested_ui_label, &requested_ps);
+ if (ret)
+ return ret;
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, requested_ps);
+ break;
+ }
+ case AMD_PP_TASK_COMPLETE_INIT:
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+ ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
/**
* Returns once the part of the register indicated by the mask has
* reached the given value.
@@ -294,7 +451,7 @@ int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
* reached the given value.The indirect space is described by giving
* the memory-mapped index of the indirect index register.
*/
-void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
uint32_t value,
@@ -302,14 +459,50 @@ void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
{
if (hwmgr == NULL || hwmgr->device == NULL) {
pr_err("Invalid Hardware Manager!");
- return;
+ return -EINVAL;
}
cgs_write_register(hwmgr->device, indirect_port, index);
- phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+ return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value);
+}
+
+int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask)
+{
+ uint32_t i;
+ uint32_t cur_value;
+
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
+ cur_value = cgs_read_register(hwmgr->device,
+ index);
+ if ((cur_value & mask) != (value & mask))
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == hwmgr->usec_timeout)
+ return -ETIME;
+ return 0;
}
+int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port,
+ uint32_t index,
+ uint32_t value,
+ uint32_t mask)
+{
+ if (hwmgr == NULL || hwmgr->device == NULL)
+ return -EINVAL;
+ cgs_write_register(hwmgr->device, indirect_port, index);
+ return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
+ value, mask);
+}
bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
@@ -678,7 +871,7 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
for (i = 0; i < vddc_table->count; i++) {
if (req_vddc <= vddc_table->entries[i].vddc) {
req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VddC_Request, req_volt);
return;
}
@@ -689,28 +882,8 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
{
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableEngineTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMemoryTransition);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMGCGTSSM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLSClockGating);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_Force3DClockSupport);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableLightSleep);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMCLS);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisablePowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableDPM);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableSMUUVDHandshake);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalAutoThrottling);
-
phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_NoOD5Support);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UserMaxClockForMultiDisplays);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress);
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM);
phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM);
@@ -735,7 +908,6 @@ void hwmgr_init_default_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_FanSpeedInTableIsRPM);
-
return;
}
@@ -784,7 +956,8 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
-
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
@@ -793,10 +966,6 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition);
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
-
if (hwmgr->chip_id != CHIP_POLARIS10)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
@@ -814,6 +983,8 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -822,15 +993,13 @@ int fiji_set_asic_special_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_TDRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
return 0;
}
int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -844,14 +1013,25 @@ int tonga_set_asic_special_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_UVDPowerGating);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_VCEPowerGating);
+ return 0;
+}
+int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+{
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
+ PHM_PlatformCaps_EVV);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SQRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_DBRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TDRamping);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_TCPRamping);
return 0;
}
-int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
+int ci_set_asic_special_caps(struct pp_hwmgr *hwmgr)
{
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SQRamping);
@@ -862,8 +1042,8 @@ int topaz_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TCPRamping);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_EVV);
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport);
return 0;
}
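
hwmgr.c now also carries the task dispatch that drives these helpers: hwmgr_handle_task() reads the AMD_PP_TASK_ENABLE_USER_STATE input as an unsigned long holding an amd_pm_state_type, converts it to a UI label, and hands the resolved state to psm_adjust_power_state_dynamic(). A hypothetical caller sketch (the surrounding variables are assumptions, not part of the patch):

    unsigned long user_state = POWER_STATE_TYPE_PERFORMANCE;
    int ret;

    /* request a user-selected performance state... */
    ret = hwmgr_handle_task(handle, AMD_PP_TASK_ENABLE_USER_STATE,
                            &user_state, NULL);
    /* ...then let the dynamic adjustment settle the result */
    if (!ret)
            ret = hwmgr_handle_task(handle, AMD_PP_TASK_READJUST_POWER_STATE,
                                    NULL, NULL);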
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
new file mode 100644
index 000000000000..167cdc321db2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include "pp_psm.h"
+
+int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ unsigned int i;
+ unsigned int table_entries;
+ struct pp_power_state *state;
+ int size;
+
+ if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->get_power_state_size == NULL)
+ return -EINVAL;
+
+ hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);
+
+ hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
+ sizeof(struct pp_power_state);
+
+ hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL);
+ if (hwmgr->ps == NULL)
+ return -ENOMEM;
+
+ hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->request_ps == NULL) {
+ kfree(hwmgr->ps);
+ hwmgr->ps = NULL;
+ return -ENOMEM;
+ }
+
+ hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
+ if (hwmgr->current_ps == NULL) {
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
+ return -ENOMEM;
+ }
+
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);
+
+ if (state->classification.flags & PP_StateClassificationFlag_Boot) {
+ hwmgr->boot_ps = state;
+ memcpy(hwmgr->current_ps, state, size);
+ memcpy(hwmgr->request_ps, state, size);
+ }
+
+ state->id = i + 1; /* assign a unique number to every power state */
+
+ if (state->classification.flags & PP_StateClassificationFlag_Uvd)
+ hwmgr->uvd_ps = state;
+ state = (struct pp_power_state *)((unsigned long)state + size);
+ }
+
+ return 0;
+}
+
+int psm_fini_power_state_table(struct pp_hwmgr *hwmgr)
+{
+ if (hwmgr == NULL)
+ return -EINVAL;
+
+ kfree(hwmgr->current_ps);
+ kfree(hwmgr->request_ps);
+ kfree(hwmgr->ps);
+ hwmgr->request_ps = NULL;
+ hwmgr->ps = NULL;
+ hwmgr->current_ps = NULL;
+ return 0;
+}
+
+static int psm_get_ui_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel ui_label,
+ unsigned long *state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->classification.ui_label & ui_label) {
+ *state_id = state->id;
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr,
+ enum PP_StateClassificationFlag flag,
+ unsigned long *state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->classification.flags & flag) {
+ *state_id = state->id;
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id)
+{
+ struct pp_power_state *state;
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+
+ state = hwmgr->ps;
+
+ for (i = 0; i < table_entries; i++) {
+ if (state->id == state_id) {
+ memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
+ return 0;
+ }
+ state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
+ }
+ return -EINVAL;
+}
+
+int psm_set_boot_states(struct pp_hwmgr *hwmgr)
+{
+ unsigned long state_id;
+ int ret = -EINVAL;
+
+ if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot,
+ &state_id))
+ ret = psm_set_states(hwmgr, state_id);
+
+ return ret;
+}
+
+int psm_set_performance_states(struct pp_hwmgr *hwmgr)
+{
+ unsigned long state_id;
+ int ret = -EINVAL;
+
+ if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance,
+ &state_id))
+ ret = psm_set_states(hwmgr, state_id);
+
+ return ret;
+}
+
+int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel label_id,
+ struct pp_power_state **state)
+{
+ int table_entries;
+ int i;
+
+ table_entries = hwmgr->num_ps;
+ *state = hwmgr->ps;
+
+restart_search:
+ for (i = 0; i < table_entries; i++) {
+ if ((*state)->classification.ui_label & label_id)
+ return 0;
+ *state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size);
+ }
+
+ switch (label_id) {
+ case PP_StateUILabel_Battery:
+ case PP_StateUILabel_Balanced:
+ label_id = PP_StateUILabel_Performance;
+ goto restart_search;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
+ struct pp_power_state *new_ps)
+{
+ struct pp_power_state *pcurrent;
+ struct pp_power_state *requested;
+ bool equal;
+
+ if (skip)
+ return 0;
+
+ if (new_ps != NULL)
+ requested = new_ps;
+ else
+ requested = hwmgr->request_ps;
+
+ pcurrent = hwmgr->current_ps;
+
+ phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
+
+ if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr,
+ &pcurrent->hardware, &requested->hardware, &equal)))
+ equal = false;
+
+ if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
+ phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
+ memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
+ }
+ return 0;
+}
+
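
The new pp_psm.c above moves power-state bookkeeping out of the old eventmgr into the hwmgr layer. A hedged sketch of the expected call order during bring-up and teardown (the wrapper below is an illustration, not a call site from this patch; error handling is abbreviated):

/*
 * Hypothetical caller showing the intended psm_* sequencing; the real
 * call sites are the hwmgr init/fini paths, not reproduced here.
 */
static int example_psm_bringup(struct pp_hwmgr *hwmgr)
{
        int ret;

        ret = psm_init_power_state_table(hwmgr);        /* allocate ps arrays */
        if (ret)
                return ret;

        ret = psm_set_boot_states(hwmgr);               /* request the boot state */
        if (ret)
                goto err_fini;

        /* apply the request; 'false' means do not skip the adjustment */
        ret = psm_adjust_power_state_dynamic(hwmgr, false, NULL);
        if (ret)
                goto err_fini;

        return 0;

err_fini:
        psm_fini_power_state_table(hwmgr);
        return ret;
}
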
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
index 9ef96aab3f24..fa1b6825036a 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -21,14 +21,20 @@
*
*/
-#ifndef _EVENTINIT_H_
-#define _EVENTINIT_H_
+#ifndef PP_PSM_H
+#define PP_PSM_H
-#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4
+#include "hwmgr.h"
-void pem_init_feature_info(struct pp_eventmgr *eventmgr);
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr);
-int pem_register_interrupts(struct pp_eventmgr *eventmgr);
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr);
+int psm_init_power_state_table(struct pp_hwmgr *hwmgr);
+int psm_fini_power_state_table(struct pp_hwmgr *hwmgr);
+int psm_set_boot_states(struct pp_hwmgr *hwmgr);
+int psm_set_performance_states(struct pp_hwmgr *hwmgr);
+int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
+ enum PP_StateUILabel label_id,
+ struct pp_power_state **state);
+int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr,
+ bool skip,
+ struct pp_power_state *new_ps);
-#endif /* _EVENTINIT_H_ */
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 953e0c9ad7cd..a129bc5b1844 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -470,7 +470,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr)
* SET_VOLTAGE_TYPE_ASIC_MVDDC, SET_VOLTAGE_TYPE_ASIC_MVDDQ.
* voltage_mode is one of ATOM_SET_VOLTAGE, ATOM_SET_VOLTAGE_PHASE
*/
-bool atomctrl_is_voltage_controled_by_gpio_v3(
+bool atomctrl_is_voltage_controlled_by_gpio_v3(
struct pp_hwmgr *hwmgr,
uint8_t voltage_type,
uint8_t voltage_mode)
@@ -1100,10 +1100,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
}
}
- PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count,
- "Can't find requested voltage id in vddc_dependency_on_sclk table!",
+ if (entry_id >= hwmgr->dyn_state.vddc_dependency_on_sclk->count) {
+ pr_debug("Can't find requested voltage id in vddc_dependency_on_sclk table!\n");
return -EINVAL;
- );
+ }
get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
@@ -1418,3 +1418,83 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
return 0;
}
+
+int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id)
+{
+ int result;
+ SET_VOLTAGE_PS_ALLOCATION allocation;
+ SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters =
+ (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage;
+
+ voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID;
+
+ result = cgs_atom_exec_cmd_table(hwmgr->device,
+ GetIndexIntoMasterTable(COMMAND, SetVoltage),
+ voltage_parameters);
+
+ *virtual_voltage_id = voltage_parameters->usVoltageLevel;
+
+ return result;
+}
+
+int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *vddc, uint16_t *vddci,
+ uint16_t virtual_voltage_id,
+ uint16_t efuse_voltage_id)
+{
+ int i, j;
+ int ix;
+ u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
+ ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
+
+ *vddc = 0;
+ *vddci = 0;
+
+ ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
+ cgs_atom_get_data_table(hwmgr->device,
+ ix,
+ NULL, NULL, NULL);
+ if (!profile)
+ return -EINVAL;
+
+ if ((profile->asHeader.ucTableFormatRevision >= 2) &&
+ (profile->asHeader.ucTableContentRevision >= 1) &&
+ (profile->asHeader.usStructureSize >= sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))) {
+ leakage_bin = (u16 *)((char *)profile + profile->usLeakageBinArrayOffset);
+ vddc_id_buf = (u16 *)((char *)profile + profile->usElbVDDC_IdArrayOffset);
+ vddc_buf = (u16 *)((char *)profile + profile->usElbVDDC_LevelArrayOffset);
+ if (profile->ucElbVDDC_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDC_Num; i++) {
+ if (vddc_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (efuse_voltage_id <= leakage_bin[j]) {
+ *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ vddci_id_buf = (u16 *)((char *)profile + profile->usElbVDDCI_IdArrayOffset);
+ vddci_buf = (u16 *)((char *)profile + profile->usElbVDDCI_LevelArrayOffset);
+ if (profile->ucElbVDDCI_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
+ if (vddci_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (efuse_voltage_id <= leakage_bin[j]) {
+ *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
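
The two helpers added above implement the EVV leakage lookup: one reads the fused leakage id through the SetVoltage command table, the other walks ATOM_ASIC_PROFILING_INFO_V2_1 to translate a virtual voltage id into real VDDC/VDDCI levels. A hedged usage sketch (this caller is hypothetical; the real callers sit in the CI/smu7 hwmgr code):

static int example_lookup_leakage_voltage(struct pp_hwmgr *hwmgr,
                                          uint16_t virtual_voltage_id)
{
        uint16_t efuse_voltage_id = 0;
        uint16_t vddc = 0, vddci = 0;
        int ret;

        ret = atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id);
        if (ret)
                return ret;

        ret = atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
                                                        virtual_voltage_id,
                                                        efuse_voltage_id);
        if (ret)
                return ret;

        /* vddc/vddci stay 0 if the virtual id is not found in the profile */
        return 0;
}
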
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index e9fe2e84006b..c44a92064cf1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -291,7 +291,7 @@ extern uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr);
extern int atomctrl_get_memory_pll_dividers_si(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode);
extern int atomctrl_get_engine_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers);
extern int atomctrl_get_dfs_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers);
-extern bool atomctrl_is_voltage_controled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode);
+extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode);
extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table);
extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param);
@@ -314,5 +314,11 @@ extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_
extern int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
uint8_t *svd_gpio_id, uint8_t *svc_gpio_id,
uint16_t *load_line);
+
+extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr,
+ uint16_t *vddc, uint16_t *vddci,
+ uint16_t virtual_voltage_id,
+ uint16_t efuse_voltage_id);
+extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 84f01fd33aff..d1af1483c69b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -173,8 +173,6 @@ static int get_vddc_lookup_table(
if (NULL == table)
return -ENOMEM;
- memset(table, 0x00, table_size);
-
table->count = vddc_lookup_pp_tables->ucNumEntries;
for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) {
@@ -335,8 +333,6 @@ static int get_valid_clk(
if (NULL == table)
return -ENOMEM;
- memset(table, 0x00, table_size);
-
table->count = (uint32_t)clk_volt_pp_table->count;
for (i = 0; i < table->count; i++) {
@@ -390,8 +386,6 @@ static int get_mclk_voltage_dependency_table(
if (NULL == mclk_table)
return -ENOMEM;
- memset(mclk_table, 0x00, table_size);
-
mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries;
for (i = 0; i < mclk_dep_table->ucNumEntries; i++) {
@@ -439,8 +433,6 @@ static int get_sclk_voltage_dependency_table(
if (NULL == sclk_table)
return -ENOMEM;
- memset(sclk_table, 0x00, table_size);
-
sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
for (i = 0; i < tonga_table->ucNumEntries; i++) {
@@ -473,8 +465,6 @@ static int get_sclk_voltage_dependency_table(
if (NULL == sclk_table)
return -ENOMEM;
- memset(sclk_table, 0x00, table_size);
-
sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
for (i = 0; i < polaris_table->ucNumEntries; i++) {
@@ -525,8 +515,6 @@ static int get_pcie_table(
if (pcie_table == NULL)
return -ENOMEM;
- memset(pcie_table, 0x00, table_size);
-
/*
* Make sure the number of pcie entries is less than or equal to the number of sclk dpm levels.
* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
@@ -567,8 +555,6 @@ static int get_pcie_table(
if (pcie_table == NULL)
return -ENOMEM;
- memset(pcie_table, 0x00, table_size);
-
/*
* Make sure the number of pcie entries is less than or equal to the number of sclk dpm levels.
* Since first PCIE entry is for ULV, #pcie has to be <= SclkLevel + 1.
@@ -615,8 +601,6 @@ static int get_cac_tdp_table(
if (NULL == tdp_table)
return -ENOMEM;
- memset(tdp_table, 0x00, table_size);
-
hwmgr->dyn_state.cac_dtp_table = kzalloc(table_size, GFP_KERNEL);
if (NULL == hwmgr->dyn_state.cac_dtp_table) {
@@ -624,8 +608,6 @@ static int get_cac_tdp_table(
return -ENOMEM;
}
- memset(hwmgr->dyn_state.cac_dtp_table, 0x00, table_size);
-
if (table->ucRevId < 3) {
const ATOM_Tonga_PowerTune_Table *tonga_table =
(ATOM_Tonga_PowerTune_Table *)table;
@@ -725,8 +707,6 @@ static int get_mm_clock_voltage_table(
if (NULL == mm_table)
return -ENOMEM;
- memset(mm_table, 0x00, table_size);
-
mm_table->count = mm_dependency_table->ucNumEntries;
for (i = 0; i < mm_dependency_table->ucNumEntries; i++) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 2716721e5453..485f7ebdc754 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -24,7 +24,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
-
+#include <drm/amdgpu_drm.h>
#include "processpptables.h"
#include <atom-types.h>
#include <atombios.h>
@@ -790,6 +790,39 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
return pstate;
}
+static unsigned char soft_dummy_pp_table[] = {
+ 0xe1, 0x01, 0x06, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x00, 0x4a, 0x00, 0x6c, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x42, 0x00, 0x02, 0x00, 0x00, 0x00, 0x13, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x4e, 0x00, 0x88, 0x00, 0x00, 0x9e, 0x00, 0x17, 0x00, 0x00, 0x00, 0x9e, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x02, 0x02, 0x00, 0x00, 0x01, 0x01, 0x01, 0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x07, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x18, 0x05, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x00, 0x43, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0x8e, 0x01, 0x00, 0x00, 0xb8, 0x01, 0x00, 0x00, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c,
+ 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x70, 0x00, 0x91, 0xf4, 0x00,
+ 0x64, 0x00, 0x40, 0x19, 0x01, 0x5a, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a,
+ 0x00, 0x00, 0x09, 0x30, 0x75, 0x00, 0x30, 0x75, 0x00, 0x40, 0x9c, 0x00, 0x40, 0x9c, 0x00, 0x59,
+ 0xd8, 0x00, 0x59, 0xd8, 0x00, 0x91, 0xf4, 0x00, 0x91, 0xf4, 0x00, 0x0e, 0x28, 0x01, 0x0e, 0x28,
+ 0x01, 0x90, 0x5f, 0x01, 0x90, 0x5f, 0x01, 0x00, 0x77, 0x01, 0x00, 0x77, 0x01, 0xca, 0x91, 0x01,
+ 0xca, 0x91, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01,
+ 0x7c, 0x00, 0x02, 0x70, 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a,
+ 0x00, 0x07, 0x08, 0x08, 0x00, 0x08, 0x00, 0x01, 0x02, 0x02, 0x02, 0x01, 0x02, 0x02, 0x02, 0x03,
+ 0x02, 0x04, 0x02, 0x00, 0x08, 0x40, 0x9c, 0x00, 0x30, 0x75, 0x00, 0x74, 0xb5, 0x00, 0xa0, 0x8c,
+ 0x00, 0x60, 0xea, 0x00, 0x74, 0xb5, 0x00, 0x0e, 0x28, 0x01, 0x60, 0xea, 0x00, 0x90, 0x5f, 0x01,
+ 0x40, 0x19, 0x01, 0xb2, 0xb0, 0x01, 0x90, 0x5f, 0x01, 0xc0, 0xd4, 0x01, 0x00, 0x77, 0x01, 0x5e,
+ 0xff, 0x01, 0xca, 0x91, 0x01, 0x08, 0x80, 0x00, 0x00, 0x7e, 0x00, 0x01, 0x7c, 0x00, 0x02, 0x70,
+ 0x00, 0x03, 0x64, 0x00, 0x04, 0x5a, 0x00, 0x05, 0x52, 0x00, 0x06, 0x4a, 0x00, 0x07, 0x00, 0x08,
+ 0x80, 0x00, 0x30, 0x75, 0x00, 0x7e, 0x00, 0x40, 0x9c, 0x00, 0x7c, 0x00, 0x59, 0xd8, 0x00, 0x70,
+ 0x00, 0xdc, 0x0b, 0x01, 0x64, 0x00, 0x80, 0x38, 0x01, 0x5a, 0x00, 0x80, 0x38, 0x01, 0x52, 0x00,
+ 0x80, 0x38, 0x01, 0x4a, 0x00, 0x80, 0x38, 0x01, 0x08, 0x30, 0x75, 0x00, 0x80, 0x00, 0xa0, 0x8c,
+ 0x00, 0x7e, 0x00, 0x71, 0xa5, 0x00, 0x7c, 0x00, 0xe5, 0xc8, 0x00, 0x74, 0x00, 0x91, 0xf4, 0x00,
+ 0x66, 0x00, 0x40, 0x19, 0x01, 0x58, 0x00, 0x0e, 0x28, 0x01, 0x52, 0x00, 0x80, 0x38, 0x01, 0x4a,
+ 0x00
+};
static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
struct pp_hwmgr *hwmgr)
@@ -799,12 +832,17 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
uint16_t size;
if (!table_addr) {
- table_addr = cgs_atom_get_data_table(hwmgr->device,
- GetIndexIntoMasterTable(DATA, PowerPlayInfo),
- &size, &frev, &crev);
-
- hwmgr->soft_pp_table = table_addr;
- hwmgr->soft_pp_table_size = size;
+ if (hwmgr->chip_id == CHIP_RAVEN) {
+ table_addr = &soft_dummy_pp_table[0];
+ hwmgr->soft_pp_table = &soft_dummy_pp_table[0];
+ hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table);
+ } else {
+ table_addr = cgs_atom_get_data_table(hwmgr->device,
+ GetIndexIntoMasterTable(DATA, PowerPlayInfo),
+ &size, &frev, &crev);
+ hwmgr->soft_pp_table = table_addr;
+ hwmgr->soft_pp_table_size = size;
+ }
}
return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
@@ -924,15 +962,14 @@ int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
}
}
- if ((0 == result) &&
- (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot)))
- result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware));
+ if ((0 == result) && (0 != (ps->classification.flags & PP_StateClassificationFlag_Boot))) {
+ if (hwmgr->chip_family < AMDGPU_FAMILY_RV)
+ result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(ps->hardware));
+ }
return result;
}
-
-
static int init_powerplay_tables(
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table
@@ -1615,85 +1652,53 @@ static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id == CHIP_RAVEN)
return 0;
- if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
- kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
- hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
+ hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
- if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.vddci_dependency_on_mclk);
- hwmgr->dyn_state.vddci_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddci_dependency_on_mclk);
+ hwmgr->dyn_state.vddci_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.vddc_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.vddc_dependency_on_mclk);
- hwmgr->dyn_state.vddc_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dependency_on_mclk);
+ hwmgr->dyn_state.vddc_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
- kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk);
- hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL;
- }
+ kfree(hwmgr->dyn_state.mvdd_dependency_on_mclk);
+ hwmgr->dyn_state.mvdd_dependency_on_mclk = NULL;
- if (NULL != hwmgr->dyn_state.valid_mclk_values) {
- kfree(hwmgr->dyn_state.valid_mclk_values);
- hwmgr->dyn_state.valid_mclk_values = NULL;
- }
+ kfree(hwmgr->dyn_state.valid_mclk_values);
+ hwmgr->dyn_state.valid_mclk_values = NULL;
- if (NULL != hwmgr->dyn_state.valid_sclk_values) {
- kfree(hwmgr->dyn_state.valid_sclk_values);
- hwmgr->dyn_state.valid_sclk_values = NULL;
- }
+ kfree(hwmgr->dyn_state.valid_sclk_values);
+ hwmgr->dyn_state.valid_sclk_values = NULL;
- if (NULL != hwmgr->dyn_state.cac_leakage_table) {
- kfree(hwmgr->dyn_state.cac_leakage_table);
- hwmgr->dyn_state.cac_leakage_table = NULL;
- }
+ kfree(hwmgr->dyn_state.cac_leakage_table);
+ hwmgr->dyn_state.cac_leakage_table = NULL;
- if (NULL != hwmgr->dyn_state.vddc_phase_shed_limits_table) {
- kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table);
- hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_phase_shed_limits_table);
+ hwmgr->dyn_state.vddc_phase_shed_limits_table = NULL;
- if (NULL != hwmgr->dyn_state.vce_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table);
- hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vce_clock_voltage_dependency_table);
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.uvd_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
- hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.samu_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table);
- hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.samu_clock_voltage_dependency_table);
+ hwmgr->dyn_state.samu_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.acp_clock_voltage_dependency_table) {
- kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table);
- hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL;
- }
+ kfree(hwmgr->dyn_state.acp_clock_voltage_dependency_table);
+ hwmgr->dyn_state.acp_clock_voltage_dependency_table = NULL;
- if (NULL != hwmgr->dyn_state.cac_dtp_table) {
- kfree(hwmgr->dyn_state.cac_dtp_table);
- hwmgr->dyn_state.cac_dtp_table = NULL;
- }
+ kfree(hwmgr->dyn_state.cac_dtp_table);
+ hwmgr->dyn_state.cac_dtp_table = NULL;
- if (NULL != hwmgr->dyn_state.ppm_parameter_table) {
- kfree(hwmgr->dyn_state.ppm_parameter_table);
- hwmgr->dyn_state.ppm_parameter_table = NULL;
- }
+ kfree(hwmgr->dyn_state.ppm_parameter_table);
+ hwmgr->dyn_state.ppm_parameter_table = NULL;
- if (NULL != hwmgr->dyn_state.vdd_gfx_dependency_on_sclk) {
- kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
- hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
- }
+ kfree(hwmgr->dyn_state.vdd_gfx_dependency_on_sclk);
+ hwmgr->dyn_state.vdd_gfx_dependency_on_sclk = NULL;
- if (NULL != hwmgr->dyn_state.vq_budgeting_table) {
- kfree(hwmgr->dyn_state.vq_budgeting_table);
- hwmgr->dyn_state.vq_budgeting_table = NULL;
- }
+ kfree(hwmgr->dyn_state.vq_budgeting_table);
+ hwmgr->dyn_state.vq_budgeting_table = NULL;
return 0;
}
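
The pp_tables_uninitialize() rewrite above relies on kfree(NULL) being a defined no-op, so every NULL check before kfree() was dead weight. The pattern in isolation, as a minimal sketch:

/* Before: redundant guard -- kfree() already tolerates NULL. */
if (table != NULL) {
        kfree(table);
        table = NULL;
}

/* After: identical behaviour, less noise. */
kfree(table);
table = NULL;
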
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
index 2c3e6baf2524..9186b0788fac 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.c
@@ -38,20 +38,17 @@
#include "pp_soc15.h"
#define RAVEN_MAX_DEEPSLEEP_DIVIDER_ID 5
-#define RAVEN_MINIMUM_ENGINE_CLOCK 800 //8Mhz, the low boundary of engine clock allowed on this chip
+#define RAVEN_MINIMUM_ENGINE_CLOCK 800 /* 8MHz, the low boundary of engine clock allowed on this chip */
#define SCLK_MIN_DIV_INTV_SHIFT 12
-#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 //100mhz
+#define RAVEN_DISPCLK_BYPASS_THRESHOLD 10000 /* 100MHz */
#define SMC_RAM_END 0x40000
static const unsigned long PhwRaven_Magic = (unsigned long) PHM_Rv_Magic;
+
+
int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req);
-struct phm_vq_budgeting_record rv_vqtable[] = {
- /* _TBD
- * CUs, SSP low, SSP High, Min Sclk Low, Min Sclk, High, AWD/non-AWD, DCLK, ECLK, Sustainable Sclk, Sustainable CUs */
- { 8, 0, 45, 0, 0, VQ_DisplayConfig_NoneAWD, 80000, 120000, 4, 0 },
-};
static struct rv_power_state *cast_rv_ps(struct pp_hw_power_state *hw_ps)
{
@@ -70,101 +67,27 @@ static const struct rv_power_state *cast_const_rv_ps(
return (struct rv_power_state *)hw_ps;
}
-static int rv_init_vq_budget_table(struct pp_hwmgr *hwmgr)
-{
- uint32_t table_size, i;
- struct phm_vq_budgeting_table *ptable;
- uint32_t num_entries = ARRAY_SIZE(rv_vqtable);
-
- if (hwmgr->dyn_state.vq_budgeting_table != NULL)
- return 0;
-
- table_size = sizeof(struct phm_vq_budgeting_table) +
- sizeof(struct phm_vq_budgeting_record) * (num_entries - 1);
-
- ptable = kzalloc(table_size, GFP_KERNEL);
- if (NULL == ptable)
- return -ENOMEM;
-
- ptable->numEntries = (uint8_t) num_entries;
-
- for (i = 0; i < ptable->numEntries; i++) {
- ptable->entries[i].ulCUs = rv_vqtable[i].ulCUs;
- ptable->entries[i].ulSustainableSOCPowerLimitLow = rv_vqtable[i].ulSustainableSOCPowerLimitLow;
- ptable->entries[i].ulSustainableSOCPowerLimitHigh = rv_vqtable[i].ulSustainableSOCPowerLimitHigh;
- ptable->entries[i].ulMinSclkLow = rv_vqtable[i].ulMinSclkLow;
- ptable->entries[i].ulMinSclkHigh = rv_vqtable[i].ulMinSclkHigh;
- ptable->entries[i].ucDispConfig = rv_vqtable[i].ucDispConfig;
- ptable->entries[i].ulDClk = rv_vqtable[i].ulDClk;
- ptable->entries[i].ulEClk = rv_vqtable[i].ulEClk;
- ptable->entries[i].ulSustainableSclk = rv_vqtable[i].ulSustainableSclk;
- ptable->entries[i].ulSustainableCUs = rv_vqtable[i].ulSustainableCUs;
- }
-
- hwmgr->dyn_state.vq_budgeting_table = ptable;
-
- return 0;
-}
-
static int rv_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_hwmgr = (struct rv_hwmgr *)(hwmgr->backend);
- struct cgs_system_info sys_info = {0};
- int result;
- rv_hwmgr->ddi_power_gating_disabled = 0;
- rv_hwmgr->bapm_enabled = 1;
rv_hwmgr->dce_slow_sclk_threshold = 30000;
- rv_hwmgr->disable_driver_thermal_policy = 1;
rv_hwmgr->thermal_auto_throttling_treshold = 0;
rv_hwmgr->is_nb_dpm_enabled = 1;
rv_hwmgr->dpm_flags = 1;
- rv_hwmgr->disable_smu_acp_s3_handshake = 1;
- rv_hwmgr->disable_notify_smu_vpu_recovery = 0;
rv_hwmgr->gfx_off_controled_by_driver = false;
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicM3Arbiter);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDynamicPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SamuPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ACP);
+ rv_hwmgr->need_min_deep_sleep_dcefclk = true;
+ rv_hwmgr->num_active_display = 0;
+ rv_hwmgr->deep_sleep_dcefclk = 0;
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_GFXDynamicMGPowerGating);
-
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkThrottleLowNotification);
- phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableVoltageIsland);
-
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DynamicUVDState);
-
- sys_info.size = sizeof(struct cgs_system_info);
- sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
- result = cgs_query_system_info(hwmgr->device, &sys_info);
- if (!result) {
- if (sys_info.value & AMD_PG_SUPPORT_GFX_DMG)
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_GFXDynamicMGPowerGating);
- }
-
+ PHM_PlatformCaps_PowerPlaySupport);
return 0;
}
@@ -234,102 +157,88 @@ static int rv_construct_boot_state(struct pp_hwmgr *hwmgr)
return 0;
}
-static int rv_tf_set_clock_limit(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
struct PP_Clocks clocks = {0};
struct pp_display_clock_request clock_req;
clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
- clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
clock_req.clock_type = amd_pp_dcf_clock;
clock_req.clock_freq_in_khz = clocks.dcefClock * 10;
- if (clocks.dcefClock == 0 && clocks.dcefClockInSR == 0)
- clock_req.clock_freq_in_khz = rv_data->dcf_actual_hard_min_freq;
-
PP_ASSERT_WITH_CODE(!rv_display_clock_voltage_request(hwmgr, &clock_req),
"Attempt to set DCF Clock Failed!", return -EINVAL);
- if(rv_data->need_min_deep_sleep_dcefclk && 0 != clocks.dcefClockInSR)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetMinDeepSleepDcefclk,
- clocks.dcefClockInSR / 100);
- /*
- if(!rv_data->isp_tileA_power_gated || !rv_data->isp_tileB_power_gated) {
- if ((hwmgr->ispArbiter.iclk != 0) && (rv_data->ISPActualHardMinFreq != (hwmgr->ispArbiter.iclk / 100) )) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetHardMinIspclkByFreq, hwmgr->ispArbiter.iclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->ISPActualHardMinFreq),
- }
- } */
-
if (((hwmgr->uvd_arbiter.vclk_soft_min / 100) != rv_data->vclk_soft_min) ||
((hwmgr->uvd_arbiter.dclk_soft_min / 100) != rv_data->dclk_soft_min)) {
rv_data->vclk_soft_min = hwmgr->uvd_arbiter.vclk_soft_min / 100;
rv_data->dclk_soft_min = hwmgr->uvd_arbiter.dclk_soft_min / 100;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinVcn,
(rv_data->vclk_soft_min << 16) | rv_data->vclk_soft_min);
}
if((hwmgr->gfx_arbiter.sclk_hard_min != 0) &&
((hwmgr->gfx_arbiter.sclk_hard_min / 100) != rv_data->soc_actual_hard_min_freq)) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
hwmgr->gfx_arbiter.sclk_hard_min / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->soc_actual_hard_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->soc_actual_hard_min_freq);
}
if ((hwmgr->gfx_arbiter.gfxclk != 0) &&
(rv_data->gfx_actual_soft_min_freq != (hwmgr->gfx_arbiter.gfxclk))) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinVideoGfxclkFreq,
hwmgr->gfx_arbiter.gfxclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->gfx_actual_soft_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->gfx_actual_soft_min_freq);
}
if ((hwmgr->gfx_arbiter.fclk != 0) &&
(rv_data->fabric_actual_soft_min_freq != (hwmgr->gfx_arbiter.fclk / 100))) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinVideoFclkFreq,
hwmgr->gfx_arbiter.fclk / 100);
- rv_read_arg_from_smc(hwmgr->smumgr, &rv_data->fabric_actual_soft_min_freq);
+ rv_read_arg_from_smc(hwmgr, &rv_data->fabric_actual_soft_min_freq);
}
return 0;
}
-static int rv_tf_set_num_active_display(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_set_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
- uint32_t num_of_active_displays = 0;
- struct cgs_display_info info = {0};
+ struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
- cgs_get_active_displays_info(hwmgr->device, &info);
- num_of_active_displays = info.display_count;
+ if (rv_data->need_min_deep_sleep_dcefclk && rv_data->deep_sleep_dcefclk != clock/100) {
+ rv_data->deep_sleep_dcefclk = clock/100;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetMinDeepSleepDcefclk,
+ rv_data->deep_sleep_dcefclk);
+ }
+ return 0;
+}
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+static int rv_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
+{
+ struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
+
+ if (rv_data->num_active_display != count) {
+ rv_data->num_active_display = count;
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplayCount,
- num_of_active_displays);
+ rv_data->num_active_display);
+ }
+
return 0;
}
-static const struct phm_master_table_item rv_set_power_state_list[] = {
- { .tableFunction = rv_tf_set_clock_limit },
- { .tableFunction = rv_tf_set_num_active_display },
- { }
-};
-
-static const struct phm_master_table_header rv_set_power_state_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_set_power_state_list
-};
+static int rv_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+{
+ return rv_set_clock_limit(hwmgr, input);
+}
-static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result)
+static int rv_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
@@ -340,20 +249,13 @@ static int rv_tf_init_power_gate_state(struct pp_hwmgr *hwmgr, void *input,
return 0;
}
-static const struct phm_master_table_item rv_setup_asic_list[] = {
- { .tableFunction = rv_tf_init_power_gate_state },
- { }
-};
-static const struct phm_master_table_header rv_setup_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_setup_asic_list
-};
+static int rv_setup_asic_task(struct pp_hwmgr *hwmgr)
+{
+ return rv_init_power_gate_state(hwmgr);
+}
-static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
@@ -365,66 +267,42 @@ static int rv_tf_reset_cc6_data(struct pp_hwmgr *hwmgr,
return 0;
}
-static const struct phm_master_table_item rv_power_down_asic_list[] = {
- { .tableFunction = rv_tf_reset_cc6_data },
- { }
-};
-
-static const struct phm_master_table_header rv_power_down_asic_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_power_down_asic_list
-};
-
+static int rv_power_off_asic(struct pp_hwmgr *hwmgr)
+{
+ return rv_reset_cc6_data(hwmgr);
+}
-static int rv_tf_disable_gfx_off(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_DisableGfxOff);
return 0;
}
-static const struct phm_master_table_item rv_disable_dpm_list[] = {
- { .tableFunction = rv_tf_disable_gfx_off },
- { },
-};
-
-
-static const struct phm_master_table_header rv_disable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_disable_dpm_list
-};
+static int rv_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return rv_disable_gfx_off(hwmgr);
+}
-static int rv_tf_enable_gfx_off(struct pp_hwmgr *hwmgr,
- void *input, void *output,
- void *storage, int result)
+static int rv_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
if (rv_data->gfx_off_controled_by_driver)
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableGfxOff);
return 0;
}
-static const struct phm_master_table_item rv_enable_dpm_list[] = {
- { .tableFunction = rv_tf_enable_gfx_off },
- { },
-};
-
-static const struct phm_master_table_header rv_enable_dpm_master = {
- 0,
- PHM_MasterTableFlag_None,
- rv_enable_dpm_list
-};
+static int rv_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+ return rv_enable_gfx_off(hwmgr);
+}
static int rv_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
@@ -505,7 +383,7 @@ static int rv_populate_clock_table(struct pp_hwmgr *hwmgr)
DpmClocks_t *table = &(rv_data->clock_table);
struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- result = rv_copy_table_from_smc(hwmgr->smumgr, (uint8_t *)table, CLOCKTABLE);
+ result = rv_copy_table_from_smc(hwmgr, (uint8_t *)table, CLOCKTABLE);
PP_ASSERT_WITH_CODE((0 == result),
"Attempt to copy clock table from smc failed",
@@ -563,9 +441,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
return result;
}
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerPlaySupport);
-
rv_populate_clock_table(hwmgr);
result = rv_get_system_info_data(hwmgr);
@@ -576,40 +451,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
rv_construct_boot_state(hwmgr);
- result = phm_construct_table(hwmgr, &rv_setup_asic_master,
- &(hwmgr->setup_asic));
- if (result != 0) {
- pr_err("Fail to construct setup ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_power_down_asic_master,
- &(hwmgr->power_down_asic));
- if (result != 0) {
- pr_err("Fail to construct power down ASIC\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_set_power_state_master,
- &(hwmgr->set_power_state));
- if (result != 0) {
- pr_err("Fail to construct set_power_state\n");
- return result;
- }
-
- result = phm_construct_table(hwmgr, &rv_disable_dpm_master,
- &(hwmgr->disable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to disable_dynamic_state\n");
- return result;
- }
- result = phm_construct_table(hwmgr, &rv_enable_dpm_master,
- &(hwmgr->enable_dynamic_state_management));
- if (result != 0) {
- pr_err("Fail to enable_dynamic_state\n");
- return result;
- }
-
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
RAVEN_MAX_HARDWARE_POWERLEVELS;
@@ -624,8 +465,6 @@ static int rv_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- rv_init_vq_budget_table(hwmgr);
-
return result;
}
@@ -634,46 +473,21 @@ static int rv_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
struct rv_hwmgr *rv_data = (struct rv_hwmgr *)(hwmgr->backend);
struct rv_clock_voltage_information *pinfo = &(rv_data->clock_vol_info);
- phm_destroy_table(hwmgr, &(hwmgr->set_power_state));
- phm_destroy_table(hwmgr, &(hwmgr->enable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->disable_dynamic_state_management));
- phm_destroy_table(hwmgr, &(hwmgr->power_down_asic));
- phm_destroy_table(hwmgr, &(hwmgr->setup_asic));
-
- if (pinfo->vdd_dep_on_dcefclk) {
- kfree(pinfo->vdd_dep_on_dcefclk);
- pinfo->vdd_dep_on_dcefclk = NULL;
- }
- if (pinfo->vdd_dep_on_socclk) {
- kfree(pinfo->vdd_dep_on_socclk);
- pinfo->vdd_dep_on_socclk = NULL;
- }
- if (pinfo->vdd_dep_on_fclk) {
- kfree(pinfo->vdd_dep_on_fclk);
- pinfo->vdd_dep_on_fclk = NULL;
- }
- if (pinfo->vdd_dep_on_dispclk) {
- kfree(pinfo->vdd_dep_on_dispclk);
- pinfo->vdd_dep_on_dispclk = NULL;
- }
- if (pinfo->vdd_dep_on_dppclk) {
- kfree(pinfo->vdd_dep_on_dppclk);
- pinfo->vdd_dep_on_dppclk = NULL;
- }
- if (pinfo->vdd_dep_on_phyclk) {
- kfree(pinfo->vdd_dep_on_phyclk);
- pinfo->vdd_dep_on_phyclk = NULL;
- }
-
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
-
- if (NULL != hwmgr->dyn_state.vq_budgeting_table) {
- kfree(hwmgr->dyn_state.vq_budgeting_table);
- hwmgr->dyn_state.vq_budgeting_table = NULL;
- }
+ kfree(pinfo->vdd_dep_on_dcefclk);
+ pinfo->vdd_dep_on_dcefclk = NULL;
+ kfree(pinfo->vdd_dep_on_socclk);
+ pinfo->vdd_dep_on_socclk = NULL;
+ kfree(pinfo->vdd_dep_on_fclk);
+ pinfo->vdd_dep_on_fclk = NULL;
+ kfree(pinfo->vdd_dep_on_dispclk);
+ pinfo->vdd_dep_on_dispclk = NULL;
+ kfree(pinfo->vdd_dep_on_dppclk);
+ pinfo->vdd_dep_on_dppclk = NULL;
+ kfree(pinfo->vdd_dep_on_phyclk);
+ pinfo->vdd_dep_on_phyclk = NULL;
+
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
kfree(hwmgr->backend);
hwmgr->backend = NULL;
@@ -687,12 +501,12 @@ static int rv_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return 0;
}
-static int rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t rv_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
return 0;
}
-static int rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t rv_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
return 0;
}
@@ -711,18 +525,9 @@ static int rv_dpm_get_pp_table_entry_callback(
{
struct rv_power_state *rv_ps = cast_rv_ps(hw_ps);
- const ATOM_PPLIB_CZ_CLOCK_INFO *rv_clock_info = clock_info;
-
- struct phm_clock_voltage_dependency_table *table =
- hwmgr->dyn_state.vddc_dependency_on_sclk;
- uint8_t clock_info_index = rv_clock_info->index;
-
- if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
- clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
-
- rv_ps->levels[index].engine_clock = table->entries[clock_info_index].clk;
- rv_ps->levels[index].vddc_index = (uint8_t)table->entries[clock_info_index].v;
+ rv_ps->levels[index].engine_clock = 0;
+ rv_ps->levels[index].vddc_index = 0;
rv_ps->level = index + 1;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
@@ -814,12 +619,12 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
ps = cast_const_rv_ps(state);
level_index = index > ps->level - 1 ? ps->level - 1 : index;
- level->coreClock = ps->levels[level_index].engine_clock;
+ level->coreClock = 30000;
if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
for (i = 1; i < ps->level; i++) {
if (ps->levels[i].engine_clock > data->dce_slow_sclk_threshold) {
- level->coreClock = ps->levels[i].engine_clock;
+ level->coreClock = 30000;
break;
}
}
@@ -829,8 +634,9 @@ static int rv_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_p
vol_dep_record_index = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
level->memory_clock =
data->clock_vol_info.vdd_dep_on_fclk->entries[vol_dep_record_index].clk;
- } else
+ } else {
level->memory_clock = data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk;
+ }
level->nonLocalMemoryFreq = 0;
level->nonLocalMemoryWidth = 0;
@@ -993,7 +799,7 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, msg,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, msg,
clk_freq);
return result;
@@ -1001,7 +807,8 @@ int rv_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
static int rv_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
- return -EINVAL;
+ clocks->engine_max_clock = 80000; /* driver can't get the engine clock; temporarily hard-coded to 800MHz */
+ return 0;
}
static int rv_thermal_get_temperature(struct pp_hwmgr *hwmgr)
@@ -1058,6 +865,13 @@ static const struct pp_hwmgr_func rv_hwmgr_funcs = {
.get_clock_by_type_with_voltage = rv_get_clock_by_type_with_voltage,
.get_max_high_clocks = rv_get_max_high_clocks,
.read_sensor = rv_read_sensor,
+ .set_active_display_count = rv_set_active_display_count,
+ .set_deep_sleep_dcefclk = rv_set_deep_sleep_dcefclk,
+ .dynamic_state_management_enable = rv_enable_dpm_tasks,
+ .power_off_asic = rv_power_off_asic,
+ .asic_setup = rv_setup_asic_task,
+ .power_state_set = rv_set_power_state_tasks,
+ .dynamic_state_management_disable = rv_disable_dpm_tasks,
};
int rv_init_function_pointers(struct pp_hwmgr *hwmgr)
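
rv_hwmgr.c above drops the eventmgr-era phm "master tables" (setup_asic, power_down_asic, set_power_state, enable/disable DPM) in favour of plain callbacks on pp_hwmgr_func. A hedged sketch of what dispatch looks like after the change (illustrative caller; the real dispatch lives in the phm layer, not in this patch):

/* Illustrative phm-layer dispatch once the master tables are gone. */
static int example_phm_setup_asic(struct pp_hwmgr *hwmgr)
{
        if (hwmgr == NULL || hwmgr->hwmgr_func == NULL)
                return -EINVAL;

        /* Previously: phm_dispatch_table(hwmgr, &hwmgr->setup_asic, ...) */
        if (hwmgr->hwmgr_func->asic_setup)
                return hwmgr->hwmgr_func->asic_setup(hwmgr);

        return 0;
}
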
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
index 2472b50e54cf..68d61bd95ca0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/rv_hwmgr.h
@@ -293,7 +293,9 @@ struct rv_hwmgr {
DpmClocks_t clock_table;
uint32_t active_process_mask;
- bool need_min_deep_sleep_dcefclk; /* disabled by default */
+ bool need_min_deep_sleep_dcefclk;
+ uint32_t deep_sleep_dcefclk;
+ uint32_t num_active_display;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 261b828ad590..69a0678ace98 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -27,21 +27,21 @@
static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_UVDDPM_Enable :
PPSMC_MSG_UVDDPM_Disable);
}
static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_VCEDPM_Enable :
PPSMC_MSG_VCEDPM_Disable);
}
static int smu7_enable_disable_samu_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, enable ?
+ return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_SAMUDPM_Enable :
PPSMC_MSG_SAMUDPM_Disable);
}
@@ -70,7 +70,7 @@ static int smu7_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_UVDPowerOFF);
return 0;
}
@@ -80,10 +80,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
if (phm_cf_want_uvd_power_gating(hwmgr)) {
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDPowerON, 1);
} else {
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDPowerON, 0);
}
}
@@ -94,7 +94,7 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerOFF);
return 0;
}
@@ -102,7 +102,7 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_VCEPowerON);
return 0;
}
@@ -111,7 +111,7 @@ static int smu7_powerdown_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SAMPowerOFF);
return 0;
}
@@ -120,7 +120,7 @@ static int smu7_powerup_samu(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SamuPowerGating))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SAMPowerON);
return 0;
}
@@ -140,7 +140,7 @@ int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr)
return 0;
}
-int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -166,10 +166,9 @@ int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
smu7_update_uvd_dpm(hwmgr, false);
}
- return 0;
}
-int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -194,7 +193,6 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
AMD_PG_STATE_UNGATE);
smu7_update_vce_dpm(hwmgr, false);
}
- return 0;
}
int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate)
@@ -237,7 +235,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -247,7 +245,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -260,7 +258,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -271,7 +269,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -284,7 +282,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_RLC_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -297,7 +295,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CP_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -311,7 +309,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
CG_GFX_OTHERS_MGCG_MASK);
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -331,7 +329,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -341,7 +339,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -354,7 +352,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -365,7 +363,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -378,7 +376,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -388,7 +386,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -401,7 +399,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -412,7 +410,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -425,7 +423,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
@@ -436,7 +434,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -449,7 +447,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_ROM_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, msg, value))
+ hwmgr, msg, value))
return -EINVAL;
}
break;
@@ -489,9 +487,9 @@ int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
active_cus = sys_info.value;
if (enable)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GFX_CU_PG_ENABLE, active_cus);
else
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GFX_CU_PG_DISABLE);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index c96ed9ed7eaf..7b54d48b2ce2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -27,8 +27,8 @@
#include "smu7_hwmgr.h"
#include "pp_asicblocks.h"
-int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
-int smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
+void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate);
+void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr);
int smu7_powergate_samu(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c2743233ba10..8dbe9148aad3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>
+#include <drm/amdgpu_drm.h>
#include "pp_acpi.h"
#include "ppatomctrl.h"
#include "atombios.h"
@@ -163,7 +164,7 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
return 0;
}
@@ -300,28 +301,28 @@ static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
"Failed to retrieve SVI2 VDDC table from dependancy table.", return result;);
}
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDC);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
PP_ASSERT_WITH_CODE(
(data->vddc_voltage_table.count <= tmp),
"Too many voltage values for VDDC. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddc_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
PP_ASSERT_WITH_CODE(
(data->vddgfx_voltage_table.count <= tmp),
"Too many voltage values for VDDC. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddgfx_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDCI);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
PP_ASSERT_WITH_CODE(
(data->vddci_voltage_table.count <= tmp),
"Too many voltage values for VDDCI. Trimming to fit state table.",
phm_trim_voltage_table_to_fit_state_table(tmp,
&(data->vddci_voltage_table)));
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_MVDD);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
PP_ASSERT_WITH_CODE(
(data->mvdd_voltage_table.count <= tmp),
"Too many voltage values for MVDD. Trimming to fit state table.",
@@ -387,6 +388,7 @@ static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int i;
/* Clear reset for voting clients before enabling DPM */
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -394,50 +396,26 @@ static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, data->voting_rights_clients0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, data->voting_rights_clients1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, data->voting_rights_clients2);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, data->voting_rights_clients3);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, data->voting_rights_clients4);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, data->voting_rights_clients5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, data->voting_rights_clients6);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, data->voting_rights_clients7);
-
+ for (i = 0; i < 8; i++)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0 + i * 4,
+ data->voting_rights_clients[i]);
return 0;
}
static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
+ int i;
+
/* Reset voting clients before disabling DPM */
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_0, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_1, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_2, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_3, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_4, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_5, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_6, 0);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- ixCG_FREQ_TRAN_VOTING_7, 0);
+ for (i = 0; i < 8; i++)
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
return 0;
}
@@ -493,7 +471,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
}
/**
@@ -551,7 +529,7 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
data->pcie_gen_performance = data->pcie_gen_power_saving;
data->pcie_lane_performance = data->pcie_lane_power_saving;
}
- tmp = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_LINK);
+ tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
tmp,
MAX_REGULAR_DPM_NUMBER);
@@ -607,13 +585,20 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
data->dpm_table.pcie_speed_table.count = 6;
}
/* Populate last level for boot PCIE level, but do not increment count. */
- phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
+ get_pcie_gen_support(data->pcie_gen_cap,
+ PP_Max_PCIEGen),
+ data->vbios_boot_state.pcie_lane_bootup_value);
+ } else {
+ phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
data->dpm_table.pcie_speed_table.count,
get_pcie_gen_support(data->pcie_gen_cap,
PP_Min_PCIEGen),
get_pcie_lane_support(data->pcie_lane_cap,
PP_Max_PCIELane));
-
+ }
return 0;
}
@@ -625,27 +610,27 @@ static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
phm_reset_single_dpm_table(
&data->dpm_table.sclk_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_GRAPHICS),
MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.mclk_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.vddc_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_VDDC),
MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.vddci_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
phm_reset_single_dpm_table(
&data->dpm_table.mvdd_table,
- smum_get_mac_definition(hwmgr->smumgr,
+ smum_get_mac_definition(hwmgr,
SMU_MAX_LEVELS_MVDD),
MAX_REGULAR_DPM_NUMBER);
return 0;
@@ -689,7 +674,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
allowed_vdd_sclk_table->entries[i].clk) {
data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
allowed_vdd_sclk_table->entries[i].clk;
- data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */
+ data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.sclk_table.count++;
}
}
@@ -703,7 +688,7 @@ static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
allowed_vdd_mclk_table->entries[i].clk) {
data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
allowed_vdd_mclk_table->entries[i].clk;
- data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */
+ data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
data->dpm_table.mclk_table.count++;
}
}
@@ -855,7 +840,7 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot))
- return smum_send_msg_to_smc(hwmgr->smumgr,
+ return smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableVRHotGPIOInterrupt);
return 0;
@@ -873,7 +858,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_EnableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
return 0;
}
@@ -883,7 +868,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
return 0;
}
@@ -892,12 +877,12 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
PP_ASSERT_WITH_CODE(false,
"Attempt to enable Master Deep Sleep switch failed!",
return -EINVAL);
} else {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
+ if (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MASTER_DeepSleep_OFF)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
@@ -912,7 +897,7 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr->smumgr,
+ if (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MASTER_DeepSleep_OFF)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
@@ -928,12 +913,12 @@ static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t soft_register_value = 0;
uint32_t handshake_disables_offset = data->soft_regs_start
- + smum_get_offsetof(hwmgr->smumgr,
+ + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, HandshakeDisables);
soft_register_value = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC, handshake_disables_offset);
- soft_register_value |= smum_get_mac_definition(hwmgr->smumgr,
+ soft_register_value |= smum_get_mac_definition(hwmgr,
SMU_UVD_MCLK_HANDSHAKE_DISABLE);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
handshake_disables_offset, soft_register_value);
@@ -947,7 +932,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
/* enable SCLK dpm */
if (!data->sclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Enable)),
+ (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
"Failed to enable SCLK DPM during DPM Start Function!",
return -EINVAL);
@@ -956,20 +941,31 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
smu7_disable_handshake_uvd(hwmgr);
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ (0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MCLKDPM_Enable)),
"Failed to enable MCLK DPM during DPM Start Function!",
return -EINVAL);
PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
- udelay(10);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
+ } else {
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
+ udelay(10);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
+ }
}
return 0;
@@ -993,11 +989,15 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
data->soft_regs_start +
- smum_get_offsetof(hwmgr->smumgr, SMU_SoftRegisters,
+ smum_get_offsetof(hwmgr, SMU_SoftRegisters,
VoltageChangeTimeout), 0x1000);
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
SWRST_COMMAND_1, RESETLC, 0x0);
+ if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
+ cgs_write_register(hwmgr->device, 0x1488,
+ (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
+
if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
pr_err("Failed to enable Sclk DPM and Mclk DPM!");
return -EINVAL;
@@ -1006,7 +1006,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
/* enable PCIE dpm */
if (0 == data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ (0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_Enable)),
"Failed to enable pcie DPM during DPM Start Function!",
return -EINVAL);
@@ -1014,7 +1014,7 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition)) {
- PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_EnableACDCGPIOInterrupt)),
"Failed to enable AC DC GPIO Interrupt!",
);
@@ -1032,7 +1032,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable SCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
}
/* disable MCLK dpm */
@@ -1040,7 +1040,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable MCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
}
return 0;
@@ -1060,7 +1060,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
/* disable PCIE dpm */
if (!data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
- (smum_send_msg_to_smc(hwmgr->smumgr,
+ (smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_Disable) == 0),
"Failed to disable pcie DPM during DPM Stop Function!",
return -EINVAL);
@@ -1072,7 +1072,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
"Trying to disable voltage DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Voltage_Cntl_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
return 0;
}
@@ -1226,7 +1226,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_NoDisplay);
+ smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1361,14 +1361,14 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->vddc_vddgfx_delta = 300;
data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
- data->voting_rights_clients0 = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
- data->voting_rights_clients1 = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
- data->voting_rights_clients2 = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
- data->voting_rights_clients3 = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
- data->voting_rights_clients4 = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
- data->voting_rights_clients5 = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
- data->voting_rights_clients6 = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
- data->voting_rights_clients7 = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
+ data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
+ data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
+ data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
+ data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
+ data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
+ data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
+ data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
+ data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
@@ -1382,23 +1382,40 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->force_pcie_gen = PP_PCIEGenInvalid;
data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
- if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->smumgr->is_kicker) {
+ if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
uint8_t tmp1, tmp2;
uint16_t tmp3 = 0;
atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
&tmp3);
tmp3 = (tmp3 >> 5) & 0x3;
data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
+ } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
+ data->vddc_phase_shed_control = 1;
+ } else {
+ data->vddc_phase_shed_control = 0;
+ }
+
+ if (hwmgr->chip_id == CHIP_HAWAII) {
+ data->thermal_temp_setting.temperature_low = 94500;
+ data->thermal_temp_setting.temperature_high = 95000;
+ data->thermal_temp_setting.temperature_shutdown = 104000;
+ } else {
+ data->thermal_temp_setting.temperature_low = 99500;
+ data->thermal_temp_setting.temperature_high = 100000;
+ data->thermal_temp_setting.temperature_shutdown = 104000;
}
data->fast_watermark_threshold = 100;
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
+ VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+ data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDGFX)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
@@ -1406,25 +1423,24 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableMVDDControl)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
- if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control) {
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDGFX);
- }
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ControlVDDCI)) {
- if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
- else if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
+ else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
}
@@ -1543,7 +1559,7 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
if (vddc >= 2000 || vddc == 0)
return -EINVAL;
} else {
- pr_warn("failed to retrieving EVV voltage!\n");
+ pr_debug("failed to retrieving EVV voltage!\n");
continue;
}
@@ -1676,7 +1692,7 @@ static int phm_add_voltage(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((0 != look_up_table->count),
"Lookup Table empty.", return -EINVAL);
- i = smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_VDDGFX);
+ i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
PP_ASSERT_WITH_CODE((i >= look_up_table->count),
"Lookup Table is full.", return -EINVAL);
@@ -2274,7 +2290,7 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
}
- if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count > 1)
+ if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
return 0;
@@ -2282,40 +2298,65 @@ static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) {
- kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
- hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
- }
- pp_smu7_thermal_fini(hwmgr);
- if (NULL != hwmgr->backend) {
- kfree(hwmgr->backend);
- hwmgr->backend = NULL;
- }
+ kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
+ hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
+ kfree(hwmgr->backend);
+ hwmgr->backend = NULL;
+
+ return 0;
+}
+
+static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
+{
+ uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int i;
+ if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
+ for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
+ virtual_voltage_id,
+ efuse_voltage_id) == 0) {
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
+ data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
+ data->vddc_leakage.count++;
+ }
+ if (vddci != 0 && vddci != virtual_voltage_id) {
+ data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
+ data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
+ data->vddci_leakage.count++;
+ }
+ }
+ }
+ }
return 0;
}
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data;
- int result;
+ int result = 0;
data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
hwmgr->backend = data;
- pp_smu7_thermal_initialize(hwmgr);
-
smu7_patch_voltage_workaround(hwmgr);
smu7_init_dpm_defaults(hwmgr);
/* Get leakage voltage based on leakage ID. */
- result = smu7_get_evv_voltages(hwmgr);
-
- if (result) {
- pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
- return -EINVAL;
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EVV)) {
+ result = smu7_get_evv_voltages(hwmgr);
+ if (result) {
+ pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
+ return -EINVAL;
+ }
+ } else {
+ smu7_get_elb_voltages(hwmgr);
}
if (hwmgr->pp_table_version == PP_TABLE_V1) {
@@ -2382,7 +2423,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel, level);
}
}
@@ -2395,7 +2436,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2409,7 +2450,7 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
level++;
if (level)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2428,14 +2469,14 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
}
if (!data->mclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
}
@@ -2451,7 +2492,7 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
return -EINVAL;
if (!data->pcie_dpm_key_disabled) {
- smum_send_msg_to_smc(hwmgr->smumgr,
+ smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PCIeDPM_UnForceLevel);
}
@@ -2468,7 +2509,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
(1 << level));
@@ -2478,7 +2519,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
(1 << level));
}
@@ -2488,7 +2529,7 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
level = phm_get_lowest_enabled_level(hwmgr,
data->dpm_level_enable_mask.pcie_dpm_enable_mask);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
(level));
}
@@ -2572,51 +2613,16 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = smu7_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = smu7_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -2625,26 +2631,23 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
if (ret)
return ret;
- hwmgr->dpm_level = level;
smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
-
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
-
- return 0;
+ if (!ret) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
+ }
+ return ret;
}
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
@@ -2843,7 +2846,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
}
-static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct smu7_power_state *smu7_ps;
@@ -2865,7 +2868,7 @@ static int smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
[smu7_ps->performance_level_count-1].memory_clock;
}
-static int smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct smu7_power_state *smu7_ps;
@@ -3002,7 +3005,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
[smu7_power_state->performance_level_count++]);
PP_ASSERT_WITH_CODE(
- (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+ (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
@@ -3071,11 +3074,11 @@ static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
- pr_err("Single MCLK entry VDDCI/MCLK dependency table "
+ pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].vddci !=
data->vbios_boot_state.vddci_bootup_value)
- pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
+ pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3166,7 +3169,7 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
data->highest_mclk = memory_clock;
PP_ASSERT_WITH_CODE(
- (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
+ (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
@@ -3219,11 +3222,11 @@ static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
if (dep_mclk_table->entries[0].clk !=
data->vbios_boot_state.mclk_bootup_value)
- pr_err("Single MCLK entry VDDCI/MCLK dependency table "
+ pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
"does not match VBIOS boot MCLK level");
if (dep_mclk_table->entries[0].v !=
data->vbios_boot_state.vddci_bootup_value)
- pr_err("Single VDDCI entry VDDCI/MCLK dependency table "
+ pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
"does not match VBIOS boot VDDCI level");
}
@@ -3312,14 +3315,14 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr,
struct pp_gpu_power *query)
{
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogStart),
"Failed to start pm status log!",
return -1);
msleep_interruptible(20);
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_PmStatusLogSample),
"Failed to sample pm status log!",
return -1);
@@ -3353,19 +3356,19 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- offset = data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
AverageGraphicsActivity);
@@ -3532,7 +3535,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SCLKDPM_FreezeLevel),
"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3544,7 +3547,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze MCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MCLKDPM_FreezeLevel),
"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3762,7 +3765,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_SCLKDPM_UnfreezeLevel),
"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3774,7 +3777,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
- PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_MCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
@@ -3822,11 +3825,14 @@ static int smu7_notify_link_speed_change_after_state_change(
static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int ret = 0;
- if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
+ smum_send_msg_to_smc_with_parameter(hwmgr,
(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
- return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+ ret = (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+ }
+ return ret;
}
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@@ -3899,10 +3905,7 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
}
@@ -3911,7 +3914,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
- return (smum_send_msg_to_smc(hwmgr->smumgr, msg) == 0) ? 0 : -1;
+ return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
}
static int
@@ -3974,12 +3977,12 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
PreVBlankGap), 0x64);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
- data->soft_regs_start + smum_get_offsetof(hwmgr->smumgr,
+ data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters,
VBlankTimeout),
(frame_time_in_us - pre_vbi_time_in_us));
@@ -4004,10 +4007,7 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
- if (phm_is_hw_access_blocked(hwmgr))
- return 0;
-
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
}
@@ -4249,21 +4249,21 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
+ if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ AMD_DPM_FORCED_LEVEL_LOW |
+ AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
switch (type) {
case PP_SCLK:
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
break;
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
break;
@@ -4276,7 +4276,7 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
level++;
if (!data->pcie_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
level);
break;
@@ -4300,7 +4300,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetSclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
for (i = 0; i < sclk_table->count; i++) {
@@ -4316,7 +4316,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_API_GetMclkFrequency);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
for (i = 0; i < mclk_table->count; i++) {
@@ -4353,31 +4353,27 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
-static int smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- int result = 0;
-
switch (mode) {
case AMD_FAN_CTRL_NONE:
- result = smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
break;
case AMD_FAN_CTRL_MANUAL:
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_MicrocodeFanControl))
- result = smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
+ smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
- result = smu7_fan_ctrl_set_static_mode(hwmgr, mode);
- if (!result)
- result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
+ if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
+ smu7_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
- return result;
}
-static int smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
}
@@ -4606,7 +4602,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (sclk_mask) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.
sclk_dpm_enable_mask &
@@ -4615,7 +4611,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (mclk_mask) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
data->dpm_level_enable_mask.
mclk_dpm_enable_mask &
@@ -4627,8 +4623,7 @@ static int smu7_set_power_profile_state(struct pp_hwmgr *hwmgr,
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (smu_data == NULL)
return -EINVAL;
@@ -4640,13 +4635,13 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr->smumgr, PPSMC_MSG_EnableAvfs),
+ hwmgr, PPSMC_MSG_EnableAvfs),
"Failed to enable AVFS!",
return -EINVAL);
} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON))
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr->smumgr, PPSMC_MSG_DisableAvfs),
+ hwmgr, PPSMC_MSG_DisableAvfs),
"Failed to disable AVFS!",
return -EINVAL);
@@ -4703,6 +4698,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.set_power_profile_state = smu7_set_power_profile_state,
.avfs_control = smu7_avfs_control,
.disable_smc_firmware_ctf = smu7_thermal_disable_alert,
+ .start_thermal_controller = smu7_start_thermal_controller,
};
uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
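
The new .start_thermal_controller hook registered above replaces the old phm task-table callbacks (the tf_* functions removed from smu7_thermal.c later in this diff) with a direct call that takes the temperature range up front; its prototype, as introduced by the thermal hunks below:

	int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr,
					  struct PP_TemperatureRange *range);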
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
index f221e17b67e7..e021154aedbd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h
@@ -182,14 +182,7 @@ struct smu7_hwmgr {
struct smu7_dpm_table dpm_table;
struct smu7_dpm_table golden_dpm_table;
- uint32_t voting_rights_clients0;
- uint32_t voting_rights_clients1;
- uint32_t voting_rights_clients2;
- uint32_t voting_rights_clients3;
- uint32_t voting_rights_clients4;
- uint32_t voting_rights_clients5;
- uint32_t voting_rights_clients6;
- uint32_t voting_rights_clients7;
+ uint32_t voting_rights_clients[8];
uint32_t static_screen_threshold_unit;
uint32_t static_screen_threshold;
uint32_t voltage_control;
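
Folding the eight voting_rights_clientsN fields into one array is what lets the register writes earlier in this patch collapse into a loop: the CG_FREQ_TRAN_VOTING_n indirect registers sit at a contiguous 4-byte stride, so index i maps straight to a register offset. The loop encodes that layout assumption directly:

	for (i = 0; i < 8; i++)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				       ixCG_FREQ_TRAN_VOTING_0 + i * 4,
				       data->voting_rights_clients[i]);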
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 1dc31aa72781..85ca16abb626 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -629,51 +629,38 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
uint32_t block_en = 0;
int32_t result = 0;
uint32_t didt_block;
- uint32_t data;
if (hwmgr->chip_id == CHIP_POLARIS11)
didt_block = Polaris11_DIDTBlock_Info;
else
didt_block = DIDTBlock_Info;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_SQRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_SQ_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~SQ_Enable_MASK;
didt_block |= block_en << SQ_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_DBRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DB_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~DB_Enable_MASK;
didt_block |= block_en << DB_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ? en : 0;
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_TDRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TD_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~TD_Enable_MASK;
didt_block |= block_en << TD_Enable_SHIFT;
- block_en = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping) ? en : 0;
-
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((block_en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ block_en = PP_CAP(PHM_PlatformCaps_TCPRamping) ? en : 0;
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TCP_CTRL0, DIDT_CTRL_EN, block_en);
didt_block &= ~TCP_Enable_MASK;
didt_block |= block_en << TCP_Enable_SHIFT;
-
if (enable)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, didt_block);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block);
return result;
}
@@ -753,12 +740,13 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
if (result == 0)
num_se = sys_info.value;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
+ PP_CAP(PHM_PlatformCaps_DBRamping) ||
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
cgs_enter_safe_mode(hwmgr->device, true);
+ cgs_lock_grbm_idx(hwmgr->device, true);
value = 0;
value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
for (count = 0; count < num_se; count++) {
@@ -775,7 +763,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
} else if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- if (hwmgr->smumgr->is_kicker)
+ if (hwmgr->is_kicker)
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker);
else
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
@@ -793,11 +781,12 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
if (hwmgr->chip_id == CHIP_POLARIS11) {
- result = smum_send_msg_to_smc(hwmgr->smumgr,
+ result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_EnableDpmDidt));
PP_ASSERT_WITH_CODE((0 == result),
"Failed to enable DPM DIDT.", return result);
}
+ cgs_lock_grbm_idx(hwmgr->device, false);
cgs_enter_safe_mode(hwmgr->device, false);
}
@@ -808,10 +797,10 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
{
int result;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping) ||
+ PP_CAP(PHM_PlatformCaps_DBRamping) ||
+ PP_CAP(PHM_PlatformCaps_TDRamping) ||
+ PP_CAP(PHM_PlatformCaps_TCPRamping)) {
cgs_enter_safe_mode(hwmgr->device, true);
@@ -820,7 +809,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
"Post DIDT enable clock gating failed.",
return result);
if (hwmgr->chip_id == CHIP_POLARIS11) {
- result = smum_send_msg_to_smc(hwmgr->smumgr,
+ result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableDpmDidt));
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", return result);
@@ -836,10 +825,9 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC)) {
+ if (PP_CAP(PHM_PlatformCaps_CAC)) {
int smc_result;
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_EnableCac));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
@@ -854,9 +842,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_CAC) && data->cac_enabled) {
- int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) {
+ int smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableCac));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable CAC in SMC.", result = -1);
@@ -872,7 +859,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PkgPwrSetLimit, n);
return 0;
}
@@ -880,7 +867,7 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
uint32_t target_tdp)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
}
@@ -899,11 +886,9 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
else
cac_table = hwmgr->dyn_state.cac_dtp_table;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
-
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->enable_tdc_limit_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_TDCLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
@@ -913,14 +898,13 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
}
if (data->enable_pkg_pwr_tracking_feature) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
uint32_t default_limit =
(uint32_t)(cac_table->usMaximumPowerDeliveryLimit * 256);
-
data->power_containment_features |=
POWERCONTAINMENT_FEATURE_PkgPwrLimit;
@@ -937,14 +921,13 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment) &&
- data->power_containment_features) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment) &&
+ data->power_containment_features) {
int smc_result;
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_TDCLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_TDCLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable TDCLimit in SMC.",
@@ -953,7 +936,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_DTE) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_DisableDTE));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable DTE in SMC.",
@@ -962,7 +945,7 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
- smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
+ smc_result = smum_send_msg_to_smc(hwmgr,
(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable PkgPwrTracking in SMC.",
@@ -987,16 +970,17 @@ int smu7_power_control_set_level(struct pp_hwmgr *hwmgr)
cac_table = table_info->cac_dtp_table;
else
cac_table = hwmgr->dyn_state.cac_dtp_table;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
/* adjustment percentage has already been validated */
adjust_percent = hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
(-1 * hwmgr->platform_descriptor.TDPAdjustment);
- /* SMC requested that target_tdp to be 7 bit fraction in DPM table
- * but message to be 8 bit fraction for messages
- */
- target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+
+ if (hwmgr->chip_id > CHIP_TONGA)
+ target_tdp = ((100 + adjust_percent) * (int)(cac_table->usTDP * 256)) / 100;
+ else
+ target_tdp = ((100 + adjust_percent) * (int)(cac_table->usConfigurableTDP * 256)) / 100;
+
result = smu7_set_overdriver_target_tdp(hwmgr, (uint32_t)target_tdp);
}
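
The smu7_powertune.c hunks lean on two helpers. PP_CAP() is shorthand for the long-form capability test, and CGS_WREG32_FIELD_IND() performs the read-modify-write of a single named register field that the removed open-coded sequences spelled out. Plausible definitions, reconstructed from the mask/shift names visible in the removed lines (sketches, not verbatim kernel headers):

	#define PP_CAP(c) \
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c))

	/* read-modify-write one field of an indirect register */
	#define CGS_WREG32_FIELD_IND(dev, space, reg, field, val)		\
		cgs_write_ind_register(dev, space, ix##reg,			\
			(cgs_read_ind_register(dev, space, ix##reg) &		\
			 ~reg##__##field##_MASK) |				\
			(((val) << reg##__##field##__SHIFT) &			\
			 reg##__##field##_MASK))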
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index baddb569a8b8..d7aa643cdb51 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -37,9 +37,8 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
- hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
fan_speed_info->supports_rpm_write = true;
fan_speed_info->min_rpm = hwmgr->thermal_controller.fanInfo.ulMinRPM;
@@ -87,8 +86,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
uint32_t crystal_clock_freq;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution == 0))
+ !hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution)
return -ENODEV;
tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -152,13 +150,11 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
int result;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM))
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
hwmgr->thermal_controller.
advanceFanControlParameters.usMaxFanRPM);
@@ -169,12 +165,12 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
} else {
cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
}
if (!result && hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature)
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ucTargetTemperature);
@@ -187,7 +183,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
hwmgr->fan_ctrl_enabled = false;
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_StopFanControl);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl);
}
/**
@@ -209,8 +205,7 @@ int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (speed > 100)
speed = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
@@ -241,8 +236,7 @@ int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
result = smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
if (!result)
result = smu7_fan_ctrl_start_smc_fan_control(hwmgr);
@@ -270,8 +264,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
(speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
crystal_clock_freq = smu7_get_xclk(hwmgr);
@@ -367,7 +360,7 @@ static int smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
*
* @param hwmgr The address of the hardware manager.
*/
-int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
+static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
{
uint32_t alert;
@@ -378,7 +371,7 @@ int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable);
}
/**
@@ -396,7 +389,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_Thermal_Cntl_Disable);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable);
}
/**
@@ -423,16 +416,14 @@ int smu7_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
* @param Result the last failure code
* @return result from set temperature range routine
*/
-static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+static int smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) {
smu7_fan_ctrl_start_smc_fan_control(hwmgr);
smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
@@ -440,108 +431,34 @@ static int tf_smu7_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
return 0;
}
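Throughout these hunks, phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, X) collapses to PP_CAP(X). Judging from the mechanical replacements (and the commit log's move of helpers into hwmgr.h), PP_CAP presumably wraps phm_cap_enabled() and picks up a hwmgr variable from the enclosing scope. A simplified sketch of that capability-bit pattern; the single-bitmap representation and the bit index are stand-ins.

/* Simplified sketch of a PP_CAP-style capability test. The expansion is an
 * inference from this patch; the real phm_cap_enabled() operates on an array
 * of capability words rather than one bitmap.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct platform_descriptor { uint64_t platformCaps; };
struct pp_hwmgr { struct platform_descriptor platform_descriptor; };

static bool phm_cap_enabled(uint64_t caps, unsigned int bit)
{
	return (caps >> bit) & 1;
}

#define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c))

enum { PHM_PlatformCaps_MicrocodeFanControl = 3 }; /* hypothetical bit index */

int main(void)
{
	struct pp_hwmgr h = { .platform_descriptor = { .platformCaps = 1u << 3 } };
	struct pp_hwmgr *hwmgr = &h;	/* PP_CAP relies on this being in scope */

	printf("%d\n", PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)); /* 1 */
	return 0;
}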
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-static int tf_smu7_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+ int ret = 0;
if (range == NULL)
return -EINVAL;
- return smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from initialize thermal controller routine
-*/
-static int tf_smu7_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from enable alert routine
-*/
-static int tf_smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return smu7_thermal_disable_alert(hwmgr);
-}
+ smu7_thermal_initialize(hwmgr);
+ ret = smu7_thermal_set_temperature_range(hwmgr, range->min, range->max);
+ if (ret)
+ return -EINVAL;
+ smu7_thermal_enable_alert(hwmgr);
+ ret = smum_thermal_avfs_enable(hwmgr);
+ if (ret)
+ return -EINVAL;
-static const struct phm_master_table_item
-phm_thermal_start_thermal_controller_master_list[] = {
- { .tableFunction = tf_smu7_thermal_initialize },
- { .tableFunction = tf_smu7_thermal_set_temperature_range },
- { .tableFunction = tf_smu7_thermal_enable_alert },
- { .tableFunction = smum_thermal_avfs_enable },
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- { .tableFunction = smum_thermal_setup_fan_table },
- { .tableFunction = tf_smu7_thermal_start_smc_fan_control },
- { }
-};
-
-static const struct phm_master_table_header
-phm_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- phm_thermal_start_thermal_controller_master_list
-};
-
-static const struct phm_master_table_item
-phm_thermal_set_temperature_range_master_list[] = {
- { .tableFunction = tf_smu7_thermal_disable_alert },
- { .tableFunction = tf_smu7_thermal_set_temperature_range },
- { .tableFunction = tf_smu7_thermal_enable_alert },
- { }
-};
-
-static const struct phm_master_table_header
-phm_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- phm_thermal_set_temperature_range_master_list
-};
+ smum_thermal_setup_fan_table(hwmgr);
+ smu7_thermal_start_smc_fan_control(hwmgr);
+ return 0;
+}
+
+
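The hunk above retires the phm "master table" machinery, ordered arrays of tf_* callbacks walked by a generic runner, in favor of the straight-line smu7_start_thermal_controller(). A sketch of the two shapes; every name here is illustrative, not the real powerplay API.

/* Table-driven step sequence (the removed phm_master_table scheme) versus
 * direct calls (the shape this patch adopts). Illustrative names only.
 */
#include <stdio.h>

struct ctx { int dummy; };

static int step_init(struct ctx *c)  { (void)c; puts("init");  return 0; }
static int step_range(struct ctx *c) { (void)c; puts("range"); return 0; }
static int step_alert(struct ctx *c) { (void)c; puts("alert"); return 0; }

typedef int (*step_fn)(struct ctx *);

/* old style: data-driven sequence, terminated by a NULL entry */
static const step_fn start_sequence[] = { step_init, step_range, step_alert, NULL };

static int run_table(struct ctx *c, const step_fn *seq)
{
	int ret = 0;

	for (; *seq && !ret; seq++)
		ret = (*seq)(c);
	return ret;
}

/* new style: call the steps directly, so control flow is visible and each
 * failure can be handled in place */
static int start_direct(struct ctx *c)
{
	int ret = step_init(c);

	if (!ret)
		ret = step_range(c);
	if (!ret)
		ret = step_alert(c);
	return ret;
}

int main(void)
{
	struct ctx c = { 0 };

	run_table(&c, start_sequence);
	return start_direct(&c);
}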
int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
@@ -550,35 +467,3 @@ int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
return 0;
}
-/**
-* Initializes the thermal controller related functions in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr,
- &phm_thermal_set_temperature_range_master,
- &(hwmgr->set_temperature_range));
-
- if (!result) {
- result = phm_construct_table(hwmgr,
- &phm_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (result)
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- }
-
- if (!result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
-void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr)
-{
- phm_destroy_table(hwmgr, &(hwmgr->set_temperature_range));
- phm_destroy_table(hwmgr, &(hwmgr->start_thermal_controller));
- return;
-}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
index ba71b608fa75..42c1ba0fad78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.h
@@ -46,14 +46,13 @@ extern int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode);
extern int smu7_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_smu7_thermal_initialize(struct pp_hwmgr *hwmgr);
-extern void pp_smu7_thermal_fini(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed);
extern int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed);
extern int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern int smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr);
extern int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int smu7_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *temperature_range);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f8f02e70b8bc..a59d282797f5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -201,9 +201,6 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_ControlVDDCI);
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_TablelessHardwareInterface);
-
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_EnableSMU7ThermalManagement);
sys_info.size = sizeof(struct cgs_system_info);
@@ -381,12 +378,10 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (!data->registry_data.socclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_SOCCLK].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_UVDDPM))
+ if (PP_CAP(PHM_PlatformCaps_UVDDPM))
data->smu_features[GNLD_DPM_UVD].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_VCEDPM))
+ if (PP_CAP(PHM_PlatformCaps_VCEDPM))
data->smu_features[GNLD_DPM_VCE].supported = true;
if (!data->registry_data.pcie_dpm_key_disabled)
@@ -395,9 +390,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (!data->registry_data.dcefclk_dpm_key_disabled)
data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkDeepSleep) &&
- data->registry_data.sclk_deep_sleep_support) {
+ if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
+ data->registry_data.sclk_deep_sleep_support) {
data->smu_features[GNLD_DS_GFXCLK].supported = true;
data->smu_features[GNLD_DS_SOCCLK].supported = true;
data->smu_features[GNLD_DS_LCLK].supported = true;
@@ -431,8 +425,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion);
- vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version));
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+ vega10_read_arg_from_smc(hwmgr, &(data->smu_version));
/* ACG firmware has major version 5 */
if ((data->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
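For reference, the check above isolates the major version: masking smu_version with 0xff000000 keeps the top byte, and comparing against 0x5000000 (that is, 0x05000000) tests for major version 5. A tiny worked example with a made-up version word:

/* Worked example of the SMU major-version test above. The version value is
 * hypothetical; only the mask-and-compare idiom comes from the patch.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t smu_version = 0x05021a00;	/* made-up 5.x firmware */
	int acg = (smu_version & 0xff000000) == 0x05000000;

	printf("major=%u acg_supported=%d\n", smu_version >> 24, acg);
	/* prints: major=5 acg_supported=1 */
	return 0;
}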
@@ -497,8 +491,7 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
if (!vega10_get_socclk_for_voltage_evv(hwmgr,
table_info->vddc_lookup_table, vv_id, &sclk)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ClockStretcher)) {
+ if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
for (j = 1; j < socclk_table->count; j++) {
if (socclk_table->entries[j].clk == sclk &&
socclk_table->entries[j].cks_enable == 0) {
@@ -591,61 +584,37 @@ static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
static int vega10_patch_voltage_dependency_tables_with_lookup_table(
struct pp_hwmgr *hwmgr)
{
- uint8_t entry_id;
- uint8_t voltage_id;
+ uint8_t entry_id, voltage_id;
+ unsigned i;
struct phm_ppt_v2_information *table_info =
(struct phm_ppt_v2_information *)(hwmgr->pptable);
- struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
- table_info->vdd_dep_on_socclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
- table_info->vdd_dep_on_sclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
- table_info->vdd_dep_on_dcefclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
- table_info->vdd_dep_on_pixclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
- table_info->vdd_dep_on_dispclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
- table_info->vdd_dep_on_phyclk;
- struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
- table_info->vdd_dep_on_mclk;
struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
table_info->mm_dep_table;
+ struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
+ table_info->vdd_dep_on_mclk;
- for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
- voltage_id = socclk_table->entries[entry_id].vddInd;
- socclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
- voltage_id = gfxclk_table->entries[entry_id].vddInd;
- gfxclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
- voltage_id = dcefclk_table->entries[entry_id].vddInd;
- dcefclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
-
- for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
- voltage_id = pixclk_table->entries[entry_id].vddInd;
- pixclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
+ for (i = 0; i < 6; i++) {
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdt;
+ switch (i) {
+ case 0: vdt = table_info->vdd_dep_on_socclk; break;
+ case 1: vdt = table_info->vdd_dep_on_sclk; break;
+ case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
+ case 3: vdt = table_info->vdd_dep_on_pixclk; break;
+ case 4: vdt = table_info->vdd_dep_on_dispclk; break;
+ case 5: vdt = table_info->vdd_dep_on_phyclk; break;
+ }
- for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
- voltage_id = dspclk_table->entries[entry_id].vddInd;
- dspclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ for (entry_id = 0; entry_id < vdt->count; entry_id++) {
+ voltage_id = vdt->entries[entry_id].vddInd;
+ vdt->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ }
}
- for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
- voltage_id = phyclk_table->entries[entry_id].vddInd;
- phyclk_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
+ for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
+ voltage_id = mm_table->entries[entry_id].vddcInd;
+ mm_table->entries[entry_id].vddc =
+ table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
}
for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
@@ -660,11 +629,6 @@ static int vega10_patch_voltage_dependency_tables_with_lookup_table(
table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
}
- for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
- voltage_id = mm_table->entries[entry_id].vddcInd;
- mm_table->entries[entry_id].vddc =
- table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
- }
return 0;
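The rewrite above collapses six copy-pasted per-table loops into one loop with a switch selecting the dependency table. The same consolidation can also be written with an array of table pointers; a pared-down, self-contained sketch, with the phm_ppt_v1 structures trimmed to the fields the loop touches:

/* Array-of-tables variant of the consolidation above. Types are simplified;
 * field names mirror the patch.
 */
#include <stdint.h>
#include <stdio.h>

struct dep_entry { uint8_t vddInd; uint16_t vddc; };
struct dep_table { uint32_t count; struct dep_entry entries[4]; };
struct lookup_entry { uint16_t us_vdd; };

static void patch_with_lookup(struct dep_table *t, const struct lookup_entry *lut)
{
	for (uint32_t i = 0; i < t->count; i++)
		t->entries[i].vddc = lut[t->entries[i].vddInd].us_vdd;
}

int main(void)
{
	struct lookup_entry lut[] = { { 900 }, { 950 }, { 1000 } };
	struct dep_table socclk = { 2, { { 0, 0 }, { 2, 0 } } };
	struct dep_table sclk = { 1, { { 1, 0 } } };
	struct dep_table *tables[] = { &socclk, &sclk };	/* ...and the rest */

	for (unsigned int i = 0; i < sizeof(tables) / sizeof(tables[0]); i++)
		patch_with_lookup(tables[i], lut);

	printf("%u %u %u\n", (unsigned int)socclk.entries[0].vddc,
	       (unsigned int)socclk.entries[1].vddc,
	       (unsigned int)sclk.entries[0].vddc);	/* 900 1000 950 */
	return 0;
}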
@@ -838,8 +802,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
}
/* VDDCI_MEM */
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ControlVDDCI)) {
+ if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
@@ -959,7 +922,7 @@ static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
{
uint32_t features_enabled;
- if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
+ if (!vega10_get_smc_features(hwmgr, &features_enabled)) {
if (features_enabled & SMC_DPM_FEATURES)
return true;
}
@@ -1411,10 +1374,8 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
data->odn_dpm_table.odn_core_clock_dpm_levels.
number_of_performance_levels = data->dpm_table.gfx_table.count;
for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
@@ -2311,21 +2272,21 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
uint32_t agc_btc_response;
if (data->smu_features[GNLD_ACG].supported) {
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+ if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
- vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
+ vega10_read_arg_from_smc(hwmgr, &agc_btc_response);
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
else if (2 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop);
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
+ if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = true;
} else {
@@ -2342,13 +2303,11 @@ static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (data->smu_features[GNLD_ACG].supported) {
- if (data->smu_features[GNLD_ACG].enabled) {
- if (0 == vega10_enable_smc_features(hwmgr->smumgr, false,
- data->smu_features[GNLD_ACG].smu_feature_bitmap))
+ if (data->smu_features[GNLD_ACG].supported &&
+ data->smu_features[GNLD_ACG].enabled)
+ if (!vega10_enable_smc_features(hwmgr, false,
+ data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = false;
- }
- }
return 0;
}
@@ -2363,9 +2322,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
if (!result) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot) &&
- (data->registry_data.regulator_hot_gpio_support)) {
+ if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
+ data->registry_data.regulator_hot_gpio_support) {
pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
@@ -2377,9 +2335,8 @@ static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
pp_table->VR1HotPolarity = 0;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_AutomaticDCTransition) &&
- (data->registry_data.ac_dc_switch_gpio_support)) {
+ if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
+ data->registry_data.ac_dc_switch_gpio_support) {
pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
} else {
@@ -2398,14 +2355,14 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
if (data->smu_features[GNLD_AVFS].supported) {
if (enable) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_AVFS].smu_feature_bitmap),
"[avfs_control] Attempt to Enable AVFS feature Failed!",
return -1);
data->smu_features[GNLD_AVFS].enabled = true;
} else {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_AVFS].smu_feature_id),
"[avfs_control] Attempt to Disable AVFS feature Failed!",
@@ -2428,11 +2385,11 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32);
- vega10_read_arg_from_smc(hwmgr->smumgr, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
+ vega10_read_arg_from_smc(hwmgr, &top32);
- smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32);
- vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
+ vega10_read_arg_from_smc(hwmgr, &bottom32);
serial_number = ((uint64_t)bottom32 << 32) | top32;
@@ -2446,7 +2403,7 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
avfs_fuse_table->VFT2_b = fuse.VFT2_b;
avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload FuseOVerride!",
@@ -2585,14 +2542,14 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
if (0 != boot_up_values.usVddc) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage,
(boot_up_values.usVddc * 4));
data->vbios_boot_state.bsoc_vddc_lock = true;
} else {
data->vbios_boot_state.bsoc_vddc_lock = false;
}
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
}
@@ -2618,7 +2575,7 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
vega10_populate_and_upload_avfs_fuse_override(hwmgr);
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)pp_table, PPTABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -2641,7 +2598,7 @@ static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
pr_info("THERMAL Feature Already enabled!");
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"Enable THERMAL Feature Failed!",
@@ -2661,7 +2618,7 @@ static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
pr_info("THERMAL Feature Already disabled!");
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
"disable THERMAL Feature Failed!",
@@ -2677,11 +2634,10 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_RegulatorHot)) {
+ if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
if (data->smu_features[GNLD_VR0HOT].supported) {
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
"Attempt to Enable VR0 Hot feature Failed!",
@@ -2690,7 +2646,7 @@ static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
} else {
if (data->smu_features[GNLD_VR1HOT].supported) {
PP_ASSERT_WITH_CODE(
- !vega10_enable_smc_features(hwmgr->smumgr,
+ !vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
"Attempt to Enable VR0 Hot feature Failed!",
@@ -2708,7 +2664,7 @@ static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.ulv_support) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
"Enable ULV Feature Failed!",
return -1);
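These enable/disable paths lean on PP_ASSERT_WITH_CODE throughout: if the condition fails, log the message and execute the recovery statement, typically a return. A sketch of that idiom; the expansion below is illustrative rather than copied from the powerplay headers.

/* Illustrative PP_ASSERT_WITH_CODE expansion: on failure, log and run the
 * caller-supplied recovery statement.
 */
#include <stdio.h>

#define PP_ASSERT_WITH_CODE(cond, msg, code)			\
	do {							\
		if (!(cond)) {					\
			fprintf(stderr, "%s\n", msg);		\
			code;					\
		}						\
	} while (0)

static int enable_feature(int ok)
{
	PP_ASSERT_WITH_CODE(ok,
			"Enable Feature Failed!",
			return -1);
	return 0;
}

int main(void)
{
	int a = enable_feature(1);	/* 0, silent */
	int b = enable_feature(0);	/* -1, logs the message */

	printf("%d %d\n", a, b);
	return 0;
}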
@@ -2724,7 +2680,7 @@ static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.ulv_support) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
"disable ULV Feature Failed!",
return -EINVAL);
@@ -2740,7 +2696,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
"Attempt to Enable DS_GFXCLK Feature Failed!",
return -EINVAL);
@@ -2748,7 +2704,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_SOCCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
"Attempt to Enable DS_SOCCLK Feature Failed!",
return -EINVAL);
@@ -2756,7 +2712,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_LCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
"Attempt to Enable DS_LCLK Feature Failed!",
return -EINVAL);
@@ -2764,7 +2720,7 @@ static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
"Attempt to Enable DS_DCEFCLK Feature Failed!",
return -EINVAL);
@@ -2780,7 +2736,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DS_GFXCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
"Attempt to disable DS_GFXCLK Feature Failed!",
return -EINVAL);
@@ -2788,7 +2744,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_SOCCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
"Attempt to disable DS_ Feature Failed!",
return -EINVAL);
@@ -2796,7 +2752,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_LCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
"Attempt to disable DS_LCLK Feature Failed!",
return -EINVAL);
@@ -2804,7 +2760,7 @@ static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
}
if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
"Attempt to disable DS_DCEFCLK Feature Failed!",
return -EINVAL);
@@ -2822,7 +2778,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to disable LED DPM feature failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = false;
@@ -2840,7 +2796,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
- vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
+ vega10_enable_smc_features(hwmgr, false, feature_mask);
return 0;
}
@@ -2870,7 +2826,7 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
}
- if (vega10_enable_smc_features(hwmgr->smumgr,
+ if (vega10_enable_smc_features(hwmgr,
true, feature_mask)) {
for (i = 0; i < GNLD_DPM_MAX; i++) {
if (data->smu_features[i].smu_feature_bitmap &
@@ -2880,22 +2836,21 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
}
if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
data->smu_features[GNLD_LED_DISPLAY].enabled = true;
}
if (data->vbios_boot_state.bsoc_vddc_lock) {
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage, 0);
data->vbios_boot_state.bsoc_vddc_lock = false;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_Falcon_QuickTransition)) {
+ if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition)) {
if (data->smu_features[GNLD_ACDC].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
"Attempt to Enable DS_GFXCLK Feature Failed!",
return -1);
@@ -2912,13 +2867,13 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
(struct vega10_hwmgr *)(hwmgr->backend);
int tmp_result, result = 0;
- tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to configure telemetry!",
return tmp_result);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, 0);
tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
@@ -2936,8 +2891,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to initialize SMC table!",
result = tmp_result);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController)) {
+ if (PP_CAP(PHM_PlatformCaps_ThermalController)) {
tmp_result = vega10_enable_thermal_protection(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
"Failed to enable thermal protection!",
@@ -3172,8 +3126,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState)) {
PP_ASSERT_WITH_CODE(
data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
@@ -3238,10 +3191,8 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
disable_mclk_switching_for_frame_lock = phm_cap_enabled(
hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
- disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_DisableMclkSwitchForVR);
- force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ForceMclkHigh);
+ disable_mclk_switching_for_vr = PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR);
+ force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh);
disable_mclk_switching = (info.display_count > 1) ||
disable_mclk_switching_for_frame_lock ||
@@ -3292,8 +3243,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
vega10_ps->performance_levels[1].mem_clock;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_StablePState)) {
+ if (PP_CAP(PHM_PlatformCaps_StablePState)) {
for (i = 0; i < vega10_ps->performance_level_count; i++) {
vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
@@ -3325,10 +3275,8 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
data->need_update_dpm_table = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
for (i = 0; i < sclk_table->count; i++) {
if (sclk == sclk_table->dpm_levels[i].value)
break;
@@ -3412,10 +3360,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
uint32_t dpm_count, clock_percent;
uint32_t i;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ODNinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) {
if (!data->need_update_dpm_table &&
!data->apply_optimized_settings &&
@@ -3480,10 +3426,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
dpm_table->
gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
value = sclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
/* Need to do calculation based on the golden DPM table
* as the Heatmap GPU Clock axis is also based on
* the default values
@@ -3537,10 +3481,8 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
mem_table.dpm_levels[dpm_table->mem_table.count - 1].
value = mclk;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinACSupport) ||
- phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_OD6PlusinDCSupport)) {
+ if (PP_CAP(PHM_PlatformCaps_OD6PlusinACSupport) ||
+ PP_CAP(PHM_PlatformCaps_OD6PlusinDCSupport)) {
PP_ASSERT_WITH_CODE(
golden_dpm_table->mem_table.dpm_levels
@@ -3732,7 +3674,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.gfx_boot_level !=
data->dpm_table.gfx_table.dpm_state.soft_min_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
data->smc_state_table.gfx_boot_level),
"Failed to set soft min sclk index!",
@@ -3748,14 +3690,14 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
socclk_idx),
"Failed to set soft min uclk index!",
return -EINVAL);
} else {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
data->smc_state_table.mem_boot_level),
"Failed to set soft min uclk index!",
@@ -3780,7 +3722,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.gfx_max_level !=
data->dpm_table.gfx_table.dpm_state.soft_max_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMaxGfxclkByIndex,
data->smc_state_table.gfx_max_level),
"Failed to set soft max sclk index!",
@@ -3794,7 +3736,7 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
if (data->smc_state_table.mem_max_level !=
data->dpm_table.mem_table.dpm_state.soft_max_level) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMaxUclkByIndex,
data->smc_state_table.mem_max_level),
"Failed to set soft max mclk index!",
@@ -3853,7 +3795,7 @@ int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_VCE].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
"Attempt to Enable/Disable DPM VCE Failed!",
@@ -3871,9 +3813,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_SclkThrottleLowNotification)
- && (hwmgr->gfx_arbiter.sclk_threshold !=
+ if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
+ (hwmgr->gfx_arbiter.sclk_threshold !=
data->low_sclk_interrupt_threshold)) {
data->low_sclk_interrupt_threshold =
hwmgr->gfx_arbiter.sclk_threshold;
@@ -3884,7 +3825,7 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
cpu_to_le32(low_sclk_interrupt_threshold);
/* This message will also enable SmcToHost Interrupt */
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetLowGfxclkInterruptThreshold,
(uint32_t)low_sclk_interrupt_threshold);
}
@@ -3920,7 +3861,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
"Failed to update SCLK threshold!",
result = tmp_result);
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)pp_table, PPTABLE);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload PPtable!", return result);
@@ -3931,7 +3872,7 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
return 0;
}
-static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
@@ -3953,7 +3894,7 @@ static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
[vega10_ps->performance_level_count - 1].gfx_clock;
}
-static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
+static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
struct pp_power_state *ps;
struct vega10_power_state *vega10_ps;
@@ -3980,12 +3921,12 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
{
uint32_t value;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrPkgPwr),
"Failed to get current package power!",
return -EINVAL);
- vega10_read_arg_from_smc(hwmgr->smumgr, &value);
+ vega10_read_arg_from_smc(hwmgr, &value);
/* power value is an integer */
query->average_gpu_power = value << 8;
@@ -4002,25 +3943,25 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
+ vega10_read_arg_from_smc(hwmgr, &sclk_idx);
*((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
*size = 4;
}
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
+ vega10_read_arg_from_smc(hwmgr, &mclk_idx);
*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
*size = 4;
}
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
if (!ret) {
- vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
+ vega10_read_arg_from_smc(hwmgr, &activity_percent);
*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
*size = 4;
}
@@ -4055,7 +3996,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
bool has_disp)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
has_disp ? 0 : 1);
}
@@ -4090,7 +4031,7 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
if (!result) {
clk_request = (clk_freq << 16) | clk_select;
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_RequestDisplayClockByFreq,
clk_request);
}
@@ -4160,7 +4101,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
+ hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcefClockInSR /100),
"Attempt to set divider for DCEFCLK Failed!",);
} else {
@@ -4172,7 +4113,7 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (min_clocks.memoryClock != 0) {
idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
}
@@ -4275,28 +4216,23 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
return 0;
}
-static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
+static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
- int result = 0;
-
switch (mode) {
case AMD_FAN_CTRL_NONE:
- result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
+ vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
break;
case AMD_FAN_CTRL_MANUAL:
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
- result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
break;
case AMD_FAN_CTRL_AUTO:
- result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
- if (!result)
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ if (!vega10_fan_ctrl_set_static_mode(hwmgr, mode))
+ vega10_fan_ctrl_start_smc_fan_control(hwmgr);
break;
default:
break;
}
- return result;
}
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
@@ -4306,51 +4242,16 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
- uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
-
- if (level == hwmgr->dpm_level)
- return ret;
-
- if (!(hwmgr->dpm_level & profile_mode_mask)) {
- /* enter profile mode, save current level, disable gfx cg*/
- if (level & profile_mode_mask) {
- hwmgr->saved_dpm_level = hwmgr->dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
- }
- } else {
- /* exit profile mode, restore level, enable gfx cg*/
- if (!(level & profile_mode_mask)) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
- level = hwmgr->saved_dpm_level;
- cgs_set_clockgating_state(hwmgr->device,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- }
- }
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
ret = vega10_force_dpm_lowest(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
ret = vega10_unforce_dpm_levels(hwmgr);
- if (ret)
- return ret;
- hwmgr->dpm_level = level;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
@@ -4359,27 +4260,25 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
- hwmgr->dpm_level = level;
vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
- hwmgr->dpm_level = level;
- break;
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
-
- return 0;
+ if (!ret) {
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
+ else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
+ vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
+ }
+ return ret;
}
-static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
+static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -4624,7 +4523,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
int i;
- if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
+ if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
AMD_DPM_FORCED_LEVEL_LOW |
AMD_DPM_FORCED_LEVEL_HIGH))
return -EINVAL;
@@ -4697,11 +4596,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.sclk_dpm_key_disabled)
break;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentGfxclkIndex),
"Attempt to get current sclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read sclk index Failed!",
return -1);
@@ -4715,11 +4614,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.mclk_dpm_key_disabled)
break;
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentUclkIndex),
"Attempt to get current mclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read mclk index Failed!",
return -1);
@@ -4730,11 +4629,11 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_PCIE:
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentLinkIndex),
"Attempt to get current mclk index Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
&now),
"Attempt to read mclk index Failed!",
return -1);
@@ -4762,7 +4661,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if ((data->water_marks_bitmap & WaterMarksExist) &&
!(data->water_marks_bitmap & WaterMarksLoaded)) {
- result = vega10_copy_table_to_smc(hwmgr->smumgr,
+ result = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)wm_table, WMTABLE);
PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
data->water_marks_bitmap |= WaterMarksLoaded;
@@ -4771,7 +4670,7 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if (data->water_marks_bitmap & WaterMarksLoaded) {
cgs_get_active_displays_info(hwmgr->device, &info);
num_turned_on_displays = info.display_count;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
}
@@ -4784,7 +4683,7 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->smu_features[GNLD_DPM_UVD].supported) {
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
enable,
data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
"Attempt to Enable/Disable DPM UVD Failed!",
@@ -4794,20 +4693,20 @@ int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
return 0;
}
-static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
+static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
data->vce_power_gated = bgate;
- return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
+ vega10_enable_disable_vce_dpm(hwmgr, !bgate);
}
-static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
+static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
data->uvd_power_gated = bgate;
- return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
+ vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
}
static inline bool vega10_are_power_levels_equal(
@@ -4866,7 +4765,7 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg
if (data->display_timing.num_existing_displays != info.display_count)
is_update_required = true;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
+ if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
is_update_required = true;
}
@@ -4883,8 +4782,7 @@ static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
"DPM is not running right now, no need to disable DPM!",
return 0);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_ThermalController))
+ if (PP_CAP(PHM_PlatformCaps_ThermalController))
vega10_disable_thermal_protection(hwmgr);
tmp_result = vega10_disable_power_containment(hwmgr);
@@ -4972,7 +4870,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (!data->registry_data.sclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
sclk_idx),
"Failed to set soft min sclk index!",
@@ -4983,7 +4881,7 @@ static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
if (!data->registry_data.mclk_dpm_key_disabled)
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
- hwmgr->smumgr,
+ hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
mclk_idx),
"Failed to set soft min mclk index!",
@@ -5096,6 +4994,38 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
return 0;
}
+static int vega10_register_thermal_interrupt(struct pp_hwmgr *hwmgr,
+ const void *info)
+{
+ struct cgs_irq_src_funcs *irq_src =
+ (struct cgs_irq_src_funcs *)info;
+
+ if (hwmgr->thermal_controller.ucType ==
+ ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 ||
+ hwmgr->thermal_controller.ucType ==
+ ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0xf, /* AMDGPU_IH_CLIENTID_THM */
+ 0, 0, irq_src[0].set, irq_src[0].handler, hwmgr),
+ "Failed to register high thermal interrupt!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0xf, /* AMDGPU_IH_CLIENTID_THM */
+ 1, 0, irq_src[1].set, irq_src[1].handler, hwmgr),
+ "Failed to register low thermal interrupt!",
+ return -EINVAL);
+ }
+
+ /* Register CTF(GPIO_19) interrupt */
+ PP_ASSERT_WITH_CODE(!cgs_add_irq_source(hwmgr->device,
+ 0x16, /* AMDGPU_IH_CLIENTID_ROM_SMUIO, */
+ 83, 0, irq_src[2].set, irq_src[2].handler, hwmgr),
+ "Failed to register CTF thermal interrupt!",
+ return -EINVAL);
+
+ return 0;
+}
+
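The new vega10_register_thermal_interrupt() above wires three interrupt sources (high alert, low alert, and the CTF pin) from an array of set/handler callback pairs. A stand-in sketch of that registration shape; the registrar and the callback signatures here are invented placeholders, not the real CGS interfaces.

/* Stand-in sketch: registering interrupt sources from a callback table, in
 * the shape of the cgs_add_irq_source() calls above. All types and the
 * registrar are hypothetical.
 */
#include <stdio.h>

typedef int (*irq_set_fn)(void *ctx, unsigned int client, unsigned int src,
			  unsigned int state);
typedef int (*irq_handler_fn)(void *ctx, unsigned int client, unsigned int src);

struct irq_src_funcs { irq_set_fn set; irq_handler_fn handler; };
struct irq_desc { unsigned int client; unsigned int src; };

static int add_irq_source(unsigned int client, unsigned int src,
			  irq_set_fn set, irq_handler_fn handler, void *priv)
{
	(void)handler;
	printf("registered client=%#x src=%u\n", client, src);
	return set(priv, client, src, 1);	/* arm the source */
}

static int dummy_set(void *c, unsigned int cl, unsigned int s, unsigned int st)
{
	(void)c; (void)cl; (void)s; (void)st;
	return 0;
}

static int dummy_handler(void *c, unsigned int cl, unsigned int s)
{
	(void)c; (void)cl; (void)s;
	return 0;
}

int main(void)
{
	struct irq_src_funcs irq_src[3] = {
		{ dummy_set, dummy_handler },	/* high temperature alert */
		{ dummy_set, dummy_handler },	/* low temperature alert */
		{ dummy_set, dummy_handler },	/* CTF (critical) pin */
	};
	/* mirrors the three registrations above: THM 0, THM 1, SMUIO 83 */
	const struct irq_desc descs[3] = { { 0xf, 0 }, { 0xf, 1 }, { 0x16, 83 } };

	for (int i = 0; i < 3; i++)
		if (add_irq_source(descs[i].client, descs[i].src,
				   irq_src[i].set, irq_src[i].handler, NULL))
			return -1;
	return 0;
}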
static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.backend_init = vega10_hwmgr_backend_init,
.backend_fini = vega10_hwmgr_backend_fini,
@@ -5149,12 +5079,13 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.get_mclk_od = vega10_get_mclk_od,
.set_mclk_od = vega10_set_mclk_od,
.avfs_control = vega10_avfs_enable,
+ .register_internal_thermal_interrupt = vega10_register_thermal_interrupt,
};
int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
hwmgr->pptable_func = &vega10_pptable_funcs;
- pp_vega10_thermal_initialize(hwmgr);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index e7fa67063cdc..d2f695692f77 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -854,99 +854,79 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
uint32_t en = (enable ? 1 : 0);
uint32_t didt_block_info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
- data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_SQ_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~SQ_Enable_MASK;
didt_block_info |= en << SQ_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
- data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DB_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~DB_Enable_MASK;
didt_block_info |= en << DB_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
- data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TD_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~TD_Enable_MASK;
didt_block_info |= en << TD_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
- data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_TCP_CTRL0, DIDT_CTRL_EN, en);
didt_block_info &= ~TCP_Enable_MASK;
didt_block_info |= en << TCP_Enable_SHIFT;
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
- data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0);
- data &= ~DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK;
- data |= ((en << DIDT_DBR_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DBR_CTRL0__DIDT_CTRL_EN_MASK);
- cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_CTRL0, data);
+ if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
+ CGS_WREG32_FIELD_IND(hwmgr->device, CGS_IND_REG__DIDT,
+ DIDT_DBR_CTRL0, DIDT_CTRL_EN, en);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable)) {
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) {
+ if (PP_CAP(PHM_PlatformCaps_SQRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL);
- data &= ~DIDT_SQ_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_SQ_EDC_CTRL__EDC_EN__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_SQ_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_SQ_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DBRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL);
- data &= ~DIDT_DB_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_DB_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DB_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_DB_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DB_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_TDRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL);
- data &= ~DIDT_TD_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_TD_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TD_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_TD_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TD_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_TCPRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL);
- data &= ~DIDT_TCP_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_TCP_EDC_CTRL__EDC_EN__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_TCP_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_TCP_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping)) {
+ if (PP_CAP(PHM_PlatformCaps_DBRRamping)) {
data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL);
- data &= ~DIDT_DBR_EDC_CTRL__EDC_EN_MASK;
- data |= ((en << DIDT_DBR_EDC_CTRL__EDC_EN__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_EN_MASK);
- data &= ~DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK;
- data |= ((~en << DIDT_DBR_EDC_CTRL__EDC_SW_RST__SHIFT) & DIDT_DBR_EDC_CTRL__EDC_SW_RST_MASK);
+ data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en);
+ data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data);
}
}
if (enable) {
/* For Vega10, SMC does not support any mask yet. */
- result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
PP_ASSERT((0 == result), "[EnableDiDtConfig] SMC Configure Gfx Didt Failed!");
}
}
@@ -1040,10 +1020,10 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC))
+ if (PP_CAP(PHM_PlatformCaps_GCEDC))
vega10_program_gc_didt_config_registers(hwmgr, GCDiDtCtrl0Config_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
return 0;
@@ -1059,12 +1039,12 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
cgs_write_register(hwmgr->device, mmGC_DIDT_CTRL0, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
return 0;
@@ -1159,12 +1139,12 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlResetConfig_vega10);
vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCCtrlConfig_vega10);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMInitConfig_vega10);
return 0;
@@ -1180,12 +1160,12 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
cgs_enter_safe_mode(hwmgr->device, false);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC)) {
+ if (PP_CAP(PHM_PlatformCaps_GCEDC)) {
data = 0x00000000;
cgs_write_register(hwmgr->device, mmGC_EDC_CTRL, data);
}
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM))
+ if (PP_CAP(PHM_PlatformCaps_PSM))
vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10);
return 0;
@@ -1263,7 +1243,7 @@ int vega10_enable_didt_config(struct pp_hwmgr *hwmgr)
}
if (0 == result) {
- PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr->smumgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
+ PP_ASSERT_WITH_CODE((!vega10_enable_smc_features(hwmgr, true, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
"[EnableDiDtConfig] Attempt to Enable DiDt feature Failed!", return result);
data->smu_features[GNLD_DIDT].enabled = true;
}
@@ -1310,7 +1290,7 @@ int vega10_disable_didt_config(struct pp_hwmgr *hwmgr)
}
if (0 == result) {
- PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr->smumgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
+ PP_ASSERT_WITH_CODE((0 != vega10_enable_smc_features(hwmgr, false, data->smu_features[GNLD_DIDT].smu_feature_bitmap)),
"[DisableDiDtConfig] Attempt to Disable DiDt feature Failed!", return result);
data->smu_features[GNLD_DIDT].enabled = false;
}
@@ -1364,7 +1344,7 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
(struct vega10_hwmgr *)(hwmgr->backend);
if (data->registry_data.enable_pkg_pwr_tracking_feature)
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetPptLimit, n);
return 0;
@@ -1381,16 +1361,15 @@ int vega10_enable_power_containment(struct pp_hwmgr *hwmgr)
(uint32_t)(tdp_table->usMaximumPowerDeliveryLimit);
int result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_PPT].smu_feature_bitmap),
"Attempt to enable PPT feature Failed!",
data->smu_features[GNLD_PPT].supported = false);
if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true, data->smu_features[GNLD_TDC].smu_feature_bitmap),
"Attempt to enable PPT feature Failed!",
data->smu_features[GNLD_TDC].supported = false);
@@ -1409,16 +1388,15 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data =
(struct vega10_hwmgr *)(hwmgr->backend);
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->smu_features[GNLD_PPT].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_PPT].smu_feature_bitmap),
"Attempt to disable PPT feature Failed!",
data->smu_features[GNLD_PPT].supported = false);
if (data->smu_features[GNLD_TDC].supported)
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false, data->smu_features[GNLD_TDC].smu_feature_bitmap),
"Attempt to disable PPT feature Failed!",
data->smu_features[GNLD_TDC].supported = false);
@@ -1430,7 +1408,7 @@ int vega10_disable_power_containment(struct pp_hwmgr *hwmgr)
static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
- return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
}
@@ -1438,8 +1416,7 @@ int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
{
int adjust_percent, result = 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_PowerContainment)) {
+ if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
adjust_percent =
hwmgr->platform_descriptor.TDPAdjustmentPolarity ?
hwmgr->platform_descriptor.TDPAdjustment :
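
The powertune hunks are a mechanical conversion: every open-coded mask/shift read-modify-write collapses into PP_CAP() plus the CGS field helpers, and the SMC calls drop the hwmgr->smumgr indirection. A rough sketch of the field macro, assuming the usual reg##__##field##_MASK / __SHIFT register headers (the real definition lives in the cgs headers):

	#define CGS_REG_SET_FIELD(orig_val, reg, field, field_val)	\
		(((orig_val) & ~reg##__##field##_MASK) |		\
		 (((field_val) << reg##__##field##__SHIFT) &		\
		  reg##__##field##_MASK))

Because the macro masks the value into the field, passing ~en for EDC_SW_RST writes the complement of the enable bit: soft reset is asserted while EDC is being disabled and released when it is enabled.
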
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index d44243441d28..1feefac49ea9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -31,11 +31,11 @@
static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
- PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetCurrentRpm),
"Attempt to get current RPM from SMC Failed!",
return -1);
- PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr,
current_rpm),
"Attempt to read current RPM from SMC Failed!",
return -1);
@@ -54,8 +54,7 @@ int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
fan_speed_info->min_percent = 0;
fan_speed_info->max_percent = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
+ if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM) &&
hwmgr->thermal_controller.fanInfo.
ucTachometerPulsesPerRevolution) {
fan_speed_info->supports_rpm_read = true;
@@ -105,14 +104,15 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return -1;
- if (data->smu_features[GNLD_FAN_CONTROL].supported)
+ if (data->smu_features[GNLD_FAN_CONTROL].supported) {
result = vega10_get_current_rpm(hwmgr, speed);
- else {
+ } else {
uint32_t reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
- tach_period = (cgs_read_register(hwmgr->device,
- reg) & CG_TACH_STATUS__TACH_PERIOD_MASK) >>
- CG_TACH_STATUS__TACH_PERIOD__SHIFT;
+ tach_period =
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_STATUS,
+ TACH_PERIOD);
if (tach_period == 0)
return -EINVAL;
@@ -141,23 +141,20 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
if (hwmgr->fan_ctrl_is_in_default_mode) {
hwmgr->fan_ctrl_default_mode =
- (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >>
- CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
- hwmgr->tmin = (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL2__TMIN_MASK) >>
- CG_FDO_CTRL2__TMIN__SHIFT;
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE);
+ hwmgr->tmin =
+ CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN);
hwmgr->fan_ctrl_is_in_default_mode = false;
}
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TMIN_MASK) |
- (0 << CG_FDO_CTRL2__TMIN__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN, 0));
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) |
- (mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
return 0;
}
@@ -176,14 +173,13 @@ int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
if (!hwmgr->fan_ctrl_is_in_default_mode) {
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) |
- (hwmgr->fan_ctrl_default_mode <<
- CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, FDO_PWM_MODE,
+ hwmgr->fan_ctrl_default_mode));
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TMIN_MASK) |
- (hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TMIN,
+ hwmgr->tmin));
hwmgr->fan_ctrl_is_in_default_mode = true;
}
@@ -203,7 +199,7 @@ static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(
- hwmgr->smumgr, true,
+ hwmgr, true,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap),
"Attempt to Enable FAN CONTROL feature Failed!",
@@ -220,7 +216,7 @@ static int vega10_disable_fan_control_feature(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FAN_CONTROL].supported) {
PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(
- hwmgr->smumgr, false,
+ hwmgr, false,
data->smu_features[GNLD_FAN_CONTROL].
smu_feature_bitmap),
"Attempt to Enable FAN CONTROL feature Failed!",
@@ -279,16 +275,14 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
if (speed > 100)
speed = 100;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1);
- duty100 = (cgs_read_register(hwmgr->device, reg) &
- CG_FDO_CTRL1__FMAX_DUTY100_MASK) >>
- CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
+ duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL1, FMAX_DUTY100);
if (duty100 == 0)
return -EINVAL;
@@ -300,9 +294,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK) |
- (duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC);
}
@@ -314,18 +307,13 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
*/
int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr)
{
- int result;
-
if (hwmgr->thermal_controller.fanInfo.bNoFan)
return 0;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
- result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- } else
- result = vega10_fan_ctrl_set_default_mode(hwmgr);
-
- return result;
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
+ return vega10_fan_ctrl_start_smc_fan_control(hwmgr);
+ else
+ return vega10_fan_ctrl_set_default_mode(hwmgr);
}
/**
@@ -342,12 +330,11 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
uint32_t reg;
if (hwmgr->thermal_controller.fanInfo.bNoFan ||
- (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
- (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
+ (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) ||
+ (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM))
return -1;
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl))
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
if (!result) {
@@ -356,9 +343,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed)
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_TACH_STATUS__TACH_PERIOD_MASK) |
- (tach_period << CG_TACH_STATUS__TACH_PERIOD__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_STATUS, TACH_PERIOD,
+ tach_period));
}
return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM);
}
@@ -374,7 +361,7 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
uint32_t reg;
reg = soc15_get_register_offset(THM_HWID, 0,
- mmCG_TACH_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
+ mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS);
temp = cgs_read_register(hwmgr->device, reg);
@@ -418,20 +405,10 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
val = cgs_read_register(hwmgr->device, reg);
- val &= (~THM_THERMAL_INT_CTRL__MAX_IH_CREDIT_MASK);
- val |= (5 << THM_THERMAL_INT_CTRL__MAX_IH_CREDIT__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA_MASK);
- val |= (1 << THM_THERMAL_INT_CTRL__THERM_IH_HW_ENA__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK);
- val |= ((high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
- << THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT);
-
- val &= (~THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
- val |= ((low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
- << THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
-
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
cgs_write_register(hwmgr->device, reg, val);
@@ -452,19 +429,16 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_TACH_CTRL__EDGE_PER_REV_MASK) |
- ((hwmgr->thermal_controller.fanInfo.
- ucTachometerPulsesPerRevolution - 1) <<
- CG_TACH_CTRL__EDGE_PER_REV__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_TACH_CTRL, EDGE_PER_REV,
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1));
}
reg = soc15_get_register_offset(THM_HWID, 0,
mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
cgs_write_register(hwmgr->device, reg,
- (cgs_read_register(hwmgr->device, reg) &
- ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK) |
- (0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT));
+ CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg),
+ CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28));
return 0;
}
@@ -484,7 +458,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_FW_CTF].enabled)
printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n");
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
true,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to Enable FW CTF feature Failed!",
@@ -516,7 +490,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr)
printk("[Thermal_EnableAlert] FW CTF Already disabled!\n");
- PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
false,
data->smu_features[GNLD_FW_CTF].smu_feature_bitmap),
"Attempt to disable FW CTF feature Failed!",
@@ -554,8 +528,7 @@ int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr)
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
int ret;
struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
@@ -573,7 +546,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
table->FanTargetTemperature = hwmgr->thermal_controller.
advanceFanControlParameters.usTMax;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
(uint32_t)table->FanTargetTemperature);
@@ -602,7 +575,7 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
table->FanStartTemp = hwmgr->thermal_controller.
advanceFanControlParameters.usZeroRPMStartTemperature;
- ret = vega10_copy_table_to_smc(hwmgr->smumgr,
+ ret = vega10_copy_table_to_smc(hwmgr,
(uint8_t *)(&(data->smc_state_table.pp_table)), PPTABLE);
if (ret)
pr_info("Failed to update Fan Control Table in PPTable!");
@@ -619,123 +592,50 @@ int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
* @param Result the last failure code
* @return result from set temperature range routine
*/
-int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr)
{
/* If the fantable setup has failed we could have disabled
* PHM_PlatformCaps_MicrocodeFanControl even after
* this function was included in the table.
* Make sure that we still think controlling the fan is OK.
*/
- if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl)) {
+ if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
vega10_fan_ctrl_start_smc_fan_control(hwmgr);
- }
return 0;
}
-/**
-* Set temperature range for high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from set temperature range routine
-*/
-int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+
+int vega10_start_thermal_controller(struct pp_hwmgr *hwmgr,
+ struct PP_TemperatureRange *range)
{
- struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input;
+ int ret = 0;
if (range == NULL)
return -EINVAL;
- return vega10_thermal_set_temperature_range(hwmgr, range);
-}
-
-/**
-* Programs one-time setting registers
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from initialize thermal controller routine
-*/
-int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_initialize(hwmgr);
-}
-
-/**
-* Enable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from enable alert routine
-*/
-int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_enable_alert(hwmgr);
-}
-
-/**
-* Disable high and low alerts
-* @param hwmgr the address of the powerplay hardware manager.
-* @param pInput the pointer to input data
-* @param pOutput the pointer to output data
-* @param pStorage the pointer to temporary storage
-* @param Result the last failure code
-* @return result from disable alert routine
-*/
-static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- return vega10_thermal_disable_alert(hwmgr);
-}
+ vega10_thermal_initialize(hwmgr);
+ ret = vega10_thermal_set_temperature_range(hwmgr, range);
+ if (ret)
+ return -EINVAL;
-static struct phm_master_table_item
-vega10_thermal_start_thermal_controller_master_list[] = {
- { .tableFunction = tf_vega10_thermal_initialize },
- { .tableFunction = tf_vega10_thermal_set_temperature_range },
- { .tableFunction = tf_vega10_thermal_enable_alert },
+ vega10_thermal_enable_alert(hwmgr);
/* We should restrict performance levels to low before we halt the SMC.
* On the other hand we are still in boot state when we do this
* so it would be pointless.
* If this assumption changes we have to revisit this table.
*/
- { .tableFunction = tf_vega10_thermal_setup_fan_table },
- { .tableFunction = tf_vega10_thermal_start_smc_fan_control },
- { }
-};
+ ret = vega10_thermal_setup_fan_table(hwmgr);
+ if (ret)
+ return -EINVAL;
-static struct phm_master_table_header
-vega10_thermal_start_thermal_controller_master = {
- 0,
- PHM_MasterTableFlag_None,
- vega10_thermal_start_thermal_controller_master_list
-};
+ vega10_thermal_start_smc_fan_control(hwmgr);
-static struct phm_master_table_item
-vega10_thermal_set_temperature_range_master_list[] = {
- { .tableFunction = tf_vega10_thermal_disable_alert },
- { .tableFunction = tf_vega10_thermal_set_temperature_range },
- { .tableFunction = tf_vega10_thermal_enable_alert },
- { }
+ return 0;
};
-struct phm_master_table_header
-vega10_thermal_set_temperature_range_master = {
- 0,
- PHM_MasterTableFlag_None,
- vega10_thermal_set_temperature_range_master_list
-};
+
+
int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
{
@@ -745,32 +645,3 @@ int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr)
}
return 0;
}
-
-/**
-* Initializes the thermal controller related functions
-* in the Hardware Manager structure.
-* @param hwmgr The address of the hardware manager.
-* @exception Any error code from the low-level communication.
-*/
-int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr)
-{
- int result;
-
- result = phm_construct_table(hwmgr,
- &vega10_thermal_set_temperature_range_master,
- &(hwmgr->set_temperature_range));
-
- if (!result) {
- result = phm_construct_table(hwmgr,
- &vega10_thermal_start_thermal_controller_master,
- &(hwmgr->start_thermal_controller));
- if (result)
- phm_destroy_table(hwmgr,
- &(hwmgr->set_temperature_range));
- }
-
- if (!result)
- hwmgr->fan_ctrl_is_in_default_mode = true;
- return result;
-}
-
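
With this file the tf_*() trampolines and both phm master tables are gone; thermal bring-up is now the linear sequence in vega10_start_thermal_controller() (initialize, set the temperature range, enable alerts, set up the fan table, start SMC fan control), reached through the new start_thermal_controller hwmgr hook. A sketch of the generic-side dispatch, where the wrapper name is an assumption about the phm layer:

	static int phm_start_thermal_controller(struct pp_hwmgr *hwmgr,
						struct PP_TemperatureRange *range)
	{
		if (hwmgr->hwmgr_func->start_thermal_controller)
			return hwmgr->hwmgr_func->start_thermal_controller(hwmgr,
								range);
		return 0;
	}
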
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
index 776f3a2effc0..f34ce04cfd89 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h
@@ -50,13 +50,6 @@ struct vega10_temperature {
#define FDO_PWM_MODE_STATIC_RPM 5
-extern int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-
extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
@@ -69,7 +62,6 @@ extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr,
extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr,
uint32_t speed);
extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr);
-extern int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_ctrl_uninitialize_thermal_controller(
struct pp_hwmgr *hwmgr);
extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
@@ -77,9 +69,10 @@ extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr,
extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr,
uint32_t *speed);
extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr);
-extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
extern int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr);
-int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+extern int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr);
+
+extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr);
#endif
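
The header drops the tf_*() prototypes and pp_vega10_thermal_initialize() to match, and keeps the smu7_get_xclk() extern because Vega10 reuses smu7's crystal-clock helper for the fan math. As an illustration, the RPM to TACH_PERIOD conversion behind vega10_fan_ctrl_set_fan_speed_rpm() follows the smu7 formula; the constants below are an assumption carried over from that implementation:

	uint32_t tach_period = 60 * smu7_get_xclk(hwmgr) * 10000 / (8 * speed);
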
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 07e9c0b5915d..435da2647727 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -31,7 +31,7 @@
#include "dm_pp_interface.h"
extern const struct amd_ip_funcs pp_ip_funcs;
-extern const struct amd_powerplay_funcs pp_dpm_funcs;
+extern const struct amd_pm_funcs pp_dpm_funcs;
#define PP_DPM_DISABLED 0xCCCC
@@ -50,94 +50,12 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_GPU_POWER,
};
-enum amd_pp_event {
- AMD_PP_EVENT_INITIALIZE = 0,
- AMD_PP_EVENT_UNINITIALIZE,
- AMD_PP_EVENT_POWER_SOURCE_CHANGE,
- AMD_PP_EVENT_SUSPEND,
- AMD_PP_EVENT_RESUME,
- AMD_PP_EVENT_ENTER_REST_STATE,
- AMD_PP_EVENT_EXIT_REST_STATE,
- AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE,
- AMD_PP_EVENT_THERMAL_NOTIFICATION,
- AMD_PP_EVENT_VBIOS_NOTIFICATION,
- AMD_PP_EVENT_ENTER_THERMAL_STATE,
- AMD_PP_EVENT_EXIT_THERMAL_STATE,
- AMD_PP_EVENT_ENTER_FORCED_STATE,
- AMD_PP_EVENT_EXIT_FORCED_STATE,
- AMD_PP_EVENT_ENTER_EXCLUSIVE_MODE,
- AMD_PP_EVENT_EXIT_EXCLUSIVE_MODE,
- AMD_PP_EVENT_ENTER_SCREEN_SAVER,
- AMD_PP_EVENT_EXIT_SCREEN_SAVER,
- AMD_PP_EVENT_VPU_RECOVERY_BEGIN,
- AMD_PP_EVENT_VPU_RECOVERY_END,
- AMD_PP_EVENT_ENABLE_POWER_PLAY,
- AMD_PP_EVENT_DISABLE_POWER_PLAY,
- AMD_PP_EVENT_CHANGE_POWER_SOURCE_UI_LABEL,
- AMD_PP_EVENT_ENABLE_USER2D_PERFORMANCE,
- AMD_PP_EVENT_DISABLE_USER2D_PERFORMANCE,
- AMD_PP_EVENT_ENABLE_USER3D_PERFORMANCE,
- AMD_PP_EVENT_DISABLE_USER3D_PERFORMANCE,
- AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST,
- AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST,
- AMD_PP_EVENT_ENABLE_REDUCED_REFRESH_RATE,
- AMD_PP_EVENT_DISABLE_REDUCED_REFRESH_RATE,
- AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING,
- AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING,
- AMD_PP_EVENT_ENABLE_CGPG,
- AMD_PP_EVENT_DISABLE_CGPG,
- AMD_PP_EVENT_ENTER_TEXT_MODE,
- AMD_PP_EVENT_EXIT_TEXT_MODE,
- AMD_PP_EVENT_VIDEO_START,
- AMD_PP_EVENT_VIDEO_STOP,
- AMD_PP_EVENT_ENABLE_USER_STATE,
- AMD_PP_EVENT_DISABLE_USER_STATE,
- AMD_PP_EVENT_READJUST_POWER_STATE,
- AMD_PP_EVENT_START_INACTIVITY,
- AMD_PP_EVENT_STOP_INACTIVITY,
- AMD_PP_EVENT_LINKED_ADAPTERS_READY,
- AMD_PP_EVENT_ADAPTER_SAFE_TO_DISABLE,
- AMD_PP_EVENT_COMPLETE_INIT,
- AMD_PP_EVENT_CRITICAL_THERMAL_FAULT,
- AMD_PP_EVENT_BACKLIGHT_CHANGED,
- AMD_PP_EVENT_ENABLE_VARI_BRIGHT,
- AMD_PP_EVENT_DISABLE_VARI_BRIGHT,
- AMD_PP_EVENT_ENABLE_VARI_BRIGHT_ON_POWER_XPRESS,
- AMD_PP_EVENT_DISABLE_VARI_BRIGHT_ON_POWER_XPRESS,
- AMD_PP_EVENT_SET_VARI_BRIGHT_LEVEL,
- AMD_PP_EVENT_VARI_BRIGHT_MONITOR_MEASUREMENT,
- AMD_PP_EVENT_SCREEN_ON,
- AMD_PP_EVENT_SCREEN_OFF,
- AMD_PP_EVENT_PRE_DISPLAY_CONFIG_CHANGE,
- AMD_PP_EVENT_ENTER_ULP_STATE,
- AMD_PP_EVENT_EXIT_ULP_STATE,
- AMD_PP_EVENT_REGISTER_IP_STATE,
- AMD_PP_EVENT_UNREGISTER_IP_STATE,
- AMD_PP_EVENT_ENTER_MGPU_MODE,
- AMD_PP_EVENT_EXIT_MGPU_MODE,
- AMD_PP_EVENT_ENTER_MULTI_GPU_MODE,
- AMD_PP_EVENT_PRE_SUSPEND,
- AMD_PP_EVENT_PRE_RESUME,
- AMD_PP_EVENT_ENTER_BACOS,
- AMD_PP_EVENT_EXIT_BACOS,
- AMD_PP_EVENT_RESUME_BACO,
- AMD_PP_EVENT_RESET_BACO,
- AMD_PP_EVENT_PRE_DISPLAY_PHY_ACCESS,
- AMD_PP_EVENT_POST_DISPLAY_PHY_CCESS,
- AMD_PP_EVENT_START_COMPUTE_APPLICATION,
- AMD_PP_EVENT_STOP_COMPUTE_APPLICATION,
- AMD_PP_EVENT_REDUCE_POWER_LIMIT,
- AMD_PP_EVENT_ENTER_FRAME_LOCK,
- AMD_PP_EVENT_EXIT_FRAME_LOOCK,
- AMD_PP_EVENT_LONG_IDLE_REQUEST_BACO,
- AMD_PP_EVENT_LONG_IDLE_ENTER_BACO,
- AMD_PP_EVENT_LONG_IDLE_EXIT_BACO,
- AMD_PP_EVENT_HIBERNATE,
- AMD_PP_EVENT_CONNECTED_STANDBY,
- AMD_PP_EVENT_ENTER_SELF_REFRESH,
- AMD_PP_EVENT_EXIT_SELF_REFRESH,
- AMD_PP_EVENT_START_AVFS_BTC,
- AMD_PP_EVENT_MAX
+enum amd_pp_task {
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ AMD_PP_TASK_ENABLE_USER_STATE,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ AMD_PP_TASK_COMPLETE_INIT,
+ AMD_PP_TASK_MAX
};
struct amd_pp_init {
@@ -295,12 +213,6 @@ enum {
PP_GROUP_MAX
};
-enum pp_clock_type {
- PP_SCLK,
- PP_MCLK,
- PP_PCIE,
-};
-
struct pp_states_info {
uint32_t nums;
uint32_t states[16];
@@ -355,49 +267,10 @@ struct pp_display_clock_request {
support << PP_STATE_SUPPORT_SHIFT |\
state << PP_STATE_SHIFT)
-struct amd_powerplay_funcs {
- int (*get_temperature)(void *handle);
- int (*load_firmware)(void *handle);
- int (*wait_for_fw_loading_complete)(void *handle);
- int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
- enum amd_dpm_forced_level (*get_performance_level)(void *handle);
- enum amd_pm_state_type (*get_current_power_state)(void *handle);
- int (*get_sclk)(void *handle, bool low);
- int (*get_mclk)(void *handle, bool low);
- int (*powergate_vce)(void *handle, bool gate);
- int (*powergate_uvd)(void *handle, bool gate);
- int (*dispatch_tasks)(void *handle, enum amd_pp_event event_id,
- void *input, void *output);
- int (*set_fan_control_mode)(void *handle, uint32_t mode);
- int (*get_fan_control_mode)(void *handle);
- int (*set_fan_speed_percent)(void *handle, uint32_t percent);
- int (*get_fan_speed_percent)(void *handle, uint32_t *speed);
- int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
- int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
- int (*get_pp_table)(void *handle, char **table);
- int (*set_pp_table)(void *handle, const char *buf, size_t size);
- int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
- int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
- int (*get_sclk_od)(void *handle);
- int (*set_sclk_od)(void *handle, uint32_t value);
- int (*get_mclk_od)(void *handle);
- int (*set_mclk_od)(void *handle, uint32_t value);
- int (*read_sensor)(void *handle, int idx, void *value, int *size);
- struct amd_vce_state* (*get_vce_clock_state)(void *handle, unsigned idx);
- int (*reset_power_profile_state)(void *handle,
- struct amd_pp_profile *request);
- int (*get_power_profile_state)(void *handle,
- struct amd_pp_profile *query);
- int (*set_power_profile_state)(void *handle,
- struct amd_pp_profile *request);
- int (*switch_power_profile)(void *handle,
- enum amd_pp_profile_type type);
-};
-
struct amd_powerplay {
void *pp_handle;
const struct amd_ip_funcs *ip_funcs;
- const struct amd_powerplay_funcs *pp_funcs;
+ const struct amd_pm_funcs *pp_funcs;
};
int amd_powerplay_create(struct amd_pp_init *pp_init,
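
amd_powerplay.h sheds most of its weight here: the ~90-entry amd_pp_event enum is reduced to the five amd_pp_task values the driver actually dispatches, the pp_clock_type enum is dropped from this header, and the per-implementation amd_powerplay_funcs vtable is replaced by the shared amd_pm_funcs so powerplay and the legacy dpm paths expose one interface. A hypothetical before/after of a dispatch call (the old call is the removed vtable hook; the new entry point is declared in the hwmgr.h hunk further down):

	/* old */
	pp_funcs->dispatch_tasks(handle, AMD_PP_EVENT_READJUST_POWER_STATE,
				 NULL, NULL);
	/* new */
	hwmgr_handle_task(handle, AMD_PP_TASK_READJUST_POWER_STATE,
			  NULL, NULL);
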
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h b/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h
deleted file mode 100644
index b9d84de8a44d..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmanager.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_MANAGER_H_
-#define _EVENT_MANAGER_H_
-
-#include "power_state.h"
-#include "pp_power_source.h"
-#include "hardwaremanager.h"
-#include "pp_asicblocks.h"
-
-struct pp_eventmgr;
-enum amd_pp_event;
-
-enum PEM_EventDataValid {
- PEM_EventDataValid_RequestedStateID = 0,
- PEM_EventDataValid_RequestedUILabel,
- PEM_EventDataValid_NewPowerState,
- PEM_EventDataValid_RequestedPowerSource,
- PEM_EventDataValid_RequestedClocks,
- PEM_EventDataValid_CurrentTemperature,
- PEM_EventDataValid_AsicBlocks,
- PEM_EventDataValid_ODParameters,
- PEM_EventDataValid_PXAdapterPrefs,
- PEM_EventDataValid_PXUserPrefs,
- PEM_EventDataValid_PXSwitchReason,
- PEM_EventDataValid_PXSwitchPhase,
- PEM_EventDataValid_HdVideo,
- PEM_EventDataValid_BacklightLevel,
- PEM_EventDatavalid_VariBrightParams,
- PEM_EventDataValid_VariBrightLevel,
- PEM_EventDataValid_VariBrightImmediateChange,
- PEM_EventDataValid_PercentWhite,
- PEM_EventDataValid_SdVideo,
- PEM_EventDataValid_HTLinkChangeReason,
- PEM_EventDataValid_HWBlocks,
- PEM_EventDataValid_RequestedThermalState,
- PEM_EventDataValid_MvcVideo,
- PEM_EventDataValid_Max
-};
-
-typedef enum PEM_EventDataValid PEM_EventDataValid;
-
-/* Number of bits in ULONG variable */
-#define PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD (sizeof(unsigned long)*8)
-
-/* Number of ULONG entries used by event data valid bits */
-#define PEM_MAX_NUM_EVENTDATAVALID_ULONG_ENTRIES \
- ((PEM_EventDataValid_Max + PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD - 1) / \
- PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD)
-
-static inline void pem_set_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field)
-{
- fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] |=
- (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-static inline void pem_unset_event_data_valid(unsigned long *fields, PEM_EventDataValid valid_field)
-{
- fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &=
- ~(1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-static inline unsigned long pem_is_event_data_valid(const unsigned long *fields, PEM_EventDataValid valid_field)
-{
- return fields[valid_field / PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD] &
- (1UL << (valid_field % PEM_MAX_NUM_EVENTDATAVALID_BITS_PER_FIELD));
-}
-
-struct pem_event_data {
- unsigned long valid_fields[100];
- unsigned long requested_state_id;
- enum PP_StateUILabel requested_ui_label;
- struct pp_power_state *pnew_power_state;
- enum pp_power_source requested_power_source;
- struct PP_Clocks requested_clocks;
- bool skip_state_adjust_rules;
- struct phm_asic_blocks asic_blocks;
- /* to doPP_ThermalState requestedThermalState;
- enum ThermalStateRequestSrc requestThermalStateSrc;
- PP_Temperature currentTemperature;*/
-
-};
-
-int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event,
- struct pem_event_data *event_data);
-
-bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr);
-
-#endif /* _EVENT_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h b/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
deleted file mode 100644
index 7bd8a7e57080..000000000000
--- a/drivers/gpu/drm/amd/powerplay/inc/eventmgr.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _EVENTMGR_H_
-#define _EVENTMGR_H_
-
-#include <linux/mutex.h>
-#include "pp_instance.h"
-#include "hardwaremanager.h"
-#include "eventmanager.h"
-#include "pp_feature.h"
-#include "pp_power_source.h"
-#include "power_state.h"
-
-typedef int (*pem_event_action)(struct pp_eventmgr *eventmgr,
- struct pem_event_data *event_data);
-
-struct action_chain {
- const char *description; /* action chain description for debugging purpose */
- const pem_event_action * const *action_chain; /* pointer to chain of event actions */
-};
-
-struct pem_power_source_ui_state_info {
- enum PP_StateUILabel current_ui_label;
- enum PP_StateUILabel default_ui_lable;
- unsigned long configurable_ui_mapping;
-};
-
-struct pp_clock_range {
- uint32_t min_sclk_khz;
- uint32_t max_sclk_khz;
-
- uint32_t min_mclk_khz;
- uint32_t max_mclk_khz;
-
- uint32_t min_vclk_khz;
- uint32_t max_vclk_khz;
-
- uint32_t min_dclk_khz;
- uint32_t max_dclk_khz;
-
- uint32_t min_aclk_khz;
- uint32_t max_aclk_khz;
-
- uint32_t min_eclk_khz;
- uint32_t max_eclk_khz;
-};
-
-enum pp_state {
- UNINITIALIZED,
- INACTIVE,
- ACTIVE
-};
-
-enum pp_ring_index {
- PP_RING_TYPE_GFX_INDEX = 0,
- PP_RING_TYPE_DMA_INDEX,
- PP_RING_TYPE_DMA1_INDEX,
- PP_RING_TYPE_UVD_INDEX,
- PP_RING_TYPE_VCE0_INDEX,
- PP_RING_TYPE_VCE1_INDEX,
- PP_RING_TYPE_CP1_INDEX,
- PP_RING_TYPE_CP2_INDEX,
- PP_NUM_RINGS,
-};
-
-struct pp_request {
- uint32_t flags;
- uint32_t sclk;
- uint32_t sclk_throttle;
- uint32_t mclk;
- uint32_t vclk;
- uint32_t dclk;
- uint32_t eclk;
- uint32_t aclk;
- uint32_t iclk;
- uint32_t vp8clk;
- uint32_t rsv[32];
-};
-
-struct pp_eventmgr {
- struct pp_hwmgr *hwmgr;
- struct pp_smumgr *smumgr;
-
- struct pp_feature_info features[PP_Feature_Max];
- const struct action_chain *event_chain[AMD_PP_EVENT_MAX];
- struct phm_platform_descriptor *platform_descriptor;
- struct pp_clock_range clock_range;
- enum pp_power_source current_power_source;
- struct pem_power_source_ui_state_info ui_state_info[PP_PowerSource_Max];
- enum pp_state states[PP_NUM_RINGS];
- struct pp_request hi_req;
- struct list_head context_list;
- struct mutex lock;
- bool block_adjust_power_state;
- bool enable_cg;
- bool enable_gfx_cgpg;
- int (*pp_eventmgr_init)(struct pp_eventmgr *eventmgr);
- void (*pp_eventmgr_fini)(struct pp_eventmgr *eventmgr);
-};
-
-int eventmgr_early_init(struct pp_instance *handle);
-
-#endif /* _EVENTMGR_H_ */
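
eventmanager.h and eventmgr.h are deleted outright: the event-manager state machine, its action chains, and the pem_* valid-bit bookkeeping had become dead weight once every consumer was converted to direct hwmgr calls. What survives of the dispatch is the small task handler; a plausible shape, as a sketch (the real body lives in the generic hwmgr code):

	int hwmgr_handle_task(struct pp_instance *handle,
			      enum amd_pp_task task_id, void *input, void *output)
	{
		switch (task_id) {
		case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		case AMD_PP_TASK_READJUST_POWER_STATE:
		case AMD_PP_TASK_ENABLE_USER_STATE:
			/* select and program a power state directly */
			break;
		case AMD_PP_TASK_COMPLETE_INIT:
			break;
		default:
			return -EINVAL;
		}
		return 0;
	}
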
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a4c8b09b6f14..57a0467b7267 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -283,6 +283,8 @@ static inline bool phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps
(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1)))));
}
+#define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c))
+
#define PP_PCIEGenInvalid 0xffff
enum PP_PCIEGen {
PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */
@@ -295,7 +297,7 @@ typedef enum PP_PCIEGen PP_PCIEGen;
#define PP_Min_PCIEGen PP_PCIEGen1
#define PP_Max_PCIEGen PP_PCIEGen3
#define PP_Min_PCIELane 1
-#define PP_Max_PCIELane 32
+#define PP_Max_PCIELane 16
enum phm_clock_Type {
PHM_DispClock = 1,
@@ -373,8 +375,6 @@ struct phm_odn_clock_levels {
extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr);
extern int phm_enable_clock_power_gatings(struct pp_hwmgr *hwmgr);
-extern int phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool gate);
-extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
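
Two small but consequential changes land in hardwaremanager.h: PP_CAP() is introduced as shorthand for phm_cap_enabled(), and PP_Max_PCIELane is corrected from 32 to 16 to match the widest link these parts support. Note that PP_CAP() expands against a local variable literally named hwmgr, so it only works in functions where one is in scope; a minimal usage sketch (the function name is illustrative):

	static bool example_uses_pp_cap(struct pp_hwmgr *hwmgr)
	{
		/* expands to phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, ...) */
		return PP_CAP(PHM_PlatformCaps_PowerContainment);
	}
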
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 91b0105e8240..126b44d47a99 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -32,6 +32,7 @@
#include "ppatomctrl.h"
#include "hwmgr_ppt.h"
#include "power_state.h"
+#include "cgs_linux.h"
struct pp_instance;
struct pp_hwmgr;
@@ -61,10 +62,6 @@ struct vi_dpm_table {
struct vi_dpm_level dpm_level[1];
};
-enum PP_Result {
- PP_Result_TableImmediateExit = 0x13,
-};
-
#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
#define PCIE_PERF_REQ_GEN1 2
@@ -103,17 +100,6 @@ enum PHM_BackEnd_Magic {
PHM_Rv_Magic = 0x20161121
};
-
-#define PHM_PCIE_POWERGATING_TARGET_GFX 0
-#define PHM_PCIE_POWERGATING_TARGET_DDI 1
-#define PHM_PCIE_POWERGATING_TARGET_PLLCASCADE 2
-#define PHM_PCIE_POWERGATING_TARGET_PHY 3
-
-typedef int (*phm_table_function)(struct pp_hwmgr *hwmgr, void *input,
- void *output, void *storage, int result);
-
-typedef bool (*phm_check_function)(struct pp_hwmgr *hwmgr);
-
struct phm_set_power_state_input {
const struct pp_hw_power_state *pcurrent_state;
const struct pp_hw_power_state *pnew_state;
@@ -149,30 +135,6 @@ struct phm_gfx_arbiter {
uint32_t fclk;
};
-/* Entries in the master tables */
-struct phm_master_table_item {
- phm_check_function isFunctionNeededInRuntimeTable;
- phm_table_function tableFunction;
-};
-
-enum phm_master_table_flag {
- PHM_MasterTableFlag_None = 0,
- PHM_MasterTableFlag_ExitOnError = 1,
-};
-
-/* The header of the master tables */
-struct phm_master_table_header {
- uint32_t storage_size;
- uint32_t flags;
- const struct phm_master_table_item *master_list;
-};
-
-struct phm_runtime_table_header {
- uint32_t storage_size;
- bool exit_error;
- phm_table_function *function_list;
-};
-
struct phm_clock_array {
uint32_t count;
uint32_t values[1];
@@ -216,19 +178,6 @@ struct phm_phase_shedding_limits_record {
uint32_t Mclk;
};
-
-extern int phm_dispatch_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table,
- void *input, void *output);
-
-extern int phm_construct_table(struct pp_hwmgr *hwmgr,
- const struct phm_master_table_header *master_table,
- struct phm_runtime_table_header *rt_table);
-
-extern int phm_destroy_table(struct pp_hwmgr *hwmgr,
- struct phm_runtime_table_header *rt_table);
-
-
struct phm_uvd_clock_voltage_dependency_record {
uint32_t vclk;
uint32_t dclk;
@@ -286,6 +235,39 @@ struct phm_vce_clock_voltage_dependency_table {
struct phm_vce_clock_voltage_dependency_record entries[1];
};
+struct pp_smumgr_func {
+ int (*smu_init)(struct pp_hwmgr *hwmgr);
+ int (*smu_fini)(struct pp_hwmgr *hwmgr);
+ int (*start_smu)(struct pp_hwmgr *hwmgr);
+ int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr,
+ uint32_t firmware);
+ int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr);
+ int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr,
+ uint32_t firmware);
+ int (*get_argument)(struct pp_hwmgr *hwmgr);
+ int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg);
+ int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+ int (*download_pptable_settings)(struct pp_hwmgr *hwmgr,
+ void **table);
+ int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr);
+ int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
+ int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
+ int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
+ int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
+ int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
+ int (*init_smc_table)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
+ int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
+ int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
+ uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
+ uint32_t (*get_mac_definition)(uint32_t value);
+ bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
+ int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request);
+ bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr);
+};
+
struct pp_hwmgr_func {
int (*backend_init)(struct pp_hwmgr *hw_mgr);
int (*backend_fini)(struct pp_hwmgr *hw_mgr);
@@ -311,10 +293,10 @@ struct pp_hwmgr_func {
unsigned long, struct pp_power_state *);
int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr);
int (*powerdown_uvd)(struct pp_hwmgr *hwmgr);
- int (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
- int (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
- int (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
- int (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
+ void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate);
+ void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate);
+ uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low);
+ uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low);
int (*power_state_set)(struct pp_hwmgr *hwmgr,
const void *state);
int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr);
@@ -328,8 +310,8 @@ struct pp_hwmgr_func {
int (*get_temperature)(struct pp_hwmgr *hwmgr);
int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr);
int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info);
- int (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
- int (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
+ void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode);
+ uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr);
int (*set_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t percent);
int (*get_fan_speed_percent)(struct pp_hwmgr *hwmgr, uint32_t *speed);
int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t percent);
@@ -378,6 +360,9 @@ struct pp_hwmgr_func {
struct amd_pp_profile *request);
int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable);
int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr);
+ int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count);
+ int (*set_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock);
+ int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range);
};
struct pp_table_func {
@@ -745,7 +730,7 @@ struct pp_hwmgr {
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
- bool block_hw_access;
+ enum amd_dpm_forced_level request_dpm_level;
struct phm_gfx_arbiter gfx_arbiter;
struct phm_acp_arbiter acp_arbiter;
struct phm_uvd_arbiter uvd_arbiter;
@@ -754,19 +739,17 @@ struct pp_hwmgr {
void *pptable;
struct phm_platform_descriptor platform_descriptor;
void *backend;
+
+ void *smu_backend;
+ const struct pp_smumgr_func *smumgr_funcs;
+ bool is_kicker;
+ bool reload_fw;
+
enum PP_DAL_POWERLEVEL dal_power_level;
struct phm_dynamic_state_info dyn_state;
- struct phm_runtime_table_header setup_asic;
- struct phm_runtime_table_header power_down_asic;
- struct phm_runtime_table_header disable_dynamic_state_management;
- struct phm_runtime_table_header enable_dynamic_state_management;
- struct phm_runtime_table_header set_power_state;
- struct phm_runtime_table_header enable_clock_power_gatings;
- struct phm_runtime_table_header display_configuration_changed;
- struct phm_runtime_table_header start_thermal_controller;
- struct phm_runtime_table_header set_temperature_range;
const struct pp_hwmgr_func *hwmgr_func;
const struct pp_table_func *pptable_func;
+
struct pp_power_state *ps;
enum pp_power_source power_source;
uint32_t num_ps;
@@ -784,26 +767,44 @@ struct pp_hwmgr {
struct amd_pp_display_configuration display_config;
uint32_t feature_mask;
- /* power profile */
+ /* UMD Pstate */
struct amd_pp_profile gfx_power_profile;
struct amd_pp_profile compute_power_profile;
struct amd_pp_profile default_gfx_power_profile;
struct amd_pp_profile default_compute_power_profile;
enum amd_pp_profile_type current_power_profile;
+ bool en_umd_pstate;
+};
+
+struct cgs_irq_src_funcs {
+ cgs_irq_source_set_func_t set;
+ cgs_irq_handler_func_t handler;
};
extern int hwmgr_early_init(struct pp_instance *handle);
extern int hwmgr_hw_init(struct pp_instance *handle);
extern int hwmgr_hw_fini(struct pp_instance *handle);
+extern int hwmgr_hw_suspend(struct pp_instance *handle);
+extern int hwmgr_hw_resume(struct pp_instance *handle);
+extern int hwmgr_handle_task(struct pp_instance *handle,
+ enum amd_pp_task task_id,
+ void *input, void *output);
extern int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
uint32_t value, uint32_t mask);
-extern void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
+extern int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
uint32_t indirect_port,
uint32_t index,
uint32_t value,
uint32_t mask);
+extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
+ uint32_t index,
+ uint32_t value, uint32_t mask);
+extern int phm_wait_for_indirect_register_unequal(
+ struct pp_hwmgr *hwmgr,
+ uint32_t indirect_port, uint32_t index,
+ uint32_t value, uint32_t mask);
extern bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr);
@@ -888,5 +889,58 @@ extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_t
PHM_WAIT_INDIRECT_REGISTER(hwmgr, port, reg, (fieldval) \
<< PHM_FIELD_SHIFT(reg, field), PHM_FIELD_MASK(reg, field))
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX, index, value, mask)
+
+#define PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field) )
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_for_indirect_register_unequal(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, \
+ port, index, value, mask) \
+ phm_wait_on_indirect_register(hwmgr, \
+ mm##port##_INDEX_11, index, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, value, mask) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(hwmgr, port, ix##reg, value, mask)
+
+#define PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, port, reg, field, fieldval) \
+ PHM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, port, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
+
+#define PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ index, value, mask) \
+ phm_wait_for_register_unequal(hwmgr, \
+ index, value, mask)
+
+#define PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, value, mask) \
+ PHM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(hwmgr, \
+ mm##reg, value, mask)
+
+#define PHM_WAIT_FIELD_UNEQUAL(hwmgr, reg, field, fieldval) \
+ PHM_WAIT_REGISTER_UNEQUAL(hwmgr, reg, \
+ (fieldval) << PHM_FIELD_SHIFT(reg, field), \
+ PHM_FIELD_MASK(reg, field))
#endif /* _HWMGR_H_ */
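
For orientation: the unequal-wait helpers added above layer field -> register -> raw index, mirroring the SMUM_* macros deleted further down. A minimal expansion sketch, assuming PHM_FIELD_SHIFT/PHM_FIELD_MASK follow the usual reg##__##field##__SHIFT / reg##__##field##_MASK pattern (as the deleted SMUM_* versions do):

	/* PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0) expands to: */
	phm_wait_for_register_unequal(hwmgr, mmSMC_RESP_0,
			(0) << SMC_RESP_0__SMC_RESP__SHIFT,
			SMC_RESP_0__SMC_RESP_MASK);
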
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
index 4c3b537a714f..25fb1460a194 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_instance.h
@@ -23,9 +23,7 @@
#ifndef _PP_INSTANCE_H_
#define _PP_INSTANCE_H_
-#include "smumgr.h"
#include "hwmgr.h"
-#include "eventmgr.h"
#define PP_VALID 0x1F1F1F1F
@@ -36,9 +34,7 @@ struct pp_instance {
bool pm_en;
uint32_t feature_mask;
void *device;
- struct pp_smumgr *smu_mgr;
struct pp_hwmgr *hwmgr;
- struct pp_eventmgr *eventmgr;
struct mutex pp_lock;
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index 5d61cc9d4554..7c9aba81cd6a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -23,23 +23,13 @@
#ifndef _SMUMGR_H_
#define _SMUMGR_H_
#include <linux/types.h>
-#include "pp_instance.h"
#include "amd_powerplay.h"
-
-struct pp_smumgr;
-struct pp_instance;
-struct pp_hwmgr;
+#include "hwmgr.h"
#define smu_lower_32_bits(n) ((uint32_t)(n))
#define smu_upper_32_bits(n) ((uint32_t)(((n)>>16)>>16))
-extern const struct pp_smumgr_func cz_smu_funcs;
-extern const struct pp_smumgr_func iceland_smu_funcs;
-extern const struct pp_smumgr_func tonga_smu_funcs;
-extern const struct pp_smumgr_func fiji_smu_funcs;
-extern const struct pp_smumgr_func polaris10_smu_funcs;
-extern const struct pp_smumgr_func vega10_smu_funcs;
-extern const struct pp_smumgr_func rv_smu_funcs;
+
enum AVFS_BTC_STATUS {
AVFS_BTC_BOOT = 0,
@@ -100,216 +90,44 @@ enum SMU_MAC_DEFINITION {
SMU_UVD_MCLK_HANDSHAKE_DISABLE,
};
+extern int smum_get_argument(struct pp_hwmgr *hwmgr);
-struct pp_smumgr_func {
- int (*smu_init)(struct pp_smumgr *smumgr);
- int (*smu_fini)(struct pp_smumgr *smumgr);
- int (*start_smu)(struct pp_smumgr *smumgr);
- int (*check_fw_load_finish)(struct pp_smumgr *smumgr,
- uint32_t firmware);
- int (*request_smu_load_fw)(struct pp_smumgr *smumgr);
- int (*request_smu_load_specific_fw)(struct pp_smumgr *smumgr,
- uint32_t firmware);
- int (*get_argument)(struct pp_smumgr *smumgr);
- int (*send_msg_to_smc)(struct pp_smumgr *smumgr, uint16_t msg);
- int (*send_msg_to_smc_with_parameter)(struct pp_smumgr *smumgr,
- uint16_t msg, uint32_t parameter);
- int (*download_pptable_settings)(struct pp_smumgr *smumgr,
- void **table);
- int (*upload_pptable_settings)(struct pp_smumgr *smumgr);
- int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type);
- int (*process_firmware_header)(struct pp_hwmgr *hwmgr);
- int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr);
- int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr);
- int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr);
- int (*init_smc_table)(struct pp_hwmgr *hwmgr);
- int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr);
- int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr);
- int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr);
- uint32_t (*get_offsetof)(uint32_t type, uint32_t member);
- uint32_t (*get_mac_definition)(uint32_t value);
- bool (*is_dpm_running)(struct pp_hwmgr *hwmgr);
- int (*populate_requested_graphic_levels)(struct pp_hwmgr *hwmgr,
- struct amd_pp_profile *request);
- bool (*is_hw_avfs_present)(struct pp_smumgr *smumgr);
-};
-
-struct pp_smumgr {
- uint32_t chip_family;
- uint32_t chip_id;
- void *device;
- void *backend;
- uint32_t usec_timeout;
- bool reload_fw;
- const struct pp_smumgr_func *smumgr_funcs;
- bool is_kicker;
-};
-
-extern int smum_early_init(struct pp_instance *handle);
+extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
-extern int smum_get_argument(struct pp_smumgr *smumgr);
+extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr);
-extern int smum_download_powerplay_table(struct pp_smumgr *smumgr, void **table);
+extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
-extern int smum_upload_powerplay_table(struct pp_smumgr *smumgr);
-
-extern int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
-
-extern int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
-extern int smum_wait_on_register(struct pp_smumgr *smumgr,
- uint32_t index, uint32_t value, uint32_t mask);
-
-extern int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
- uint32_t index, uint32_t value, uint32_t mask);
-
-extern int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
- uint32_t indirect_port, uint32_t index,
- uint32_t value, uint32_t mask);
-
-
-extern void smum_wait_for_indirect_register_unequal(
- struct pp_smumgr *smumgr,
- uint32_t indirect_port, uint32_t index,
- uint32_t value, uint32_t mask);
-
extern int smu_allocate_memory(void *device, uint32_t size,
enum cgs_gpu_mem_type type,
uint32_t byte_align, uint64_t *mc_addr,
void **kptr, void *handle);
extern int smu_free_memory(void *device, void *handle);
-extern int vega10_smum_init(struct pp_smumgr *smumgr);
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr);
-extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
-extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result);
+extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr);
+extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
extern int smum_init_smc_table(struct pp_hwmgr *hwmgr);
extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
-extern uint32_t smum_get_offsetof(struct pp_smumgr *smumgr,
+extern uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr,
uint32_t type, uint32_t member);
-extern uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value);
+extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value);
extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr);
extern int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request);
-extern bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr);
-
-#define SMUM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
-
-#define SMUM_FIELD_MASK(reg, field) reg##__##field##_MASK
-
-#define SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_on_indirect_register(smumgr, \
- mm##port##_INDEX, index, value, mask)
-
-#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
- SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field) )
-
-#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- index, value, mask) \
- smum_wait_for_register_unequal(smumgr, \
- index, value, mask)
-
-#define SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, value, mask) \
- SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- mm##reg, value, mask)
-
-#define SMUM_WAIT_FIELD_UNEQUAL(smumgr, reg, field, fieldval) \
- SMUM_WAIT_REGISTER_UNEQUAL(smumgr, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_GET_FIELD(value, reg, field) \
- (((value) & SMUM_FIELD_MASK(reg, field)) \
- >> SMUM_FIELD_SHIFT(reg, field))
-
-#define SMUM_READ_FIELD(device, reg, field) \
- SMUM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
-
-#define SMUM_SET_FIELD(value, reg, field, field_val) \
- (((value) & ~SMUM_FIELD_MASK(reg, field)) | \
- (SMUM_FIELD_MASK(reg, field) & ((field_val) << \
- SMUM_FIELD_SHIFT(reg, field))))
-
-#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
- SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_on_indirect_register(smumgr, \
- mm##port##_INDEX_0, index, value, mask)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
- port, index, value, mask) \
- smum_wait_for_indirect_register_unequal(smumgr, \
- mm##port##_INDEX_0, index, value, mask)
-
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
-
-
-/*Operations on named fields.*/
-
-#define SMUM_READ_VFPF_INDIRECT_FIELD(device, port, reg, field) \
- SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field)
-
-#define SMUM_WRITE_FIELD(device, reg, field, fieldval) \
- cgs_write_register(device, mm##reg, \
- SMUM_SET_FIELD(cgs_read_register(device, mm##reg), reg, field, fieldval))
-
-#define SMUM_WRITE_VFPF_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-
-#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
- cgs_write_ind_register(device, port, ix##reg, \
- SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
- reg, field, fieldval))
-
-
-#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \
- (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field))
-
-#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \
- smum_wait_for_indirect_register_unequal(smumgr, \
- mm##port##_INDEX, index, value, mask)
-
-#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
- SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
+extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr);
-#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
- SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
- SMUM_FIELD_MASK(reg, field) )
#endif
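
The net effect of this header rework: struct pp_smumgr is gone and every smum_* entry point is keyed off struct pp_hwmgr, which now carries the SMU backend directly (the ci_smc.c code below reads it via hwmgr->smu_backend). A sketch of the mechanical caller-side change, with msg and parameter left hypothetical:

	/* before: routed through the separate SMU manager object */
	smum_send_msg_to_smc_with_parameter(smumgr, msg, parameter);

	/* after: the hwmgr is the single handle */
	smum_send_msg_to_smc_with_parameter(hwmgr, msg, parameter);
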
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
index 1703bbefbfd5..a423c0a85129 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile
@@ -4,7 +4,7 @@
SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \
polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \
- smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o
+ smu7_smumgr.o iceland_smc.o vega10_smumgr.o rv_smumgr.o ci_smc.o ci_smumgr.o
AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c
new file mode 100644
index 000000000000..9ee14315dce7
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.c
@@ -0,0 +1,2753 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "linux/delay.h"
+#include <linux/types.h>
+
+#include "smumgr.h"
+#include "pp_debug.h"
+#include "ci_smc.h"
+#include "ci_smumgr.h"
+#include "ppsmc.h"
+#include "smu7_hwmgr.h"
+#include "hardwaremanager.h"
+#include "ppatomctrl.h"
+#include "cgs_common.h"
+#include "atombios.h"
+#include "pppcielanes.h"
+
+#include "smu/smu_7_0_1_d.h"
+#include "smu/smu_7_0_1_sh_mask.h"
+
+#include "dce/dce_8_0_d.h"
+#include "dce/dce_8_0_sh_mask.h"
+
+#include "bif/bif_4_1_d.h"
+#include "bif/bif_4_1_sh_mask.h"
+
+#include "gca/gfx_7_2_d.h"
+#include "gca/gfx_7_2_sh_mask.h"
+
+#include "gmc/gmc_7_1_d.h"
+#include "gmc/gmc_7_1_sh_mask.h"
+
+#include "processpptables.h"
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+
+static const struct ci_pt_defaults defaults_hawaii_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_hawaii_pro = {
+ 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
+ { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
+ { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+
+static const struct ci_pt_defaults defaults_saturn_xt = {
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+ { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
+ { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+
+static int ci_set_smc_sram_address(struct pp_hwmgr *hwmgr,
+ uint32_t smc_addr, uint32_t limit)
+{
+ if ((0 != (3 & smc_addr))
+ || ((smc_addr + 3) >= limit)) {
+		pr_err("smc_addr invalid\n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, smc_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ return 0;
+}
+
+static int ci_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
+ const uint8_t *src, uint32_t byte_count, uint32_t limit)
+{
+ int result;
+ uint32_t data = 0;
+ uint32_t original_data;
+ uint32_t addr = 0;
+ uint32_t extra_shift;
+
+ if ((3 & smc_start_address)
+ || ((smc_start_address + byte_count) >= limit)) {
+		pr_err("smc_start_address invalid\n");
+ return -EINVAL;
+ }
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* Bytes are written into the SMC address space with the MSB first. */
+ data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ if (0 != byte_count) {
+
+ data = 0;
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+
+ original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+			/* Bytes are written into the SMC address space with the MSB first. */
+ data = (0x100 * data) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ result = ci_set_smc_sram_address(hwmgr, addr, limit);
+
+ if (0 != result)
+ return result;
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+ }
+
+ return 0;
+}
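
The tail path above merges one to three trailing bytes into the word already present in SMC RAM rather than clobbering it. A standalone sketch of the same arithmetic (plain host C, no register access; 'original' stands in for the readback of mmSMC_IND_DATA_0):

	#include <stdint.h>
	#include <stddef.h>

	static uint32_t pack_tail_msb_first(const uint8_t *src, size_t byte_count,
					    uint32_t original)
	{
		uint32_t data = 0;
		uint32_t extra_shift = 8 * (4 - (uint32_t)byte_count);

		while (byte_count--)
			data = (data << 8) | *src++;	/* MSB first */

		data <<= extra_shift;
		/* keep the low bytes of 'original' that are not rewritten */
		return data | (original & (uint32_t)~((~0UL) << extra_shift));
	}

	/* e.g. src = {0x55, 0x66}: 0x5566 << 16, merged with the low half of
	 * original 0xAABBCCDD, gives 0x5566CCDD */
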
+
+
+static int ci_program_jump_on_start(struct pp_hwmgr *hwmgr)
+{
+ static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ ci_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
+
+ return 0;
+}
+
+bool ci_is_smc_ram_running(struct pp_hwmgr *hwmgr)
+{
+ return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
+ CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(hwmgr->device,
+ CGS_IND_REG__SMC, ixSMC_PC_C)));
+}
+
+static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
+ uint32_t *value, uint32_t limit)
+{
+ int result;
+
+ result = ci_set_smc_sram_address(hwmgr, smc_addr, limit);
+
+ if (result)
+ return result;
+
+ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_0);
+ return 0;
+}
+
+int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+{
+ int ret;
+
+ if (!ci_is_smc_ram_running(hwmgr))
+ return -EINVAL;
+
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
+
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
+
+ if (ret != 1)
+		pr_info("failed to send message %x, SMC response was %d\n", msg, ret);
+
+ return 0;
+}
+
+int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter)
+{
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
+ return ci_send_msg_to_smc(hwmgr, msg);
+}
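
These two functions are the whole mailbox protocol: the argument goes into SMC_MSG_ARG_0 first, then the message id into SMC_MESSAGE_0, and PHM_WAIT_FIELD_UNEQUAL spins until SMC_RESP_0 leaves zero. Note that a response other than 1 is only logged; the call still returns 0. Usage sketch (message id hypothetical, the real ones come from ppsmc.h):

	uint16_t msg = 0;	/* placeholder for a PPSMC_MSG_* id */

	ci_send_msg_to_smc_with_parameter(hwmgr, msg, 0x1);
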
+
+static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ switch (dev_id) {
+ case 0x67BA:
+ case 0x66B1:
+ smu_data->power_tune_defaults = &defaults_hawaii_pro;
+ break;
+ case 0x67B8:
+ case 0x66B0:
+ smu_data->power_tune_defaults = &defaults_hawaii_xt;
+ break;
+ case 0x6640:
+ case 0x6641:
+ case 0x6646:
+ case 0x6647:
+ smu_data->power_tune_defaults = &defaults_saturn_xt;
+ break;
+ case 0x6649:
+ case 0x6650:
+ case 0x6651:
+ case 0x6658:
+ case 0x665C:
+ case 0x665D:
+ case 0x67A0:
+ case 0x67A1:
+ case 0x67A2:
+ case 0x67A8:
+ case 0x67A9:
+ case 0x67AA:
+ case 0x67B9:
+ case 0x67BE:
+ default:
+ smu_data->power_tune_defaults = &defaults_bonaire_xt;
+ break;
+ }
+}
+
+static int ci_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
+ struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ uint32_t clock, uint32_t *vol)
+{
+ uint32_t i = 0;
+
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *vol = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ *vol = allowed_clock_voltage_table->entries[i - 1].v;
+ return 0;
+}
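
The lookup rule here is first-fit-then-clamp: the first entry whose clock is at least the request wins, and a request beyond the table clamps to the last (highest) entry. A self-contained sketch with a hypothetical table, clocks in 10 kHz units:

	#include <stdio.h>
	#include <stdint.h>

	struct entry { uint32_t clk, v; };

	static uint32_t volt_for_clk(const struct entry *t, int n, uint32_t clk)
	{
		int i;

		for (i = 0; i < n; i++)
			if (t[i].clk >= clk)
				return t[i].v;
		return t[n - 1].v;	/* clamp to the highest entry */
	}

	int main(void)
	{
		const struct entry tab[] = { {30000, 900}, {60000, 1000}, {90000, 1100} };

		printf("%u\n", volt_for_clk(tab, 3, 70000));	/* 1100 */
		printf("%u\n", volt_for_clk(tab, 3, 120000));	/* 1100, clamped */
		return 0;
	}
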
+
+static int ci_calculate_sclk_params(struct pp_hwmgr *hwmgr,
+ uint32_t clock, struct SMU7_Discrete_GraphicsLevel *sclk)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ uint32_t spll_func_cntl_4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ uint32_t cg_spll_spread_spectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ uint32_t cg_spll_spread_spectrum_2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ uint32_t ref_clock;
+ uint32_t ref_divider;
+ uint32_t fbdiv;
+ int result;
+
+ /* get the engine clock dividers for this clock value */
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr, clock, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.",
+ return result);
+
+ /* To get FBDIV we need to multiply this by 16384 and divide it by Fref. */
+ ref_clock = atomctrl_get_reference_clock(hwmgr);
+ ref_divider = 1 + dividers.uc_pll_ref_div;
+
+ /* low 14 bits is fraction and high 12 bits is divider */
+ fbdiv = dividers.ul_fb_div.ul_fb_divider & 0x3FFFFFF;
+
+ /* SPLL_FUNC_CNTL setup */
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_REF_DIV, dividers.uc_pll_ref_div);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl, CG_SPLL_FUNC_CNTL,
+ SPLL_PDIV_A, dividers.uc_pll_post_div);
+
+ /* SPLL_FUNC_CNTL_3 setup*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_FB_DIV, fbdiv);
+
+ /* set to use fractional accumulation*/
+ spll_func_cntl_3 = PHM_SET_FIELD(spll_func_cntl_3, CG_SPLL_FUNC_CNTL_3,
+ SPLL_DITHEN, 1);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_EngineSpreadSpectrumSupport)) {
+ struct pp_atomctrl_internal_ss_info ss_info;
+ uint32_t vco_freq = clock * dividers.uc_pll_post_div;
+
+ if (!atomctrl_get_engine_clock_spread_spectrum(hwmgr,
+ vco_freq, &ss_info)) {
+ uint32_t clk_s = ref_clock * 5 /
+ (ref_divider * ss_info.speed_spectrum_rate);
+ uint32_t clk_v = 4 * ss_info.speed_spectrum_percentage *
+ fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, CLKS, clk_s);
+ cg_spll_spread_spectrum = PHM_SET_FIELD(cg_spll_spread_spectrum,
+ CG_SPLL_SPREAD_SPECTRUM, SSEN, 1);
+ cg_spll_spread_spectrum_2 = PHM_SET_FIELD(cg_spll_spread_spectrum_2,
+ CG_SPLL_SPREAD_SPECTRUM_2, CLKV, clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (uint8_t)dividers.pll_post_divider;
+
+ return 0;
+}
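
The spread-spectrum block reduces to two integer ratios. A standalone restatement with assumed inputs (ref_clock 2500, ref_divider 2, rate 125, percentage 50, fbdiv 10000; all hypothetical, in the driver's own units):

	#include <stdint.h>

	static void spll_ss_params(uint32_t ref_clock, uint32_t ref_divider,
				   uint32_t rate, uint32_t percentage,
				   uint32_t fbdiv,
				   uint32_t *clk_s, uint32_t *clk_v)
	{
		*clk_s = ref_clock * 5 / (ref_divider * rate);		/* 2500*5/250 = 50 */
		*clk_v = 4 * percentage * fbdiv / (*clk_s * 10000);	/* 2000000/500000 = 4 */
	}
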
+
+static void ci_populate_phase_value_based_on_sclk(struct pp_hwmgr *hwmgr,
+ const struct phm_phase_shedding_limits_table *pl,
+ uint32_t sclk, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ /* use the minimum phase shedding */
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (sclk < pl->entries[i].Sclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+}
+
+static uint8_t ci_get_sleep_divider_id_from_clock(uint32_t clock,
+ uint32_t clock_insr)
+{
+ uint8_t i;
+ uint32_t temp;
+ uint32_t min = min_t(uint32_t, clock_insr, CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ if (clock < min) {
+ pr_info("Engine clock can't satisfy stutter requirement!\n");
+ return 0;
+ }
+ for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ temp = clock >> i;
+
+ if (temp >= min || i == 0)
+ break;
+ }
+ return i;
+}
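
The divider search picks the largest power-of-two shift (at most CISLAND_MAX_DEEPSLEEP_DIVIDER_ID = 5) that keeps the clock at or above the floor; if even a shift of 1 undershoots, it falls through to 0. An equivalent standalone form with a worked case:

	#include <stdint.h>

	/* clock = 400000, min = 80000 (10 kHz units): 400000 >> 2 = 100000 is
	 * the first value >= min, so the deep-sleep divider id is 2. */
	static uint8_t sleep_divider_id(uint32_t clock, uint32_t min)
	{
		uint8_t i;

		for (i = 5; i > 0; i--)
			if ((clock >> i) >= min)
				break;
		return i;
	}
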
+
+static int ci_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
+ uint32_t clock, uint16_t sclk_al_threshold,
+ struct SMU7_Discrete_GraphicsLevel *level)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+
+ result = ci_calculate_sclk_params(hwmgr, clock, level);
+
+ /* populate graphics levels */
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_sclk, clock,
+ (uint32_t *)(&level->MinVddc));
+ if (result) {
+ pr_err("vdd_dep_on_sclk table is NULL\n");
+ return result;
+ }
+
+ level->SclkFrequency = clock;
+ level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_sclk(hwmgr,
+ hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ clock,
+ &level->MinVddcPhases);
+
+ level->ActivityLevel = sclk_al_threshold;
+ level->CcPwrDynRm = 0;
+ level->CcPwrDynRm1 = 0;
+ level->EnabledForActivity = 0;
+ /* this level can be used for throttling.*/
+ level->EnabledForThrottle = 1;
+ level->UpH = 0;
+ level->DownH = 0;
+ level->VoltageDownH = 0;
+ level->PowerThrottle = 0;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkDeepSleep))
+ level->DeepSleepDivId =
+ ci_get_sleep_divider_id_from_clock(clock,
+ CISLAND_MINIMUM_ENGINE_CLOCK);
+
+	/* Default to slow; the highest DPM level is raised to PPSMC_DISPLAY_WATERMARK_HIGH later. */
+ level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (0 == result) {
+ level->MinVddc = PP_HOST_TO_SMC_UL(level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->MinVddcPhases);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1);
+ }
+
+ return result;
+}
+
+int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result = 0;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t i;
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ result = ci_populate_single_graphic_level(hwmgr,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (uint16_t)smu_data->activity_target[i],
+ &levels[i]);
+ if (result)
+ return result;
+ if (i > 1)
+ smu_data->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
+ if (i == (dpm_table->sclk_table.count - 1))
+ smu_data->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+
+ smu_data->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
+
+ smu_data->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ result = ci_copy_bytes_to_smc(hwmgr, array,
+ (u8 *)levels, array_size,
+ SMC_RAM_END);
+
+ return result;
+
+}
+
+static int ci_populate_svi_load_line(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
+ smu_data->power_tune_table.SviLoadLineVddC = defaults->svi_load_line_vddc;
+ smu_data->power_tune_table.SviLoadLineTrimVddC = 3;
+ smu_data->power_tune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int ci_populate_tdc_limit(struct pp_hwmgr *hwmgr)
+{
+ uint16_t tdc_limit;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+
+ tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
+ smu_data->power_tune_table.TDC_VDDC_PkgLimit =
+ CONVERT_FROM_HOST_TO_SMC_US(tdc_limit);
+ smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ defaults->tdc_vddc_throttle_release_limit_perc;
+ smu_data->power_tune_table.TDC_MAWt = defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int ci_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+ uint32_t temp;
+
+ if (ci_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+				"Attempt to read PmFuses.DW8 (TdcWaterfallCtl) from SMC Failed!",
+ return -EINVAL);
+ else
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int ci_populate_fuzzy_fan(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
+{
+ uint16_t tmp;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ tmp = hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity;
+ else
+ tmp = hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = CONVERT_FROM_HOST_TO_SMC_US(tmp);
+
+ return 0;
+}
+
+static int ci_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+ uint8_t *hi2_vid = smu_data->power_tune_table.BapmVddCVidHiSidd2;
+
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.cac_leakage_table,
+ "The CAC Leakage table does not exist!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count <= 8,
+ "There should never be more than 8 entries for BapmVddcVid!!!", return -EINVAL);
+ PP_ASSERT_WITH_CODE(hwmgr->dyn_state.cac_leakage_table->count == hwmgr->dyn_state.vddc_dependency_on_sclk->count,
+		"CACLeakageTable->count and VddcDependencyOnSCLK->count not equal", return -EINVAL);
+
+ for (i = 0; (uint32_t) i < hwmgr->dyn_state.cac_leakage_table->count; i++) {
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EVV)) {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc1);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc2);
+ hi2_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc3);
+ } else {
+ lo_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Vddc);
+ hi_vid[i] = convert_to_vid(hwmgr->dyn_state.cac_leakage_table->entries[i].Leakage);
+ }
+ }
+
+ return 0;
+}
+
+static int ci_populate_vddc_vid(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint8_t *vid = smu_data->power_tune_table.VddCVid;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 8,
+ "There should never be more than 8 entries for VddcVid!!!",
+ return -EINVAL);
+
+ for (i = 0; i < (int)data->vddc_voltage_table.count; i++)
+ vid[i] = convert_to_vid(data->vddc_voltage_table.entries[i].value);
+
+ return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ u8 *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
+ int i, min, max;
+
+ min = max = hi_vid[0];
+ for (i = 0; i < 8; i++) {
+ if (0 != hi_vid[i]) {
+ if (min > hi_vid[i])
+ min = hi_vid[i];
+ if (max < hi_vid[i])
+ max = hi_vid[i];
+ }
+
+ if (0 != lo_vid[i]) {
+ if (min > lo_vid[i])
+ min = lo_vid[i];
+ if (max < lo_vid[i])
+ max = lo_vid[i];
+ }
+ }
+
+ if ((min == 0) || (max == 0))
+ return -EINVAL;
+ smu_data->power_tune_table.GnbLPMLMaxVid = (u8)max;
+ smu_data->power_tune_table.GnbLPMLMinVid = (u8)min;
+
+ return 0;
+}
+
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
+ uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
+ struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
+
+ HiSidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256);
+ LoSidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256);
+
+ smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(HiSidd);
+ smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd =
+ CONVERT_FROM_HOST_TO_SMC_US(LoSidd);
+
+ return 0;
+}
+
+static int ci_populate_pm_fuses(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint32_t pm_fuse_table_offset;
+ int ret = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_PowerContainment)) {
+ if (ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, SMC_RAM_END)) {
+ pr_err("Attempt to get pm_fuse_table_offset Failed!\n");
+ return -EINVAL;
+ }
+
+ /* DW0 - DW3 */
+ ret = ci_populate_bapm_vddc_vid_sidd(hwmgr);
+ /* DW4 - DW5 */
+ ret |= ci_populate_vddc_vid(hwmgr);
+ /* DW6 */
+ ret |= ci_populate_svi_load_line(hwmgr);
+ /* DW7 */
+ ret |= ci_populate_tdc_limit(hwmgr);
+ /* DW8 */
+ ret |= ci_populate_dw8(hwmgr, pm_fuse_table_offset);
+
+ ret |= ci_populate_fuzzy_fan(hwmgr, pm_fuse_table_offset);
+
+ ret |= ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(hwmgr);
+
+ ret |= ci_populate_bapm_vddc_base_leakage_sidd(hwmgr);
+ if (ret)
+ return ret;
+
+ ret = ci_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
+ (uint8_t *)&smu_data->power_tune_table,
+ sizeof(struct SMU7_Discrete_PmFuses), SMC_RAM_END);
+ }
+ return ret;
+}
+
+static int ci_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ const struct ci_pt_defaults *defaults = smu_data->power_tune_defaults;
+ SMU7_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
+ struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
+ struct phm_ppm_table *ppm = hwmgr->dyn_state.ppm_parameter_table;
+ const uint16_t *def1, *def2;
+ int i, j, k;
+
+ dpm_table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 256));
+ dpm_table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usConfigurableTDP * 256));
+
+ dpm_table->DTETjOffset = 0;
+ dpm_table->GpuTjMax = (uint8_t)(data->thermal_temp_setting.temperature_high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
+
+ if (ppm) {
+ dpm_table->PPM_PkgPwrLimit = (uint16_t)ppm->dgpu_tdp * 256 / 1000;
+ dpm_table->PPM_TemperatureLimit = (uint16_t)ppm->tj_max * 256;
+ } else {
+ dpm_table->PPM_PkgPwrLimit = 0;
+ dpm_table->PPM_TemperatureLimit = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
+ CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
+
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
+ def1 = defaults->bapmti_r;
+ def2 = defaults->bapmti_rc;
+
+ for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU7_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ci_get_std_voltage_value_sidd(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab, uint16_t *hi,
+ uint16_t *lo)
+{
+ uint16_t v_index;
+ bool vol_found = false;
+ *hi = tab->value * VOLTAGE_SCALE;
+ *lo = tab->value * VOLTAGE_SCALE;
+
+ PP_ASSERT_WITH_CODE(NULL != hwmgr->dyn_state.vddc_dependency_on_sclk,
+ "The SCLK/VDDC Dependency Table does not exist.\n",
+ return -EINVAL);
+
+ if (NULL == hwmgr->dyn_state.cac_leakage_table) {
+ pr_warn("CAC Leakage Table does not exist, using vddc.\n");
+ return 0;
+ }
+
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value == hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage * VOLTAGE_SCALE);
+ } else {
+ pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index, using maximum index from CAC table.\n");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ if (!vol_found) {
+ for (v_index = 0; (uint32_t)v_index < hwmgr->dyn_state.vddc_dependency_on_sclk->count; v_index++) {
+ if (tab->value <= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[v_index].v) {
+ vol_found = true;
+ if ((uint32_t)v_index < hwmgr->dyn_state.cac_leakage_table->count) {
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[v_index].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[v_index].Leakage) * VOLTAGE_SCALE;
+ } else {
+					pr_warn("Index from SCLK/VDDC Dependency Table exceeds the CAC Leakage Table index in second lookup, using maximum index from CAC table.\n");
+ *lo = hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Vddc * VOLTAGE_SCALE;
+ *hi = (uint16_t)(hwmgr->dyn_state.cac_leakage_table->entries[hwmgr->dyn_state.cac_leakage_table->count - 1].Leakage * VOLTAGE_SCALE);
+ }
+ break;
+ }
+ }
+
+ if (!vol_found)
+ pr_warn("Unable to get std_vddc from SCLK/VDDC Dependency Table, using vddc.\n");
+ }
+
+ return 0;
+}
+
+static int ci_populate_smc_voltage_table(struct pp_hwmgr *hwmgr,
+ pp_atomctrl_voltage_table_entry *tab,
+ SMU7_Discrete_VoltageLevel *smc_voltage_tab)
+{
+ int result;
+
+ result = ci_get_std_voltage_value_sidd(hwmgr, tab,
+ &smc_voltage_tab->StdVoltageHiSidd,
+ &smc_voltage_tab->StdVoltageLoSidd);
+ if (result) {
+ smc_voltage_tab->StdVoltageHiSidd = tab->value * VOLTAGE_SCALE;
+ smc_voltage_tab->StdVoltageLoSidd = tab->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_tab->Voltage = PP_HOST_TO_SMC_US(tab->value * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageHiSidd);
+ CONVERT_FROM_HOST_TO_SMC_US(smc_voltage_tab->StdVoltageLoSidd);
+
+ return 0;
+}
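
All the PP_HOST_TO_SMC_* / CONVERT_FROM_HOST_TO_SMC_* calls in this file exist because the SMC consumes its tables big-endian: on a little-endian host every multi-byte field is byte-swapped in place before the struct is uploaded with ci_copy_bytes_to_smc(). A stand-in for the 16-bit helper (assuming the real macros are cpu_to_be16/cpu_to_be32 wrappers from the powerplay endian header):

	#include <stdint.h>

	static uint16_t host_to_smc_us(uint16_t v)
	{
		return (uint16_t)((v << 8) | (v >> 8));	/* 0x04B0 -> 0xB004 */
	}
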
+
+static int ci_populate_smc_vddc_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ table->VddcLevelCount = data->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ result = ci_populate_smc_voltage_table(hwmgr,
+ &(data->vddc_voltage_table.entries[count]),
+ &(table->VddcLevel[count]));
+		PP_ASSERT_WITH_CODE(0 == result, "failed to populate SMC VDDC voltage table", return -EINVAL);
+
+ /* GPIO voltage control */
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control)
+ table->VddcLevel[count].Smio |= data->vddc_voltage_table.entries[count].smio_low;
+ else
+ table->VddcLevel[count].Smio = 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_vdd_ci_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->VddciLevelCount = data->vddci_voltage_table.count;
+
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ result = ci_populate_smc_voltage_table(hwmgr,
+ &(data->vddci_voltage_table.entries[count]),
+ &(table->VddciLevel[count]));
+		PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC VDDCI voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control)
+ table->VddciLevel[count].Smio |= data->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t count;
+ int result;
+
+ table->MvddLevelCount = data->mvdd_voltage_table.count;
+
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ result = ci_populate_smc_voltage_table(hwmgr,
+ &(data->mvdd_voltage_table.entries[count]),
+ &table->MvddLevel[count]);
+		PP_ASSERT_WITH_CODE(result == 0, "failed to populate SMC MVDD voltage table", return -EINVAL);
+ if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control)
+ table->MvddLevel[count].Smio |= data->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio |= 0;
+ }
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
+
+ return 0;
+}
+
+
+static int ci_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ int result;
+
+ result = ci_populate_smc_vddc_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+			"failed to populate VDDC voltage table to SMC", return -EINVAL);
+
+ result = ci_populate_smc_vdd_ci_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+			"failed to populate VDDCI voltage table to SMC", return -EINVAL);
+
+ result = ci_populate_smc_mvdd_table(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+			"failed to populate MVDD voltage table to SMC", return -EINVAL);
+
+ return 0;
+}
+
+static int ci_populate_ulv_level(struct pp_hwmgr *hwmgr,
+ struct SMU7_Discrete_Ulv *state)
+{
+ uint32_t voltage_response_time, ulv_voltage;
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ result = pp_tables_get_response_times(hwmgr, &voltage_response_time, &ulv_voltage);
+ PP_ASSERT_WITH_CODE((0 == result), "can not get ULV voltage value", return result;);
+
+ if (ulv_voltage == 0) {
+ data->ulv_supported = false;
+ return 0;
+ }
+
+ if (data->voltage_control != SMU7_VOLTAGE_CONTROL_BY_SVID2) {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffset = 0;
+ else
+ /* used in SMIO Mode. not implemented for now. this is backup only for CI. */
+ state->VddcOffset = (uint16_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage);
+ } else {
+ /* use minimum voltage if ulv voltage in pptable is bigger than minimum voltage */
+ if (ulv_voltage > hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v)
+ state->VddcOffsetVid = 0;
+ else /* used in SVI2 Mode */
+ state->VddcOffsetVid = (uint8_t)(
+ (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].v - ulv_voltage)
+ * VOLTAGE_VID_OFFSET_SCALE2
+ / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = 1;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1);
+ CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset);
+
+ return 0;
+}
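
In the SVI2 branch the offset is expressed in 6.25 mV VID steps, which is exactly what the * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 (= * 100 / 625) scaling encodes. A standalone restatement with a worked value:

	#include <stdint.h>

	/* a 50 mV gap between the lowest SCLK voltage and the ULV voltage
	 * becomes 50 * 100 / 625 = 8 VID steps */
	static uint8_t ulv_offset_vid(uint32_t min_vddc_mv, uint32_t ulv_mv)
	{
		return (uint8_t)((min_vddc_mv - ulv_mv) * 100 / 625);
	}
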
+
+static int ci_populate_ulv_state(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_Ulv *ulv_level)
+{
+ return ci_populate_ulv_level(hwmgr, ulv_level);
+}
+
+static int ci_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU7_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint32_t i;
+
+	/* Index dpm_table->pcie_speed_table.count is reserved for PCIE boot level. */
+ for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ (uint8_t)encode_pcie_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].DownT = PP_HOST_TO_SMC_UL(5);
+ table->LinkLevel[i].UpT = PP_HOST_TO_SMC_UL(30);
+ }
+
+ smu_data->smc_state_table.LinkLevelCount =
+ (uint8_t)dpm_table->pcie_speed_table.count;
+ data->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+
+ return 0;
+}
+
+static int ci_calculate_mclk_params(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU7_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dllStateOn
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+ uint32_t mpll_ad_func_cntl = data->clock_registers.vMPLL_AD_FUNC_CNTL;
+ uint32_t mpll_dq_func_cntl = data->clock_registers.vMPLL_DQ_FUNC_CNTL;
+ uint32_t mpll_func_cntl = data->clock_registers.vMPLL_FUNC_CNTL;
+ uint32_t mpll_func_cntl_1 = data->clock_registers.vMPLL_FUNC_CNTL_1;
+ uint32_t mpll_func_cntl_2 = data->clock_registers.vMPLL_FUNC_CNTL_2;
+ uint32_t mpll_ss1 = data->clock_registers.vMPLL_SS1;
+ uint32_t mpll_ss2 = data->clock_registers.vMPLL_SS2;
+
+ pp_atomctrl_memory_clock_param mpll_param;
+ int result;
+
+ result = atomctrl_get_memory_pll_dividers_si(hwmgr,
+ memory_clock, &mpll_param, strobe_mode);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Error retrieving Memory Clock Parameters from VBIOS.", return result);
+
+ mpll_func_cntl = PHM_SET_FIELD(mpll_func_cntl, MPLL_FUNC_CNTL, BWCTRL, mpll_param.bw_ctrl);
+
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKF, mpll_param.mpll_fb_divider.cl_kf);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, CLKFRAC, mpll_param.mpll_fb_divider.clk_frac);
+ mpll_func_cntl_1 = PHM_SET_FIELD(mpll_func_cntl_1,
+ MPLL_FUNC_CNTL_1, VCO_MODE, mpll_param.vco_mode);
+
+ mpll_ad_func_cntl = PHM_SET_FIELD(mpll_ad_func_cntl,
+ MPLL_AD_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+
+ if (data->is_memory_gddr5) {
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_SEL, mpll_param.yclk_sel);
+ mpll_dq_func_cntl = PHM_SET_FIELD(mpll_dq_func_cntl,
+ MPLL_DQ_FUNC_CNTL, YCLK_POST_DIV, mpll_param.mpll_post_divider);
+ }
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MemorySpreadSpectrumSupport)) {
+ pp_atomctrl_internal_ss_info ss_info;
+ uint32_t freq_nom;
+ uint32_t tmp;
+ uint32_t reference_clock = atomctrl_get_mpll_reference_clock(hwmgr);
+
+ /* for GDDR5 for all modes and DDR3 */
+ if (1 == mpll_param.qdr)
+ freq_nom = memory_clock * 4 * (1 << mpll_param.mpll_post_divider);
+ else
+ freq_nom = memory_clock * 2 * (1 << mpll_param.mpll_post_divider);
+
+ /* tmp = (freq_nom / reference_clock * reference_divider) ^ 2 Note: S.I. reference_divider = 1*/
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+
+ if (0 == atomctrl_get_memory_clock_spread_spectrum(hwmgr, freq_nom, &ss_info)) {
+ uint32_t clks = reference_clock * 5 / ss_info.speed_spectrum_rate;
+ uint32_t clkv =
+ (uint32_t)((((131 * ss_info.speed_spectrum_percentage *
+ ss_info.speed_spectrum_rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 = PHM_SET_FIELD(mpll_ss1, MPLL_SS1, CLKV, clkv);
+ mpll_ss2 = PHM_SET_FIELD(mpll_ss2, MPLL_SS2, CLKS, clks);
+ }
+ }
+
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, DLL_SPEED, mpll_param.dll_speed);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, dllStateOn);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, dllStateOn);
+
+
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
+static uint8_t ci_get_mclk_frequency_ratio(uint32_t memory_clock,
+ bool strobe_mode)
+{
+ uint8_t mc_para_index;
+
+ if (strobe_mode) {
+ if (memory_clock < 12500)
+ mc_para_index = 0x00;
+ else if (memory_clock > 47500)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 2500);
+ } else {
+ if (memory_clock < 65000)
+ mc_para_index = 0x00;
+ else if (memory_clock > 135000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 60000) / 5000);
+ }
+
+ return mc_para_index;
+}
+
+static uint8_t ci_get_ddr3_mclk_frequency_ratio(uint32_t memory_clock)
+{
+ uint8_t mc_para_index;
+
+ if (memory_clock < 10000)
+ mc_para_index = 0;
+ else if (memory_clock >= 80000)
+ mc_para_index = 0x0f;
+ else
+ mc_para_index = (uint8_t)((memory_clock - 10000) / 5000 + 1);
+
+ return mc_para_index;
+}
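
Worked numbers for the two quantizers, clocks in 10 kHz units:

	/* GDDR5, strobe mode, mclk = 30000 (300 MHz):
	 *	(30000 - 10000) / 2500     = 8
	 * DDR3, same clock:
	 *	(30000 - 10000) / 5000 + 1 = 5
	 * out-of-window clocks clamp to 0x00 / 0x0f as coded above */
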
+
+static int ci_populate_phase_value_based_on_mclk(struct pp_hwmgr *hwmgr, const struct phm_phase_shedding_limits_table *pl,
+ uint32_t memory_clock, uint32_t *p_shed)
+{
+ unsigned int i;
+
+ *p_shed = 1;
+
+ for (i = 0; i < pl->count; i++) {
+ if (memory_clock < pl->entries[i].Mclk) {
+ *p_shed = i;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int ci_populate_single_memory_level(
+ struct pp_hwmgr *hwmgr,
+ uint32_t memory_clock,
+ SMU7_Discrete_MemoryLevel *memory_level
+ )
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int result = 0;
+ bool dll_state_on;
+ struct cgs_display_info info = {0};
+ uint32_t mclk_edc_wr_enable_threshold = 40000;
+ uint32_t mclk_edc_enable_threshold = 40000;
+ uint32_t mclk_strobe_mode_threshold = 40000;
+
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
+ }
+
+ if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.vddci_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinVddci);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "can not find MinVddci voltage value from memory VDDCI voltage dependency table", return result);
+ }
+
+ if (NULL != hwmgr->dyn_state.mvdd_dependency_on_mclk) {
+ result = ci_get_dependency_volt_by_clk(hwmgr,
+ hwmgr->dyn_state.mvdd_dependency_on_mclk,
+ memory_clock,
+ &memory_level->MinMvdd);
+ PP_ASSERT_WITH_CODE((0 == result),
+			"can not find MinMvdd voltage value from memory MVDD voltage dependency table", return result);
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (data->vddc_phase_shed_control) {
+ ci_populate_phase_value_based_on_mclk(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table,
+ memory_clock, &memory_level->MinVddcPhases);
+ }
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 1;
+ memory_level->UpH = 0;
+ memory_level->DownH = 100;
+ memory_level->VoltageDownH = 0;
+
+ /* Indicates maximum activity level for this performance level.*/
+ memory_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
+ memory_level->StutterEnable = 0;
+ memory_level->StrobeEnable = 0;
+ memory_level->EdcReadEnable = 0;
+ memory_level->EdcWriteEnable = 0;
+ memory_level->RttEnable = 0;
+
+ /* default set to low watermark. Highest level will be set to high later.*/
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ cgs_get_active_displays_info(hwmgr->device, &info);
+ data->display_timing.num_existing_displays = info.display_count;
+
+	/* stutter mode is not supported on CI */
+
+ /* decide strobe mode*/
+ memory_level->StrobeEnable = (mclk_strobe_mode_threshold != 0) &&
+ (memory_clock <= mclk_strobe_mode_threshold);
+
+ /* decide EDC mode and memory clock ratio*/
+ if (data->is_memory_gddr5) {
+ memory_level->StrobeRatio = ci_get_mclk_frequency_ratio(memory_clock,
+ memory_level->StrobeEnable);
+
+ if ((mclk_edc_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_enable_threshold)) {
+ memory_level->EdcReadEnable = 1;
+ }
+
+ if ((mclk_edc_wr_enable_threshold != 0) &&
+ (memory_clock > mclk_edc_wr_enable_threshold)) {
+ memory_level->EdcWriteEnable = 1;
+ }
+
+ if (memory_level->StrobeEnable) {
+ if (ci_get_mclk_frequency_ratio(memory_clock, 1) >=
+ ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ else
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC6) >> 1) & 0x1) ? 1 : 0;
+ } else
+ dll_state_on = data->dll_default_on;
+ } else {
+ memory_level->StrobeRatio =
+ ci_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((cgs_read_register(hwmgr->device, mmMC_SEQ_MISC5) >> 1) & 0x1) ? 1 : 0;
+ }
+
+ result = ci_calculate_mclk_params(hwmgr,
+ memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+
+ if (0 == result) {
+ memory_level->MinVddc = PP_HOST_TO_SMC_UL(memory_level->MinVddc * VOLTAGE_SCALE);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MinVddcPhases);
+ memory_level->MinVddci = PP_HOST_TO_SMC_UL(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = PP_HOST_TO_SMC_UL(memory_level->MinMvdd * VOLTAGE_SCALE);
+ /* MCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkFrequency);
+ /* Indicates maximum activity level for this performance level.*/
+ CONVERT_FROM_HOST_TO_SMC_US(memory_level->ActivityLevel);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllFuncCntl_2);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllAdFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllDqFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MclkPwrmgtCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->DllCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs1);
+ CONVERT_FROM_HOST_TO_SMC_UL(memory_level->MpllSs2);
+ }
+
+ return result;
+}
+
+int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_dpm_table *dpm_table = &data->dpm_table;
+ int result;
+ struct cgs_system_info sys_info = {0};
+ uint32_t dev_id;
+
+ uint32_t level_array_address = smu_data->dpm_table_start + offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ uint32_t level_array_size = sizeof(SMU7_Discrete_MemoryLevel) * SMU7_MAX_LEVELS_MEMORY;
+ SMU7_Discrete_MemoryLevel *levels = smu_data->smc_state_table.MemoryLevel;
+ uint32_t i;
+
+ memset(levels, 0x00, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value),
+ "can not populate memory level as memory clock is zero", return -EINVAL);
+ result = ci_populate_single_memory_level(hwmgr, dpm_table->mclk_table.dpm_levels[i].value,
+ &(smu_data->smc_state_table.MemoryLevel[i]));
+ if (0 != result)
+ return result;
+ }
+
+ smu_data->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
+
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
+ cgs_query_system_info(hwmgr->device, &sys_info);
+ dev_id = (uint32_t)sys_info.value;
+
+ if ((dpm_table->mclk_table.count >= 2)
+ && ((dev_id == 0x67B0) || (dev_id == 0x67B1))) {
+ smu_data->smc_state_table.MemoryLevel[1].MinVddci =
+ smu_data->smc_state_table.MemoryLevel[0].MinVddci;
+ smu_data->smc_state_table.MemoryLevel[1].MinMvdd =
+ smu_data->smc_state_table.MemoryLevel[0].MinMvdd;
+ }
+ smu_data->smc_state_table.MemoryLevel[0].ActivityLevel = 0x1F;
+ CONVERT_FROM_HOST_TO_SMC_US(smu_data->smc_state_table.MemoryLevel[0].ActivityLevel);
+
+ smu_data->smc_state_table.MemoryDpmLevelCount = (uint8_t)dpm_table->mclk_table.count;
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask = phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+ smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ result = ci_copy_bytes_to_smc(hwmgr,
+ level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
+ SMC_RAM_END);
+
+ return result;
+}
+
+static int ci_populate_mvdd_value(struct pp_hwmgr *hwmgr, uint32_t mclk,
+ SMU7_Discrete_VoltageLevel *voltage)
+{
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ uint32_t i = 0;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) {
+ /* find the first MVDD entry whose clock is >= the requested mclk */
+ for (i = 0; i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count; i++) {
+ if (mclk <= hwmgr->dyn_state.mvdd_dependency_on_mclk->entries[i].clk) {
+ /* Always round to higher voltage. */
+ voltage->Voltage = data->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ PP_ASSERT_WITH_CODE(i < hwmgr->dyn_state.mvdd_dependency_on_mclk->count,
+ "MVDD Voltage is outside the supported range.", return -EINVAL);
+
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
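ci_populate_mvdd_value scans the MCLK-to-MVDD dependency table for the first entry whose clock is at least the requested one, so the voltage is always rounded up. A self-contained sketch of that lookup, with made-up table values:

#include <stdint.h>
#include <stdio.h>

struct dep_entry { uint32_t clk; uint32_t mvdd; };

/* Hypothetical mclk -> MVDD dependency table (10 kHz, mV). */
static const struct dep_entry dep[] = {
	{ 15000, 1000 }, { 40000, 1100 }, { 62500, 1200 },
};

static int lookup_mvdd(uint32_t mclk, uint32_t *mvdd)
{
	unsigned int i;

	for (i = 0; i < sizeof(dep) / sizeof(dep[0]); i++) {
		if (mclk <= dep[i].clk) {	/* round up to this entry */
			*mvdd = dep[i].mvdd;
			return 0;
		}
	}
	return -1;				/* above the supported range */
}

int main(void)
{
	uint32_t mvdd;

	if (!lookup_mvdd(50000, &mvdd))
		printf("MVDD = %u mV\n", (unsigned)mvdd);	/* 1200 mV */
	return 0;
}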
+static int ci_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ int result = 0;
+ const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct pp_atomctrl_clock_dividers_vi dividers;
+
+ SMU7_Discrete_VoltageLevel voltage_level;
+ uint32_t spll_func_cntl = data->clock_registers.vCG_SPLL_FUNC_CNTL;
+ uint32_t spll_func_cntl_2 = data->clock_registers.vCG_SPLL_FUNC_CNTL_2;
+ uint32_t dll_cntl = data->clock_registers.vDLL_CNTL;
+ uint32_t mclk_pwrmgt_cntl = data->clock_registers.vMCLK_PWRMGT_CNTL;
+
+
+ /* The ACPI state should not do DPM on DC (or ever).*/
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (data->acpi_vddc)
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = PP_HOST_TO_SMC_UL(data->min_vddc_in_pptable * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = data->vddc_phase_shed_control ? 0 : 1;
+ /* run the ACPI state at the reference clock */
+ table->ACPILevel.SclkFrequency = atomctrl_get_reference_clock(hwmgr);
+
+ /* get the engine clock dividers for this clock value*/
+ result = atomctrl_get_engine_pll_dividers_vi(hwmgr,
+ table->ACPILevel.SclkFrequency, &dividers);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error retrieving Engine Clock dividers from VBIOS.", return result);
+
+ /* divider ID for required SCLK*/
+ table->ACPILevel.SclkDid = (uint8_t)dividers.pll_post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0);
+ spll_func_cntl = PHM_SET_FIELD(spll_func_cntl,
+ CG_SPLL_FUNC_CNTL, SPLL_RESET, 1);
+ spll_func_cntl_2 = PHM_SET_FIELD(spll_func_cntl_2,
+ CG_SPLL_FUNC_CNTL_2, SCLK_MUX_SEL, 4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = data->clock_registers.vCG_SPLL_FUNC_CNTL_3;
+ table->ACPILevel.CgSpllFuncCntl4 = data->clock_registers.vCG_SPLL_FUNC_CNTL_4;
+ table->ACPILevel.SpllSpreadSpectrum = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM;
+ table->ACPILevel.SpllSpreadSpectrum2 = data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ /* For various features to be enabled/disabled while this level is active.*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags);
+ /* SCLK frequency in units of 10KHz*/
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl3);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CgSpllFuncCntl4);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SpllSpreadSpectrum2);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1);
+
+
+ /* table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;*/
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
+ table->MemoryACPILevel.MinVddci = table->MemoryACPILevel.MinVddc;
+ else {
+ if (data->acpi_vddci != 0)
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci = PP_HOST_TO_SMC_UL(data->min_vddci_in_pptable * VOLTAGE_SCALE);
+ }
+
+ if (0 == ci_populate_mvdd_value(hwmgr, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd =
+ PP_HOST_TO_SMC_UL(voltage_level.Voltage * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinMvdd = 0;
+
+ /* Force reset on DLL*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_RESET, 0x1);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_RESET, 0x1);
+
+ /* Disable DLL in ACPIState*/
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK0_PDNB, 0);
+ mclk_pwrmgt_cntl = PHM_SET_FIELD(mclk_pwrmgt_cntl,
+ MCLK_PWRMGT_CNTL, MRDCK1_PDNB, 0);
+
+ /* Enable DLL bypass signal*/
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK0_BYPASS, 0);
+ dll_cntl = PHM_SET_FIELD(dll_cntl,
+ DLL_CNTL, MRDCK1_BYPASS, 0);
+
+ table->MemoryACPILevel.DllCntl =
+ PP_HOST_TO_SMC_UL(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl =
+ PP_HOST_TO_SMC_UL(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_AD_FUNC_CNTL);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_DQ_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_FUNC_CNTL_2);
+ table->MemoryACPILevel.MpllSs1 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS1);
+ table->MemoryACPILevel.MpllSs2 =
+ PP_HOST_TO_SMC_UL(data->clock_registers.vMPLL_SS2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpH = 0;
+ table->MemoryACPILevel.DownH = 100;
+ table->MemoryACPILevel.VoltageDownH = 0;
+ /* Indicates maximum activity level for this performance level.*/
+ table->MemoryACPILevel.ActivityLevel = PP_HOST_TO_SMC_US((uint16_t)data->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = 0;
+ table->MemoryACPILevel.StrobeEnable = 0;
+ table->MemoryACPILevel.EdcReadEnable = 0;
+ table->MemoryACPILevel.EdcWriteEnable = 0;
+ table->MemoryACPILevel.RttEnable = 0;
+
+ return result;
+}
+
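Each PHM_SET_FIELD above is a read-modify-write on a cached register value; it expands to a mask-and-shift built from the field's generated mask and shift constants. A sketch of the underlying operation, using placeholder mask/shift values rather than the real CG_SPLL_FUNC_CNTL layout:

#include <stdint.h>

/* Placeholder field layout; real values come from the register headers. */
#define SPLL_PWRON_MASK		0x00000002u
#define SPLL_PWRON__SHIFT	1u

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* In spirit: spll = PHM_SET_FIELD(spll, CG_SPLL_FUNC_CNTL, SPLL_PWRON, 0); */
static uint32_t spll_power_off(uint32_t spll_func_cntl)
{
	return set_field(spll_func_cntl, SPLL_PWRON_MASK, SPLL_PWRON__SHIFT, 0);
}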
+static int ci_populate_smc_uvd_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ int result = 0;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_uvd_clock_voltage_dependency_table *uvd_table =
+ hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
+
+ table->UvdLevelCount = (uint8_t)(uvd_table->count);
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency =
+ uvd_table->entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency =
+ uvd_table->entries[count].dclk;
+ table->UvdLevel[count].MinVddc =
+ uvd_table->entries[count].v * VOLTAGE_SCALE;
+ table->UvdLevel[count].MinVddcPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].VclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "cannot find divider id for Vclk", return result);
+
+ table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->UvdLevel[count].DclkFrequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "cannot find divider id for Dclk", return result);
+
+ table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider;
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->UvdLevel[count].MinVddc);
+ }
+
+ return result;
+}
+
+static int ci_populate_smc_vce_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_vce_clock_voltage_dependency_table *vce_table =
+ hwmgr->dyn_state.vce_clock_voltage_dependency_table;
+
+ table->VceLevelCount = (uint8_t)(vce_table->count);
+ table->VceBootLevel = 0;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency = vce_table->entries[count].evclk;
+ table->VceLevel[count].MinVoltage =
+ vce_table->entries[count].v * VOLTAGE_SCALE;
+ table->VceLevel[count].MinPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->VceLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "cannot find divider id for VCE engine clock",
+ return result);
+
+ table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VceLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int ci_populate_smc_acp_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_acp_clock_voltage_dependency_table *acp_table =
+ hwmgr->dyn_state.acp_clock_voltage_dependency_table;
+
+ table->AcpLevelCount = (uint8_t)(acp_table->count);
+ table->AcpBootLevel = 0;
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency = acp_table->entries[count].acpclk;
+ table->AcpLevel[count].MinVoltage = acp_table->entries[count].v;
+ table->AcpLevel[count].MinPhases = 1;
+
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->AcpLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "cannot find divider id for ACP clock", return result);
+
+ table->AcpLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->AcpLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->AcpLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int ci_populate_smc_samu_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ int result = -EINVAL;
+ uint8_t count;
+ struct pp_atomctrl_clock_dividers_vi dividers;
+ struct phm_samu_clock_voltage_dependency_table *samu_table =
+ hwmgr->dyn_state.samu_clock_voltage_dependency_table;
+
+ table->SamuBootLevel = 0;
+ table->SamuLevelCount = (uint8_t)(samu_table->count);
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ table->SamuLevel[count].Frequency = samu_table->entries[count].samclk;
+ table->SamuLevel[count].MinVoltage = samu_table->entries[count].v * VOLTAGE_SCALE;
+ table->SamuLevel[count].MinPhases = 1;
+
+ /* retrieve divider value from VBIOS */
+ result = atomctrl_get_dfs_pll_dividers_vi(hwmgr,
+ table->SamuLevel[count].Frequency, &dividers);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "cannot find divider id for SAMU clock", return result);
+
+ table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency);
+ CONVERT_FROM_HOST_TO_SMC_US(table->SamuLevel[count].MinVoltage);
+ }
+ return result;
+}
+
+static int ci_populate_memory_timing_parameters(
+ struct pp_hwmgr *hwmgr,
+ uint32_t engine_clock,
+ uint32_t memory_clock,
+ struct SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs
+ )
+{
+ uint32_t dramTiming;
+ uint32_t dramTiming2;
+ uint32_t burstTime;
+ int result;
+
+ result = atomctrl_set_engine_dram_timings_rv770(hwmgr,
+ engine_clock, memory_clock);
+
+ PP_ASSERT_WITH_CODE(result == 0,
+ "Error calling VBIOS to set DRAM_TIMING.", return result);
+
+ dramTiming = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
+ dramTiming2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
+ burstTime = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
+
+ arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dramTiming);
+ arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dramTiming2);
+ arb_regs->McArbBurstTime = (uint8_t)burstTime;
+
+ return 0;
+}
+
+static int ci_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ int result = 0;
+ SMU7_Discrete_MCArbDramTimingTable arb_regs;
+ uint32_t i, j;
+
+ memset(&arb_regs, 0x00, sizeof(SMU7_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < data->dpm_table.mclk_table.count; j++) {
+ result = ci_populate_memory_timing_parameters
+ (hwmgr, data->dpm_table.sclk_table.dpm_levels[i].value,
+ data->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+
+ if (0 != result)
+ break;
+ }
+ }
+
+ if (0 == result) {
+ result = ci_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->arb_table_start,
+ (uint8_t *)&arb_regs,
+ sizeof(SMU7_Discrete_MCArbDramTimingTable),
+ SMC_RAM_END
+ );
+ }
+
+ return result;
+}
+
+static int ci_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ /* find boot level from dpm table*/
+ result = phm_find_boot_level(&(data->dpm_table.sclk_table),
+ data->vbios_boot_state.sclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.GraphicsBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.GraphicsBootLevel = 0;
+ pr_err("VBIOS did not find boot engine clock value \
+ in dependency table. Using Graphics DPM level 0!");
+ result = 0;
+ }
+
+ result = phm_find_boot_level(&(data->dpm_table.mclk_table),
+ data->vbios_boot_state.mclk_bootup_value,
+ (uint32_t *)&(smu_data->smc_state_table.MemoryBootLevel));
+
+ if (0 != result) {
+ smu_data->smc_state_table.MemoryBootLevel = 0;
+ pr_err("VBIOS did not find boot engine clock value \
+ in dependency table. Using Memory DPM level 0!");
+ result = 0;
+ }
+
+ table->BootVddc = data->vbios_boot_state.vddc_bootup_value;
+ table->BootVddci = data->vbios_boot_state.vddci_bootup_value;
+ table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value;
+
+ return result;
+}
+
+static int ci_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ const struct ci_smumgr *smu_data = (struct ci_smumgr *)hwmgr->smu_backend;
+
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < smu_data->mc_reg_table.last; j++) {
+ if (smu_data->mc_reg_table.validflag & 1<<j) {
+ PP_ASSERT_WITH_CODE(i < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE,
+ "Index of mc_reg_table->address[] array out of bounds", return -EINVAL);
+ mc_reg_table->address[i].s0 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 =
+ PP_HOST_TO_SMC_US(smu_data->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (uint8_t)i;
+
+ return 0;
+}
+
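Only registers whose bit is set in validflag are carried into the SMC copy, so the loop above compacts the sparse source index j into the dense destination index i. The same pattern as a stand-alone example with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t in[5] = { 10, 11, 12, 13, 14 };
	uint32_t out[5];
	uint16_t validflag = 0x15;	/* bits 0, 2, 4: keep columns 0, 2, 4 */
	unsigned int i = 0, j;

	for (j = 0; j < 5; j++)
		if (validflag & (1u << j))
			out[i++] = in[j];

	for (j = 0; j < i; j++)
		printf("%u ", (unsigned)out[j]);	/* prints: 10 12 14 */
	printf("\n");
	return 0;
}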
+static void ci_convert_mc_registers(
+ const struct ci_mc_reg_entry *entry,
+ SMU7_Discrete_MCRegisterSet *data,
+ uint32_t num_entries, uint32_t valid_flag)
+{
+ uint32_t i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & 1<<j) {
+ data->value[i] = PP_HOST_TO_SMC_UL(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
+static int ci_convert_mc_reg_table_entry_to_smc(
+ struct pp_hwmgr *hwmgr,
+ const uint32_t memory_clock,
+ SMU7_Discrete_MCRegisterSet *mc_reg_table_data
+ )
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint32_t i = 0;
+
+ for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
+ if (memory_clock <=
+ smu_data->mc_reg_table.mc_reg_table_entry[i].mclk_max) {
+ break;
+ }
+ }
+
+ if ((i == smu_data->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ ci_convert_mc_registers(&smu_data->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, smu_data->mc_reg_table.last,
+ smu_data->mc_reg_table.validflag);
+
+ return 0;
+}
+
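The search above picks the first MC register set whose mclk_max covers the requested memory clock, clamping to the last set when the clock is above every range. Reduced to a sketch:

#include <stdint.h>
#include <stdio.h>

struct mc_entry { uint32_t mclk_max; };

static unsigned int pick_entry(const struct mc_entry *e, unsigned int n,
			       uint32_t mclk)
{
	unsigned int i;

	for (i = 0; i < n; i++)		/* first range that covers mclk */
		if (mclk <= e[i].mclk_max)
			return i;
	return n ? n - 1 : 0;		/* clamp above the highest range */
}

int main(void)
{
	const struct mc_entry e[] = { { 30000 }, { 60000 } };

	printf("%u\n", pick_entry(e, 2, 80000));	/* prints 1 */
	return 0;
}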
+static int ci_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_MCRegisters *mc_regs)
+{
+ int result = 0;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ int res;
+ uint32_t i;
+
+ for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
+ res = ci_convert_mc_reg_table_entry_to_smc(
+ hwmgr,
+ data->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_regs->data[i]
+ );
+
+ if (0 != res)
+ result = res;
+ }
+
+ return result;
+}
+
+static int ci_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ uint32_t address;
+ int32_t result;
+
+ if (0 == (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+
+ memset(&smu_data->mc_regs, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ result = ci_convert_mc_reg_table_to_smc(hwmgr, &(smu_data->mc_regs));
+
+ if (result != 0)
+ return result;
+
+ address = smu_data->mc_reg_table_start + (uint32_t)offsetof(SMU7_Discrete_MCRegisters, data[0]);
+
+ return ci_copy_bytes_to_smc(hwmgr, address,
+ (uint8_t *)&smu_data->mc_regs.data[0],
+ sizeof(SMU7_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
+ SMC_RAM_END);
+}
+
+static int ci_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+ memset(&smu_data->mc_regs, 0x00, sizeof(SMU7_Discrete_MCRegisters));
+ result = ci_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for the MC register addresses!", return result);
+
+ result = ci_convert_mc_reg_table_to_smc(hwmgr, &smu_data->mc_regs);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize MCRegTable for driver state!", return result);
+
+ return ci_copy_bytes_to_smc(hwmgr, smu_data->mc_reg_table_start,
+ (uint8_t *)&smu_data->mc_regs, sizeof(SMU7_Discrete_MCRegisters), SMC_RAM_END);
+}
+
+static int ci_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ uint8_t count, level;
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[level].clk
+ >= data->vbios_boot_state.sclk_bootup_value) {
+ smu_data->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_mclk->count);
+
+ for (level = 0; level < count; level++) {
+ if (hwmgr->dyn_state.vddc_dependency_on_mclk->entries[level].clk
+ >= data->vbios_boot_state.mclk_bootup_value) {
+ smu_data->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int ci_populate_smc_svi2_config(struct pp_hwmgr *hwmgr,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control)
+ table->SVI2Enable = 1;
+ else
+ table->SVI2Enable = 0;
+ return 0;
+}
+
+static int ci_start_smc(struct pp_hwmgr *hwmgr)
+{
+ /* set the SMC instruction start point to 0x0 */
+ ci_program_jump_on_start(hwmgr);
+
+ /* enable smc clock */
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 0);
+
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
+ INTERRUPTS_ENABLED, 1);
+
+ return 0;
+}
+
+int ci_init_smc_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ SMU7_Discrete_DpmTable *table = &(smu_data->smc_state_table);
+ struct pp_atomctrl_gpio_pin_assignment gpio_pin;
+ u32 i;
+
+ ci_initialize_power_tune_defaults(hwmgr);
+ memset(&(smu_data->smc_state_table), 0x00, sizeof(smu_data->smc_state_table));
+
+ if (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control)
+ ci_populate_smc_voltage_tables(hwmgr, table);
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_AutomaticDCTransition))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_StepVddc))
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (data->is_memory_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (data->ulv_supported) {
+ result = ci_populate_ulv_state(hwmgr, &(table->Ulv));
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ULV state!", return result);
+
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ ixCG_ULV_PARAMETER, 0x40035);
+ }
+
+ result = ci_populate_all_graphic_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Graphics Level!", return result);
+
+ result = ci_populate_all_memory_levels(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Memory Level!", return result);
+
+ result = ci_populate_smc_link_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Link Level!", return result);
+
+ result = ci_populate_smc_acpi_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACPI Level!", return result);
+
+ result = ci_populate_smc_vce_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize VCE Level!", return result);
+
+ result = ci_populate_smc_acp_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize ACP Level!", return result);
+
+ result = ci_populate_smc_samu_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize SAMU Level!", return result);
+
+ /* Only the initial state is completely set up at this point
+ * (the other states are just copies of the boot state), so only
+ * the ARB settings for the initial state need to be populated.
+ */
+ result = ci_program_memory_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to Write ARB settings for the initial state.", return result);
+
+ result = ci_populate_smc_uvd_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize UVD Level!", return result);
+
+ table->UvdBootLevel = 0;
+ table->VceBootLevel = 0;
+ table->AcpBootLevel = 0;
+ table->SamuBootLevel = 0;
+
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ result = ci_populate_smc_boot_level(hwmgr, table);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to initialize Boot Level!", return result);
+
+ result = ci_populate_smc_initial_state(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to initialize Boot State!", return result);
+
+ result = ci_populate_bapm_parameters_in_dpm_table(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result, "Failed to populate BAPM Parameters!", return result);
+
+ table->UVDInterval = 1;
+ table->VCEInterval = 1;
+ table->ACPInterval = 1;
+ table->SAMUInterval = 1;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+
+ table->TemperatureLimitHigh =
+ (data->thermal_temp_setting.temperature_high *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ table->TemperatureLimitLow =
+ (data->thermal_temp_setting.temperature_low *
+ SMU7_Q88_FORMAT_CONVERSION_UNIT) / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->VddcVddciDelta = 4000;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+
+ PP_ASSERT_WITH_CODE((1 <= data->dpm_table.pcie_speed_table.count),
+ "There must be 1 or more PCIE levels defined in PPTable.",
+ return -EINVAL);
+
+ table->PCIeBootLinkLevel = (uint8_t)data->dpm_table.pcie_speed_table.count;
+ table->PCIeGenInterval = 1;
+
+ ci_populate_smc_svi2_config(hwmgr, table);
+
+ for (i = 0; i < SMU7_MAX_ENTRIES_SMIO; i++)
+ CONVERT_FROM_HOST_TO_SMC_UL(table->Smio[i]);
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+ if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, &gpio_pin)) {
+ table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ } else {
+ table->VRHotGpio = SMU7_UNUSED_GPIO_PIN;
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_RegulatorHot);
+ }
+
+ table->AcDcGpio = SMU7_UNUSED_GPIO_PIN;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddcPhase);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskVddciVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMaskMvddVid);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
+ CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
+ table->VddcVddciDelta = PP_HOST_TO_SMC_US(table->VddcVddciDelta);
+ CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
+ CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
+
+ table->BootVddc = PP_HOST_TO_SMC_US(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = PP_HOST_TO_SMC_US(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
+
+ /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
+ result = ci_copy_bytes_to_smc(hwmgr, smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ (uint8_t *)&(table->SystemFlags),
+ sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
+ SMC_RAM_END);
+
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to upload dpm data to SMC memory!", return result);
+
+ result = ci_populate_initial_mc_reg_table(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == result),
+ "Failed to populate the initial MC register table!", return result);
+
+ result = ci_populate_pm_fuses(hwmgr);
+ PP_ASSERT_WITH_CODE(0 == result,
+ "Failed to populate PM fuses to SMC memory!", return result);
+
+ ci_start_smc(hwmgr);
+
+ return 0;
+}
+
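The TemperatureLimitHigh/Low values populated in ci_init_smc_table are in the SMC's Q8.8 fixed-point format. A worked check of the conversion, assuming SMU7_Q88_FORMAT_CONVERSION_UNIT is 256 and PP_TEMPERATURE_UNITS_PER_CENTIGRADES is 1000 (both values are assumptions here, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

#define Q88_UNIT	256u	/* assumed SMU7_Q88_FORMAT_CONVERSION_UNIT */
#define UNITS_PER_C	1000u	/* assumed PP_TEMPERATURE_UNITS_PER_CENTIGRADES */

int main(void)
{
	uint32_t temp_high = 120 * UNITS_PER_C;		/* 120 C limit */
	uint16_t q88 = (uint16_t)((temp_high * Q88_UNIT) / UNITS_PER_C);

	printf("0x%04X (%d.%d C)\n", (unsigned)q88, q88 >> 8, q88 & 0xff);
	return 0;	/* prints 0x7800 (120.0 C) */
}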
+int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
+{
+ struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
+ uint32_t duty100;
+ uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
+ uint16_t fdo_min, slope1, slope2;
+ uint32_t reference_clock;
+ int res;
+ uint64_t tmp64;
+
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl))
+ return 0;
+
+ if (hwmgr->thermal_controller.fanInfo.bNoFan) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ if (0 == ci_data->fan_table_start) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_FDO_CTRL1, FMAX_DUTY100);
+
+ if (0 == duty100) {
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl);
+ return 0;
+ }
+
+ tmp64 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin * duty100;
+ do_div(tmp64, 10000);
+ fdo_min = (uint16_t)tmp64;
+
+ t_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usTMed - hwmgr->thermal_controller.advanceFanControlParameters.usTMin;
+ t_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usTHigh - hwmgr->thermal_controller.advanceFanControlParameters.usTMed;
+
+ pwm_diff1 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin;
+ pwm_diff2 = hwmgr->thermal_controller.advanceFanControlParameters.usPWMHigh - hwmgr->thermal_controller.advanceFanControlParameters.usPWMMed;
+
+ slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
+ slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
+
+ fan_table.TempMin = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMin) / 100);
+ fan_table.TempMed = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMed) / 100);
+ fan_table.TempMax = cpu_to_be16((50 + hwmgr->thermal_controller.advanceFanControlParameters.usTMax) / 100);
+
+ fan_table.Slope1 = cpu_to_be16(slope1);
+ fan_table.Slope2 = cpu_to_be16(slope2);
+
+ fan_table.FdoMin = cpu_to_be16(fdo_min);
+
+ fan_table.HystDown = cpu_to_be16(hwmgr->thermal_controller.advanceFanControlParameters.ucTHyst);
+
+ fan_table.HystUp = cpu_to_be16(1);
+
+ fan_table.HystSlope = cpu_to_be16(1);
+
+ fan_table.TempRespLim = cpu_to_be16(5);
+
+ reference_clock = smu7_get_xclk(hwmgr);
+
+ fan_table.RefreshPeriod = cpu_to_be32((hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay * reference_clock) / 1600);
+
+ fan_table.FdoMax = cpu_to_be16((uint16_t)duty100);
+
+ fan_table.TempSrc = (uint8_t)PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_MULT_THERMAL_CTRL, TEMP_SEL);
+
+ res = ci_copy_bytes_to_smc(hwmgr, ci_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+
+ return res;
+}
+
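The fan-curve slopes are computed in fixed point: the usPWM* parameters are in 0.01 percent units, the usT* parameters in 0.01 degree units, and the +50 rounds the final division by 100. A worked example with plausible, purely illustrative numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t duty100  = 255;			/* FMAX_DUTY100 */
	uint32_t t_min    = 4500, t_med   = 6500;	/* 45 C, 65 C in 0.01 C */
	uint32_t pwm_min  = 3000, pwm_med = 6000;	/* 30 %, 60 % in 0.01 % */

	uint32_t t_diff   = t_med - t_min;
	uint32_t pwm_diff = pwm_med - pwm_min;

	uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff) / t_diff)) / 100);

	printf("slope1 = %u\n", (unsigned)slope1);	/* prints slope1 = 61 */
	return 0;
}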
+static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ if (data->need_update_smu7_dpm_table &
+ (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ return ci_program_memory_timing_parameters(hwmgr);
+
+ return 0;
+}
+
+int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+ int result = 0;
+ uint32_t low_sclk_interrupt_threshold = 0;
+
+ if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_SclkThrottleLowNotification)
+ && (hwmgr->gfx_arbiter.sclk_threshold !=
+ data->low_sclk_interrupt_threshold)) {
+ data->low_sclk_interrupt_threshold =
+ hwmgr->gfx_arbiter.sclk_threshold;
+ low_sclk_interrupt_threshold =
+ data->low_sclk_interrupt_threshold;
+
+ CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
+
+ result = ci_copy_bytes_to_smc(
+ hwmgr,
+ smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable,
+ LowSclkInterruptT),
+ (uint8_t *)&low_sclk_interrupt_threshold,
+ sizeof(uint32_t),
+ SMC_RAM_END);
+ }
+
+ result = ci_update_and_upload_mc_reg_table(hwmgr);
+
+ PP_ASSERT_WITH_CODE((0 == result), "Failed to upload MC reg table!", return result);
+
+ result = ci_program_mem_timing_parameters(hwmgr);
+ PP_ASSERT_WITH_CODE((result == 0),
+ "Failed to program memory timing parameters!",
+ );
+
+ return result;
+}
+
+uint32_t ci_get_offsetof(uint32_t type, uint32_t member)
+{
+ switch (type) {
+ case SMU_SoftRegisters:
+ switch (member) {
+ case HandshakeDisables:
+ return offsetof(SMU7_SoftRegisters, HandshakeDisables);
+ case VoltageChangeTimeout:
+ return offsetof(SMU7_SoftRegisters, VoltageChangeTimeout);
+ case AverageGraphicsActivity:
+ return offsetof(SMU7_SoftRegisters, AverageGraphicsA);
+ case PreVBlankGap:
+ return offsetof(SMU7_SoftRegisters, PreVBlankGap);
+ case VBlankTimeout:
+ return offsetof(SMU7_SoftRegisters, VBlankTimeout);
+ }
+ break;
+ case SMU_Discrete_DpmTable:
+ switch (member) {
+ case LowSclkInterruptThreshold:
+ return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT);
+ }
+ break;
+ }
+ pr_debug("can't get the offset of type %x member %x\n", type, member);
+ return 0;
+}
+
+uint32_t ci_get_mac_definition(uint32_t value)
+{
+ switch (value) {
+ case SMU_MAX_LEVELS_GRAPHICS:
+ return SMU7_MAX_LEVELS_GRAPHICS;
+ case SMU_MAX_LEVELS_MEMORY:
+ return SMU7_MAX_LEVELS_MEMORY;
+ case SMU_MAX_LEVELS_LINK:
+ return SMU7_MAX_LEVELS_LINK;
+ case SMU_MAX_ENTRIES_SMIO:
+ return SMU7_MAX_ENTRIES_SMIO;
+ case SMU_MAX_LEVELS_VDDC:
+ return SMU7_MAX_LEVELS_VDDC;
+ case SMU_MAX_LEVELS_VDDCI:
+ return SMU7_MAX_LEVELS_VDDCI;
+ case SMU_MAX_LEVELS_MVDD:
+ return SMU7_MAX_LEVELS_MVDD;
+ }
+
+ pr_debug("can't get the mac of %x\n", value);
+ return 0;
+}
+
+static int ci_load_smc_ucode(struct pp_hwmgr *hwmgr)
+{
+ uint32_t byte_count, start_addr;
+ uint8_t *src;
+ uint32_t data;
+
+ struct cgs_firmware_info info = {0};
+
+ cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_SMU, &info);
+
+ hwmgr->is_kicker = info.is_kicker;
+ byte_count = info.image_size;
+ src = (uint8_t *)info.kptr;
+ start_addr = info.ucode_start_address;
+
+ if (byte_count > SMC_RAM_END) {
+ pr_err("SMC address is beyond the SMC RAM area.\n");
+ return -EINVAL;
+ }
+
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+
+ for (; byte_count >= 4; byte_count -= 4) {
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
+ src += 4;
+ }
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+
+ if (0 != byte_count) {
+ pr_err("SMC size must be dividable by 4\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
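The upload loop packs each four firmware bytes into one big-endian word before pushing it through the auto-incrementing SMC_IND data port. The packing on its own:

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_be32(const uint8_t *src)
{
	return ((uint32_t)src[0] << 24) | ((uint32_t)src[1] << 16) |
	       ((uint32_t)src[2] << 8)  |  (uint32_t)src[3];
}

int main(void)
{
	const uint8_t fw[4] = { 0xDE, 0xAD, 0xBE, 0xEF };

	printf("0x%08X\n", (unsigned)pack_be32(fw));	/* 0xDEADBEEF */
	return 0;
}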
+static int ci_upload_firmware(struct pp_hwmgr *hwmgr)
+{
+ if (ci_is_smc_ram_running(hwmgr)) {
+ pr_info("smc is running, no need to load smc firmware\n");
+ return 0;
+ }
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
+ boot_seq_done, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_MISC_CNTL,
+ pre_fetcher_en, 1);
+
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 1);
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_RESET_CNTL, rst_reg, 1);
+ return ci_load_smc_ucode(hwmgr);
+}
+
+int ci_process_firmware_header(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct ci_smumgr *ci_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+
+ uint32_t tmp = 0;
+ int result;
+ bool error = false;
+
+ if (ci_upload_firmware(hwmgr))
+ return -EINVAL;
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ ci_data->dpm_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result) {
+ data->soft_regs_start = tmp;
+ ci_data->soft_regs_start = tmp;
+ }
+
+ error |= (0 != result);
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcRegisterTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ ci_data->mc_reg_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, FanTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ ci_data->fan_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ ci_data->arb_table_start = tmp;
+
+ error |= (0 != result);
+
+ result = ci_read_smc_sram_dword(hwmgr,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, Version),
+ &tmp, SMC_RAM_END);
+
+ if (0 == result)
+ hwmgr->microcode_version_info.SMC = tmp;
+
+ error |= (0 != result);
+
+ return error ? 1 : 0;
+}
+
+static uint8_t ci_get_memory_module_index(struct pp_hwmgr *hwmgr)
+{
+ return (uint8_t) (0xFF & (cgs_read_register(hwmgr->device, mmBIOS_SCRATCH_4) >> 16));
+}
+
+static bool ci_check_s0_mc_reg_index(uint16_t in_reg, uint16_t *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case mmMC_SEQ_RAS_TIMING:
+ *out_reg = mmMC_SEQ_RAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_DLL_STBY:
+ *out_reg = mmMC_SEQ_DLL_STBY_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD0:
+ *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CMD1:
+ *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
+ break;
+
+ case mmMC_SEQ_G5PDX_CTRL:
+ *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
+ break;
+
+ case mmMC_SEQ_CAS_TIMING:
+ *out_reg = mmMC_SEQ_CAS_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING:
+ *out_reg = mmMC_SEQ_MISC_TIMING_LP;
+ break;
+
+ case mmMC_SEQ_MISC_TIMING2:
+ *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CMD:
+ *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
+ break;
+
+ case mmMC_SEQ_PMG_DVS_CTL:
+ *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D0:
+ *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_RD_CTL_D1:
+ *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D0:
+ *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_D1:
+ *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
+ break;
+
+ case mmMC_PMG_CMD_EMRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS1:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ break;
+
+ case mmMC_SEQ_PMG_TIMING:
+ *out_reg = mmMC_SEQ_PMG_TIMING_LP;
+ break;
+
+ case mmMC_PMG_CMD_MRS2:
+ *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
+ break;
+
+ case mmMC_SEQ_WR_CTL_2:
+ *out_reg = mmMC_SEQ_WR_CTL_2_LP;
+ break;
+
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+static int ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
+{
+ uint32_t i;
+ uint16_t address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address)
+ ? address : table->mc_reg_address[i].s1;
+ }
+ return 0;
+}
+
+static int ci_copy_vbios_smc_reg_table(const pp_atomctrl_mc_reg_table *table,
+ struct ci_mc_reg_table *ni_table)
+{
+ uint8_t i, j;
+
+ PP_ASSERT_WITH_CODE((table->last <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ PP_ASSERT_WITH_CODE((table->num_entries <= MAX_AC_TIMING_ENTRIES),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ for (i = 0; i < table->last; i++)
+ ni_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ni_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ni_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++) {
+ ni_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ }
+
+ ni_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
+static int ci_set_mc_special_registers(struct pp_hwmgr *hwmgr,
+ struct ci_mc_reg_table *table)
+{
+ uint8_t i, j, k;
+ uint32_t temp_reg;
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ switch (table->mc_reg_address[i].s1) {
+
+ case mmMC_SEQ_MISC1:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) |
+ ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+
+ if (!data->is_memory_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+
+ if (!data->is_memory_gddr5 && j < SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE) {
+ table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
+ table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ }
+
+ break;
+
+ case mmMC_SEQ_RESERVE_M:
+ temp_reg = cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
+ table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) |
+ (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ PP_ASSERT_WITH_CODE((j <= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE),
+ "Invalid VramInfo table.", return -EINVAL);
+ break;
+
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
+static int ci_set_valid_flag(struct ci_mc_reg_table *table)
+{
+ uint8_t i, j;
+
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->validflag |= (1 << i);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
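A register column earns a validflag bit only when its value differs between some pair of adjacent MCLK entries; columns that never change need no per-level storage. The scan as a stand-alone example:

#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES	3
#define NUM_REGS	2

int main(void)
{
	/* Made-up mc_data: column 0 is constant, column 1 changes. */
	static const uint32_t mc_data[NUM_ENTRIES][NUM_REGS] = {
		{ 0xA0, 0x10 }, { 0xA0, 0x20 }, { 0xA0, 0x20 },
	};
	uint16_t validflag = 0;
	unsigned int i, j;

	for (i = 0; i < NUM_REGS; i++)
		for (j = 1; j < NUM_ENTRIES; j++)
			if (mc_data[j - 1][i] != mc_data[j][i]) {
				validflag |= (uint16_t)(1u << i);
				break;
			}

	printf("validflag = 0x%X\n", (unsigned)validflag);	/* 0x2 */
	return 0;
}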
+int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
+{
+ int result;
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)(hwmgr->smu_backend);
+ pp_atomctrl_mc_reg_table *table;
+ struct ci_mc_reg_table *ni_table = &smu_data->mc_reg_table;
+ uint8_t module_index = ci_get_memory_module_index(hwmgr);
+
+ table = kzalloc(sizeof(pp_atomctrl_mc_reg_table), GFP_KERNEL);
+
+ if (NULL == table)
+ return -ENOMEM;
+
+ /* Program additional LP registers that are no longer programmed by VBIOS */
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_CAS_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_CAS_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_DLL_STBY_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_DLL_STBY));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CMD1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_G5PDX_CTRL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CMD));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_DVS_CTL));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_MISC_TIMING2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_EMRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_EMRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS1_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D0));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_RD_CTL_D1));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_TIMING_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_PMG_TIMING));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_PMG_CMD_MRS2_LP, cgs_read_register(hwmgr->device, mmMC_PMG_CMD_MRS2));
+ cgs_write_register(hwmgr->device, mmMC_SEQ_WR_CTL_2_LP, cgs_read_register(hwmgr->device, mmMC_SEQ_WR_CTL_2));
+
+ memset(table, 0x00, sizeof(pp_atomctrl_mc_reg_table));
+
+ result = atomctrl_initialize_mc_reg_table(hwmgr, module_index, table);
+
+ if (0 == result)
+ result = ci_copy_vbios_smc_reg_table(table, ni_table);
+
+ if (0 == result) {
+ ci_set_s0_mc_reg_index(ni_table);
+ result = ci_set_mc_special_registers(hwmgr, ni_table);
+ }
+
+ if (0 == result)
+ ci_set_valid_flag(ni_table);
+
+ kfree(table);
+
+ return result;
+}
+
+bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
+{
+ return ci_is_smc_ram_running(hwmgr);
+}
+
+int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request)
+{
+ struct ci_smumgr *smu_data = (struct ci_smumgr *)
+ (hwmgr->smu_backend);
+ struct SMU7_Discrete_GraphicsLevel *levels =
+ smu_data->smc_state_table.GraphicsLevel;
+ uint32_t array = smu_data->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ uint32_t array_size = sizeof(struct SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ uint32_t i;
+
+ for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
+ levels[i].ActivityLevel =
+ cpu_to_be16(request->activity_threshold);
+ levels[i].EnabledForActivity = 1;
+ levels[i].UpH = request->up_hyst;
+ levels[i].DownH = request->down_hyst;
+ }
+
+ return ci_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
+ array_size, SMC_RAM_END);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h
new file mode 100644
index 000000000000..cc4176d2d25f
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smc.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef CI_SMC_H
+#define CI_SMC_H
+
+#include <linux/types.h>
+
+
+struct pp_smumgr;
+struct pp_hwmgr;
+struct amd_pp_profile;
+
+int ci_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
+ uint16_t msg, uint32_t parameter);
+int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int ci_populate_all_graphic_levels(struct pp_hwmgr *hwmgr);
+int ci_populate_all_memory_levels(struct pp_hwmgr *hwmgr);
+int ci_init_smc_table(struct pp_hwmgr *hwmgr);
+int ci_thermal_setup_fan_table(struct pp_hwmgr *hwmgr);
+int ci_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type);
+int ci_update_sclk_threshold(struct pp_hwmgr *hwmgr);
+uint32_t ci_get_offsetof(uint32_t type, uint32_t member);
+uint32_t ci_get_mac_definition(uint32_t value);
+int ci_process_firmware_header(struct pp_hwmgr *hwmgr);
+int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr);
+bool ci_is_dpm_running(struct pp_hwmgr *hwmgr);
+int ci_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
+ struct amd_pp_profile *request);
+
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
new file mode 100644
index 000000000000..f265f42a7ed3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include "linux/delay.h"
+
+#include "smumgr.h"
+#include "ci_smumgr.h"
+#include "cgs_common.h"
+#include "ci_smc.h"
+
+static int ci_smu_init(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct ci_smumgr *ci_priv = NULL;
+
+ ci_priv = kzalloc(sizeof(struct ci_smumgr), GFP_KERNEL);
+
+ if (ci_priv == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+ ci_priv->activity_target[i] = 30;
+
+ hwmgr->smu_backend = ci_priv;
+
+ return 0;
+}
+
+static int ci_smu_fini(struct pp_hwmgr *hwmgr)
+{
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+ return 0;
+}
+
+static int ci_start_smu(struct pp_hwmgr *hwmgr)
+{
+ return 0;
+}
+
+const struct pp_smumgr_func ci_smu_funcs = {
+ .smu_init = ci_smu_init,
+ .smu_fini = ci_smu_fini,
+ .start_smu = ci_start_smu,
+ .check_fw_load_finish = NULL,
+ .request_smu_load_fw = NULL,
+ .request_smu_load_specific_fw = NULL,
+ .send_msg_to_smc = ci_send_msg_to_smc,
+ .send_msg_to_smc_with_parameter = ci_send_msg_to_smc_with_parameter,
+ .download_pptable_settings = NULL,
+ .upload_pptable_settings = NULL,
+ .get_offsetof = ci_get_offsetof,
+ .process_firmware_header = ci_process_firmware_header,
+ .init_smc_table = ci_init_smc_table,
+ .update_sclk_threshold = ci_update_sclk_threshold,
+ .thermal_setup_fan_table = ci_thermal_setup_fan_table,
+ .populate_all_graphic_levels = ci_populate_all_graphic_levels,
+ .populate_all_memory_levels = ci_populate_all_memory_levels,
+ .get_mac_definition = ci_get_mac_definition,
+ .initialize_mc_reg_table = ci_initialize_mc_reg_table,
+ .is_dpm_running = ci_is_dpm_running,
+ .populate_requested_graphic_levels = ci_populate_requested_graphic_levels,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
new file mode 100644
index 000000000000..8189cfa17c46
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _CI_SMUMANAGER_H_
+#define _CI_SMUMANAGER_H_
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+
+#include "smu7_discrete.h"
+#include <pp_endian.h>
+#include "ppatomctrl.h"
+
+struct ci_pt_defaults {
+ u8 svi_load_line_en;
+ u8 svi_load_line_vddc;
+ u8 tdc_vddc_throttle_release_limit_perc;
+ u8 tdc_mawt;
+ u8 tdc_waterfall_ctl;
+ u8 dte_ambient_temp_base;
+ u32 display_cac;
+ u32 bapm_temp_gradient;
+ u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+ u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+struct ci_mc_reg_entry {
+ uint32_t mclk_max;
+ uint32_t mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+ uint8_t last;
+ uint8_t num_entries;
+ uint16_t validflag;
+ struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_smumgr {
+ uint32_t soft_regs_start;
+ uint32_t dpm_table_start;
+ uint32_t mc_reg_table_start;
+ uint32_t fan_table_start;
+ uint32_t arb_table_start;
+ uint32_t ulv_setting_starts;
+ struct SMU7_Discrete_DpmTable smc_state_table;
+ struct SMU7_Discrete_PmFuses power_tune_table;
+ const struct ci_pt_defaults *power_tune_defaults;
+ SMU7_Discrete_MCRegisters mc_regs;
+ struct ci_mc_reg_table mc_reg_table;
+ uint32_t activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+
+};
+
+#endif
+
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index 652aaa43e95c..78ab0556e48f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -52,53 +52,52 @@ static const enum cz_scratch_entry firmware_list[] = {
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
};
-static int cz_smum_get_argument(struct pp_smumgr *smumgr)
+static int cz_smum_get_argument(struct pp_hwmgr *hwmgr)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- return cgs_read_register(smumgr->device,
+ return cgs_read_register(hwmgr->device,
mmSMU_MP1_SRBM2P_ARG_0);
}
-static int cz_send_msg_to_smc_async(struct pp_smumgr *smumgr,
- uint16_t msg)
+static int cz_send_msg_to_smc_async(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int result = 0;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- result = SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ result = PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
if (result != 0) {
pr_err("cz_send_msg_to_smc_async (0x%04x) failed\n", msg);
return result;
}
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_RESP_0, 0);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_MSG_0, msg);
return 0;
}
/* Send a message to the SMC, and wait for its response.*/
-static int cz_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+static int cz_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int result = 0;
- result = cz_send_msg_to_smc_async(smumgr, msg);
+ result = cz_send_msg_to_smc_async(hwmgr, msg);
if (result != 0)
return result;
- return SMUM_WAIT_FIELD_UNEQUAL(smumgr,
+ return PHM_WAIT_FIELD_UNEQUAL(hwmgr,
SMU_MP1_SRBM2P_RESP_0, CONTENT, 0);
}
-static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
+static int cz_set_smc_sram_address(struct pp_hwmgr *hwmgr,
uint32_t smc_address, uint32_t limit)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
if (0 != (3 & smc_address)) {
@@ -111,39 +110,39 @@ static int cz_set_smc_sram_address(struct pp_smumgr *smumgr,
return -EINVAL;
}
- cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX_0,
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX_0,
SMN_MP1_SRAM_START_ADDR + smc_address);
return 0;
}
-static int cz_write_smc_sram_dword(struct pp_smumgr *smumgr,
+static int cz_write_smc_sram_dword(struct pp_hwmgr *hwmgr,
uint32_t smc_address, uint32_t value, uint32_t limit)
{
int result;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- result = cz_set_smc_sram_address(smumgr, smc_address, limit);
+ result = cz_set_smc_sram_address(hwmgr, smc_address, limit);
if (!result)
- cgs_write_register(smumgr->device, mmMP0PUB_IND_DATA_0, value);
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_DATA_0, value);
return result;
}
-static int cz_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+static int cz_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cgs_write_register(smumgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0, parameter);
- return cz_send_msg_to_smc(smumgr, msg);
+ return cz_send_msg_to_smc(hwmgr, msg);
}
-static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
+static int cz_check_fw_load_finish(struct pp_hwmgr *hwmgr,
uint32_t firmware)
{
int i;
@@ -151,19 +150,19 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cgs_write_register(smumgr->device, mmMP0PUB_IND_INDEX, index);
+ cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index);
- for (i = 0; i < smumgr->usec_timeout; i++) {
+ for (i = 0; i < hwmgr->usec_timeout; i++) {
if (firmware ==
- (cgs_read_register(smumgr->device, mmMP0PUB_IND_DATA) & firmware))
+ (cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA) & firmware))
break;
udelay(1);
}
- if (i >= smumgr->usec_timeout) {
+ if (i >= hwmgr->usec_timeout) {
pr_err("SMU check loaded firmware failed.\n");
return -EINVAL;
}
@@ -171,7 +170,7 @@ static int cz_check_fw_load_finish(struct pp_smumgr *smumgr,
return 0;
}
-static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
+static int cz_load_mec_firmware(struct pp_hwmgr *hwmgr)
{
uint32_t reg_data;
uint32_t tmp;
@@ -179,44 +178,44 @@ static int cz_load_mec_firmware(struct pp_smumgr *smumgr)
struct cgs_firmware_info info = {0};
struct cz_smumgr *cz_smu;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cz_smu = (struct cz_smumgr *)smumgr->backend;
- ret = cgs_get_firmware_info(smumgr->device,
+ cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
+ ret = cgs_get_firmware_info(hwmgr->device,
CGS_UCODE_ID_CP_MEC, &info);
if (ret)
return -EINVAL;
/* Disable MEC parsing/prefetching */
- tmp = cgs_read_register(smumgr->device,
+ tmp = cgs_read_register(hwmgr->device,
mmCP_MEC_CNTL);
- tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
- tmp = SMUM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, tmp);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
+ tmp = PHM_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, tmp);
- tmp = cgs_read_register(smumgr->device,
+ tmp = cgs_read_register(hwmgr->device,
mmCP_CPC_IC_BASE_CNTL);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
- tmp = SMUM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
+ tmp = PHM_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_CNTL, tmp);
reg_data = smu_lower_32_bits(info.mc_addr) &
- SMUM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_LO, reg_data);
reg_data = smu_upper_32_bits(info.mc_addr) &
- SMUM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
- cgs_write_register(smumgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
+ PHM_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
+ cgs_write_register(hwmgr->device, mmCP_CPC_IC_BASE_HI, reg_data);
return 0;
}
-static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
+static uint8_t cz_translate_firmware_enum_to_arg(struct pp_hwmgr *hwmgr,
enum cz_scratch_entry firmware_enum)
{
uint8_t ret = 0;
@@ -226,7 +225,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
ret = UCODE_ID_SDMA0;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
ret = UCODE_ID_SDMA0;
else
ret = UCODE_ID_SDMA1;
@@ -244,7 +243,7 @@ static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
ret = UCODE_ID_CP_MEC_JT1;
break;
case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
ret = UCODE_ID_CP_MEC_JT1;
else
ret = UCODE_ID_CP_MEC_JT2;
@@ -326,17 +325,17 @@ static enum cgs_ucode_id cz_convert_fw_type_to_cgs(uint32_t fw_type)
}
static int cz_smu_populate_single_scratch_task(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry fw_enum,
uint8_t type, bool is_last)
{
uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = type;
- task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->scratch_buffer_length; i++)
@@ -363,17 +362,17 @@ static int cz_smu_populate_single_scratch_task(
}
static int cz_smu_populate_single_ucode_load_task(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry fw_enum,
bool is_last)
{
uint8_t i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
task->type = TASK_TYPE_UCODE_LOAD;
- task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
+ task->arg = cz_translate_firmware_enum_to_arg(hwmgr, fw_enum);
task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
for (i = 0; i < cz_smu->driver_buffer_length; i++)
@@ -392,22 +391,22 @@ static int cz_smu_populate_single_ucode_load_task(
return 0;
}
-static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_rlc_aram_save(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_aram = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
TASK_TYPE_UCODE_SAVE, true);
return 0;
}
-static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
+static int cz_smu_initialize_toc_empty_job_list(struct pp_hwmgr *hwmgr)
{
int i;
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
@@ -416,17 +415,17 @@ static int cz_smu_initialize_toc_empty_job_list(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
toc->JobList[JOB_GFX_SAVE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
TASK_TYPE_UCODE_SAVE, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
TASK_TYPE_UCODE_SAVE, true);
@@ -434,121 +433,120 @@ static int cz_smu_construct_toc_for_vddgfx_enter(struct pp_smumgr *smumgr)
}
-static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
struct TOC *toc = (struct TOC *)cz_smu->toc_buffer.kaddr;
toc->JobList[JOB_GFX_RESTORE] = (uint8_t)cz_smu->toc_entry_used_count;
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id == CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id == CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
else
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
/* populate scratch */
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
TASK_TYPE_UCODE_LOAD, false);
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
TASK_TYPE_UCODE_LOAD, true);
return 0;
}
-static int cz_smu_construct_toc_for_power_profiling(
- struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_power_profiling(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_power_profiling_index = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
TASK_TYPE_INITIALIZE, true);
return 0;
}
-static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_bootup(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_initialize_index = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
- if (smumgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id != CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
- if (smumgr->chip_id != CHIP_STONEY)
- cz_smu_populate_single_ucode_load_task(smumgr,
+ if (hwmgr->chip_id != CHIP_STONEY)
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
- cz_smu_populate_single_ucode_load_task(smumgr,
+ cz_smu_populate_single_ucode_load_task(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
return 0;
}
-static int cz_smu_construct_toc_for_clock_table(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc_for_clock_table(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_clock_table = cz_smu->toc_entry_used_count;
- cz_smu_populate_single_scratch_task(smumgr,
+ cz_smu_populate_single_scratch_task(hwmgr,
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
TASK_TYPE_INITIALIZE, true);
return 0;
}
-static int cz_smu_construct_toc(struct pp_smumgr *smumgr)
+static int cz_smu_construct_toc(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
cz_smu->toc_entry_used_count = 0;
- cz_smu_initialize_toc_empty_job_list(smumgr);
- cz_smu_construct_toc_for_rlc_aram_save(smumgr);
- cz_smu_construct_toc_for_vddgfx_enter(smumgr);
- cz_smu_construct_toc_for_vddgfx_exit(smumgr);
- cz_smu_construct_toc_for_power_profiling(smumgr);
- cz_smu_construct_toc_for_bootup(smumgr);
- cz_smu_construct_toc_for_clock_table(smumgr);
+ cz_smu_initialize_toc_empty_job_list(hwmgr);
+ cz_smu_construct_toc_for_rlc_aram_save(hwmgr);
+ cz_smu_construct_toc_for_vddgfx_enter(hwmgr);
+ cz_smu_construct_toc_for_vddgfx_exit(hwmgr);
+ cz_smu_construct_toc_for_power_profiling(hwmgr);
+ cz_smu_construct_toc_for_bootup(hwmgr);
+ cz_smu_construct_toc_for_clock_table(hwmgr);
return 0;
}
-static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
+static int cz_smu_populate_firmware_entries(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
uint32_t firmware_type;
uint32_t i;
int ret;
@@ -559,12 +557,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
- firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
+ firmware_type = cz_translate_firmware_enum_to_arg(hwmgr,
firmware_list[i]);
ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
- ret = cgs_get_firmware_info(smumgr->device,
+ ret = cgs_get_firmware_info(hwmgr->device,
ucode_id, &info);
if (ret == 0) {
@@ -585,12 +583,12 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
}
static int cz_smu_populate_single_scratch_entry(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
enum cz_scratch_entry scratch_type,
uint32_t ulsize_byte,
struct cz_buffer_entry *entry)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
long long mc_addr =
((long long)(cz_smu->smu_buffer.mc_addr_high) << 32)
| cz_smu->smu_buffer.mc_addr_low;
@@ -611,9 +609,9 @@ static int cz_smu_populate_single_scratch_entry(
return 0;
}
-static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
+static int cz_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
unsigned long i;
for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
@@ -624,25 +622,25 @@ static int cz_download_pptable_settings(struct pp_smumgr *smumgr, void **table)
*table = (struct SMU8_Fusion_ClkTable *)cz_smu->scratch_buffer[i].kaddr;
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
cz_smu->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
cz_smu->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_clock_table);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToDram);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
return 0;
}
-static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
+static int cz_upload_pptable_settings(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)smumgr->backend;
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
unsigned long i;
for (i = 0; i < cz_smu->scratch_buffer_length; i++) {
@@ -651,63 +649,63 @@ static int cz_upload_pptable_settings(struct pp_smumgr *smumgr)
break;
}
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
cz_smu->scratch_buffer[i].mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
cz_smu->scratch_buffer[i].mc_addr_low);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_clock_table);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_ClkTableXferToSmu);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
return 0;
}
-static int cz_request_smu_load_fw(struct pp_smumgr *smumgr)
+static int cz_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
- struct cz_smumgr *cz_smu = (struct cz_smumgr *)(smumgr->backend);
+ struct cz_smumgr *cz_smu = (struct cz_smumgr *)(hwmgr->smu_backend);
uint32_t smc_address;
- if (!smumgr->reload_fw) {
+ if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
return 0;
}
- cz_smu_populate_firmware_entries(smumgr);
+ cz_smu_populate_firmware_entries(hwmgr);
- cz_smu_construct_toc(smumgr);
+ cz_smu_construct_toc(hwmgr);
smc_address = SMU8_FIRMWARE_HEADER_LOCATION +
offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
- cz_write_smc_sram_dword(smumgr, smc_address, 0, smc_address+4);
+ cz_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrHi,
cz_smu->toc_buffer.mc_addr_high);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrLo,
cz_smu->toc_buffer.mc_addr_low);
- cz_send_msg_to_smc(smumgr, PPSMC_MSG_InitJobs);
+ cz_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
- cz_send_msg_to_smc_with_parameter(smumgr,
+ cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_aram);
- cz_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_ExecuteJob,
+ cz_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_power_profiling_index);
- return cz_send_msg_to_smc_with_parameter(smumgr,
+ return cz_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
cz_smu->toc_entry_initialize_index);
}
-static int cz_start_smu(struct pp_smumgr *smumgr)
+static int cz_start_smu(struct pp_hwmgr *hwmgr)
{
int ret = 0;
uint32_t fw_to_check = 0;
@@ -721,23 +719,23 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
UCODE_ID_CP_MEC_JT1_MASK |
UCODE_ID_CP_MEC_JT2_MASK;
- if (smumgr->chip_id == CHIP_STONEY)
+ if (hwmgr->chip_id == CHIP_STONEY)
fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
- ret = cz_request_smu_load_fw(smumgr);
+ ret = cz_request_smu_load_fw(hwmgr);
if (ret)
pr_err("SMU firmware load failed\n");
- cz_check_fw_load_finish(smumgr, fw_to_check);
+ cz_check_fw_load_finish(hwmgr, fw_to_check);
- ret = cz_load_mec_firmware(smumgr);
+ ret = cz_load_mec_firmware(hwmgr);
if (ret)
pr_err("Mec Firmware load failed\n");
return ret;
}
-static int cz_smu_init(struct pp_smumgr *smumgr)
+static int cz_smu_init(struct pp_hwmgr *hwmgr)
{
uint64_t mc_addr = 0;
int ret = 0;
@@ -747,7 +745,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
if (cz_smu == NULL)
return -ENOMEM;
- smumgr->backend = cz_smu;
+ hwmgr->smu_backend = cz_smu;
cz_smu->toc_buffer.data_size = 4096;
cz_smu->smu_buffer.data_size =
@@ -757,7 +755,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
- ret = smu_allocate_memory(smumgr->device,
+ ret = smu_allocate_memory(hwmgr->device,
cz_smu->toc_buffer.data_size,
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -770,7 +768,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->toc_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->toc_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- ret = smu_allocate_memory(smumgr->device,
+ ret = smu_allocate_memory(hwmgr->device,
cz_smu->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -783,7 +781,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
cz_smu->smu_buffer.mc_addr_high = smu_upper_32_bits(mc_addr);
cz_smu->smu_buffer.mc_addr_low = smu_lower_32_bits(mc_addr);
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -791,14 +789,14 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
pr_err("Error when Populate Firmware Entry.\n");
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -806,7 +804,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
sizeof(struct SMU8_MultimediaPowerLogData),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -814,7 +812,7 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return -1;
}
- if (0 != cz_smu_populate_single_scratch_entry(smumgr,
+ if (0 != cz_smu_populate_single_scratch_entry(hwmgr,
CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
sizeof(struct SMU8_Fusion_ClkTable),
&cz_smu->scratch_buffer[cz_smu->scratch_buffer_length++])) {
@@ -825,18 +823,18 @@ static int cz_smu_init(struct pp_smumgr *smumgr)
return 0;
}
-static int cz_smu_fini(struct pp_smumgr *smumgr)
+static int cz_smu_fini(struct pp_hwmgr *hwmgr)
{
struct cz_smumgr *cz_smu;
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
- cz_smu = (struct cz_smumgr *)smumgr->backend;
+ cz_smu = (struct cz_smumgr *)hwmgr->smu_backend;
if (cz_smu) {
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
cz_smu->toc_buffer.handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
cz_smu->smu_buffer.handle);
kfree(cz_smu);
}
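The converted CZ helpers keep the SRBM2P mailbox handshake unchanged: poll until the response register reports the previous message complete, clear it, write the new message, then poll again. A hedged sketch of a caller built only from the functions in this hunk; cz_example_query is a hypothetical name and error handling is abbreviated:

/* Hypothetical caller: send one parameterised message through the
 * converted helpers and fetch the SMU's 32-bit reply from SRBM2P_ARG_0. */
static int cz_example_query(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t arg)
{
	int ret;

	ret = cz_send_msg_to_smc_with_parameter(hwmgr, msg, arg);
	if (ret)
		return ret;	/* handshake failed or timed out */

	return cz_smum_get_argument(hwmgr);
}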
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
index 8712f093d6d9..b1a66b5ada4a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smc.c
@@ -198,7 +198,7 @@ static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -216,7 +216,7 @@ static void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
SMU73_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
@@ -299,7 +299,7 @@ static int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
@@ -314,7 +314,7 @@ static int fiji_populate_svi_load_line(struct pp_hwmgr *hwmgr)
static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
@@ -334,11 +334,11 @@ static int fiji_populate_tdc_limit(struct pp_hwmgr *hwmgr)
static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
const struct fiji_pt_defaults *defaults = smu_data->power_tune_defaults;
uint32_t temp;
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (smu7_read_smc_sram_dword(hwmgr,
fuse_table_offset +
offsetof(SMU73_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, SMC_RAM_END))
@@ -359,7 +359,7 @@ static int fiji_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
int i;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
@@ -370,7 +370,7 @@ static int fiji_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
if ((hwmgr->thermal_controller.advanceFanControlParameters.
usFanOutputSensitivity & (1 << 15)) ||
@@ -389,7 +389,7 @@ static int fiji_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
int i;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
@@ -398,14 +398,9 @@ static int fiji_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
return 0;
}
-static int fiji_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
@@ -426,11 +421,11 @@ static int fiji_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
uint32_t pm_fuse_table_offset;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, PmFuseTable),
&pm_fuse_table_offset, SMC_RAM_END))
@@ -472,19 +467,13 @@ static int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr)
"Attempt to populate GnbLPML Failed!",
return -EINVAL);
- /* DW19 */
- if (fiji_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
/* DW20 */
if (fiji_populate_bapm_vddc_base_leakage_sidd(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
"Sidd Failed!", return -EINVAL);
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
(uint8_t *)&smu_data->power_tune_table,
sizeof(struct SMU73_Discrete_PmFuses), SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
@@ -586,7 +575,7 @@ static int fiji_populate_smc_link_level(struct pp_hwmgr *hwmgr,
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
int i;
/* Index (dpm_table->pcie_speed_table.count)
@@ -774,7 +763,7 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
struct phm_ppt_v1_information *table_info =
@@ -859,7 +848,7 @@ int fiji_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
levels[1].pcieDpmLevel = mid_pcie_level_enabled;
}
	/* level count is sent to the SMC once at SMC table init and never changes */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
(uint32_t)array_size, SMC_RAM_END);
return result;
@@ -1000,7 +989,7 @@ static int fiji_populate_single_memory_level(struct pp_hwmgr *hwmgr,
int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
int result;
/* populate MCLK dpm table to SMU7 */
@@ -1043,7 +1032,7 @@ int fiji_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
PPSMC_DISPLAY_WATERMARK_HIGH;
	/* level count is sent to the SMC once at SMC table init and never changes */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
(uint32_t)array_size, SMC_RAM_END);
return result;
@@ -1352,7 +1341,7 @@ static int fiji_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct SMU73_Discrete_MCArbDramTimingTable arb_regs;
uint32_t i, j;
int result = 0;
@@ -1370,7 +1359,7 @@ static int fiji_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
if (!result)
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.arb_table_start,
(uint8_t *)&arb_regs,
sizeof(SMU73_Discrete_MCArbDramTimingTable),
@@ -1460,7 +1449,7 @@ static int fiji_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
static int fiji_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint8_t count, level;
@@ -1491,7 +1480,7 @@ static int fiji_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
volt_with_cks, value;
uint16_t clock_freq_u16;
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
volt_offset = 0;
struct phm_ppt_v1_information *table_info =
@@ -1694,9 +1683,9 @@ static int fiji_populate_vr_config(struct pp_hwmgr *hwmgr,
return 0;
}
-static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
+static int fiji_init_arb_table_index(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
uint32_t tmp;
int result;
@@ -1708,7 +1697,7 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
* In reality this field should not be in that structure
* but in a soft register.
*/
- result = smu7_read_smc_sram_dword(smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
if (result)
@@ -1717,13 +1706,13 @@ static int fiji_init_arb_table_index(struct pp_smumgr *smumgr)
tmp &= 0x00FFFFFF;
tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
- return smu7_write_smc_sram_dword(smumgr,
+ return smu7_write_smc_sram_dword(hwmgr,
smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
}
static int fiji_save_default_power_profile(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct SMU73_Discrete_GraphicsLevel *levels =
data->smc_state_table.GraphicsLevel;
unsigned min_level = 1;
@@ -1782,7 +1771,7 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
}
}
if (mask)
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_LedConfig,
mask);
return 0;
@@ -1799,7 +1788,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct SMU73_Discrete_DpmTable *table = &(smu_data->smc_state_table);
@@ -1985,7 +1974,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ result = smu7_copy_bytes_to_smc(hwmgr,
smu_data->smu7_data.dpm_table_start +
offsetof(SMU73_Discrete_DpmTable, SystemFlags),
(uint8_t *)&(table->SystemFlags),
@@ -1994,7 +1983,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to upload dpm data to SMC memory!", return result);
- result = fiji_init_arb_table_index(hwmgr->smumgr);
+ result = fiji_init_arb_table_index(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to upload arb data to SMC memory!", return result);
@@ -2022,7 +2011,7 @@ int fiji_init_smc_table(struct pp_hwmgr *hwmgr)
*/
int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
SMU73_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
@@ -2104,20 +2093,20 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
hwmgr->device, CGS_IND_REG__SMC,
CG_MULT_THERMAL_CTRL, TEMP_SEL);
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+ res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
SMC_RAM_END);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
@@ -2133,13 +2122,12 @@ int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
return 0;
- ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
if (!ret)
/* If this param is not changed, this function could fire unnecessarily */
@@ -2162,7 +2150,7 @@ static int fiji_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
@@ -2179,7 +2167,7 @@ int fiji_update_sclk_threshold(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.dpm_table_start +
offsetof(SMU73_Discrete_DpmTable,
LowSclkInterruptThreshold),
@@ -2256,7 +2244,7 @@ uint32_t fiji_get_mac_definition(uint32_t value)
static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2280,7 +2268,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_UVDDPM) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
return 0;
@@ -2288,7 +2276,7 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2312,7 +2300,7 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
return 0;
@@ -2320,7 +2308,7 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
{
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
@@ -2339,7 +2327,7 @@ static int fiji_update_samu_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SAMUDPM_SetEnabledMask,
(uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
return 0;
@@ -2373,12 +2361,12 @@ int fiji_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
+ struct fiji_smumgr *smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
uint32_t tmp;
int result;
bool error = false;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, DpmTable),
&tmp, SMC_RAM_END);
@@ -2388,7 +2376,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
@@ -2400,7 +2388,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, mcRegisterTable),
&tmp, SMC_RAM_END);
@@ -2408,7 +2396,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
if (!result)
smu_data->smu7_data.mc_reg_table_start = tmp;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, FanTable),
&tmp, SMC_RAM_END);
@@ -2418,7 +2406,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, mcArbDramTimingTable),
&tmp, SMC_RAM_END);
@@ -2428,7 +2416,7 @@ int fiji_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, Version),
&tmp, SMC_RAM_END);
@@ -2476,7 +2464,7 @@ int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
struct fiji_smumgr *smu_data = (struct fiji_smumgr *)
- (hwmgr->smumgr->backend);
+ (hwmgr->smu_backend);
struct SMU73_Discrete_GraphicsLevel *levels =
smu_data->smc_state_table.GraphicsLevel;
uint32_t array = smu_data->smu7_data.dpm_table_start +
@@ -2493,6 +2481,6 @@ int fiji_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
levels[i].DownHyst = request->down_hyst;
}
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
array_size, SMC_RAM_END);
}
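Every fiji_smc.c hunk above applies the same two-part substitution, so the shape is worth stating once. A sketch rather than patch text, with addr, buf and size as placeholder variables:

/* Before this series: the SMU backend and SMC accessors hang off a
 * separate pp_smumgr that pp_hwmgr merely points at. */
smu_data = (struct fiji_smumgr *)(hwmgr->smumgr->backend);
result = smu7_copy_bytes_to_smc(hwmgr->smumgr, addr, buf, size, SMC_RAM_END);

/* After: pp_hwmgr carries the backend directly, removing one level of
 * indirection and clearing the way to retire pp_smumgr. */
smu_data = (struct fiji_smumgr *)(hwmgr->smu_backend);
result = smu7_copy_bytes_to_smc(hwmgr, addr, buf, size, SMC_RAM_END);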
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 6ae948fc524f..5b25e067b2f1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -58,122 +58,122 @@ static const struct SMU73_Discrete_GraphicsLevel avfs_graphics_level[8] = {
{ 0xf811d047, 0x80380100, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0x21680000, 0x12000000, 0, 0, 0x0c, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00 }
};
-static int fiji_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* Wait for smc boot up */
- /* SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ /* PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0); */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_STATUS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
	/* Wait for ROM firmware to initialize interrupt handler */
- /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, SMC_IND,
+ /*SMUM_WAIT_VFPF_INDIRECT_REGISTER(hwmgr, SMC_IND,
SMC_INTR_CNTL_MASK_0, 0x10040, 0xFFFFFFFF); */
/* Set SMU Auto Start */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_INPUT_DATA, AUTO_START, 1);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
INTERRUPTS_ENABLED, 1);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
/* Wait for done bit to be set */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
SMU_STATUS, SMU_DONE, 0);
/* Check pass/failed indicator */
- if (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS) != 1) {
PP_ASSERT_WITH_CODE(false,
"SMU Firmware start failed!", return -1);
}
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int fiji_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int fiji_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
	/* Set SMC instruction start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
/* Enable clock */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int fiji_setup_pwr_virus(struct pp_hwmgr *hwmgr)
{
int i;
int result = -EINVAL;
uint32_t reg, data;
const PWR_Command_Table *pvirus = PwrVirusTable;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
switch (pvirus->command) {
case PwrCmdWrite:
reg = pvirus->reg;
data = pvirus->data;
- cgs_write_register(smumgr->device, reg, data);
+ cgs_write_register(hwmgr->device, reg, data);
break;
case PwrCmdEnd:
@@ -192,13 +192,13 @@ static int fiji_setup_pwr_virus(struct pp_smumgr *smumgr)
return result;
}
-static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
+static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(smumgr,
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
@@ -206,23 +206,23 @@ static int fiji_start_avfs_btc(struct pp_smumgr *smumgr)
}
	/* Soft-reset the engine before loading uCode */
/* halt */
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
/* reset everything */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
/* clear reset */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
return result;
}
-static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int fiji_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
int32_t vr_config;
uint32_t table_start;
uint32_t level_addr, vr_config_addr;
uint32_t level_size = sizeof(avfs_graphics_level);
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, DpmTable),
&table_start, 0x40000),
@@ -237,7 +237,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_addr = table_start +
offsetof(SMU73_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_addr,
(uint8_t *)&vr_config, sizeof(int32_t), 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Problems copying "
"vr_config value over to SMC",
@@ -245,7 +245,7 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
level_addr = table_start + offsetof(SMU73_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, level_addr,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, level_addr,
(uint8_t *)(&avfs_graphics_level), level_size, 0x40000),
"[AVFS][Fiji_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1;);
@@ -253,9 +253,9 @@ static int fiji_setup_graphics_level_structure(struct pp_smumgr *smumgr)
return 0;
}
-static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
+static int fiji_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool smu_started)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -265,17 +265,17 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
if (!smu_started)
break;
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_setup_graphics_level_structure(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Could not Copy Graphics Level"
" table over to SMU",
return -EINVAL;);
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_setup_pwr_virus(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Could not setup "
"Pwr Virus for AVFS ",
return -EINVAL;);
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(smumgr),
+ PP_ASSERT_WITH_CODE(0 == fiji_start_avfs_btc(hwmgr),
"[AVFS][fiji_avfs_event_mgr] Failure at "
"fiji_start_avfs_btc. AVFS Disabled",
return -EINVAL;);
@@ -293,64 +293,64 @@ static int fiji_avfs_event_mgr(struct pp_smumgr *smumgr, bool smu_started)
return 0;
}
-static int fiji_start_smu(struct pp_smumgr *smumgr)
+static int fiji_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend);
+ struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend);
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(smumgr)
- || cgs_is_virtualization_enabled(smumgr->device))) {
- fiji_avfs_event_mgr(smumgr, false);
+ if (!(smu7_is_smc_ram_running(hwmgr)
+ || cgs_is_virtualization_enabled(hwmgr->device))) {
+ fiji_avfs_event_mgr(hwmgr, false);
/* Check if SMU is running in protected mode */
- if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
- result = fiji_start_smu_in_non_protection_mode(smumgr);
+ result = fiji_start_smu_in_non_protection_mode(hwmgr);
if (result)
return result;
} else {
- result = fiji_start_smu_in_protection_mode(smumgr);
+ result = fiji_start_smu_in_protection_mode(hwmgr);
if (result)
return result;
}
- fiji_avfs_event_mgr(smumgr, true);
+ fiji_avfs_event_mgr(hwmgr, true);
}
	/* Initialize all clock gating before the RLC is loaded and running. */
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE);
- cgs_set_clockgating_state(smumgr->device,
+ cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE);
/* Setup SoftRegsStart here for register lookup in case
* DummyBackEnd is used and ProcessFirmwareHeader is not executed
*/
- smu7_read_smc_sram_dword(smumgr,
+ smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU73_Firmware_Header, SoftRegisters),
&(priv->smu7_data.soft_regs_start), 0x40000);
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
-static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
+static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
uint32_t efuse = 0;
uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1;
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
- if (!atomctrl_read_efuse(smumgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
+ if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB,
mask, &efuse)) {
if (efuse)
return true;
@@ -365,7 +365,7 @@ static bool fiji_is_hw_avfs_present(struct pp_smumgr *smumgr)
* @param hwmgr the address of the powerplay hardware manager.
*/
-static int fiji_smu_init(struct pp_smumgr *smumgr)
+static int fiji_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct fiji_smumgr *fiji_priv = NULL;
@@ -375,9 +375,9 @@ static int fiji_smu_init(struct pp_smumgr *smumgr)
if (fiji_priv == NULL)
return -ENOMEM;
- smumgr->backend = fiji_priv;
+ hwmgr->smu_backend = fiji_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU73_MAX_LEVELS_GRAPHICS; i++)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
index 51adf04ab4b3..efb0fc033274 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smc.c
@@ -101,7 +101,7 @@ static const struct iceland_pt_defaults defaults_icelandpro = {
static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
struct cgs_system_info sys_info = {0};
uint32_t dev_id;
@@ -130,7 +130,7 @@ static void iceland_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
@@ -144,7 +144,7 @@ static int iceland_populate_svi_load_line(struct pp_hwmgr *hwmgr)
static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
tdc_limit = (uint16_t)(hwmgr->dyn_state.cac_dtp_table->usTDC * 256);
@@ -159,11 +159,11 @@ static int iceland_populate_tdc_limit(struct pp_hwmgr *hwmgr)
static int iceland_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
uint32_t temp;
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (smu7_read_smc_sram_dword(hwmgr,
fuse_table_offset +
offsetof(SMU71_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, SMC_RAM_END))
@@ -184,7 +184,7 @@ static int iceland_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 8; i++)
@@ -193,14 +193,9 @@ static int iceland_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
uint16_t HiSidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
uint16_t LoSidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd;
struct phm_cac_tdp_table *cac_table = hwmgr->dyn_state.cac_dtp_table;
@@ -219,7 +214,7 @@ static int iceland_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
uint8_t *hi_vid = smu_data->power_tune_table.BapmVddCVidHiSidd;
uint8_t *lo_vid = smu_data->power_tune_table.BapmVddCVidLoSidd;
@@ -245,7 +240,7 @@ static int iceland_populate_bapm_vddc_vid_sidd(struct pp_hwmgr *hwmgr)
static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
{
int i;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
uint8_t *vid = smu_data->power_tune_table.VddCVid;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -264,12 +259,12 @@ static int iceland_populate_vddc_vid(struct pp_hwmgr *hwmgr)
static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
uint32_t pm_fuse_table_offset;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (smu7_read_smc_sram_dword(hwmgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, PmFuseTable),
&pm_fuse_table_offset, SMC_RAM_END))
@@ -317,19 +312,13 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
"Attempt to populate GnbLPML Failed!",
return -EINVAL);
- /* DW17 */
- if (iceland_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
/* DW18 */
if (iceland_populate_bapm_vddc_base_leakage_sidd(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate BapmVddCBaseLeakage Hi and Lo Sidd Failed!",
return -EINVAL);
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
(uint8_t *)&smu_data->power_tune_table,
sizeof(struct SMU71_Discrete_PmFuses), SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
@@ -339,7 +328,7 @@ static int iceland_populate_pm_fuses(struct pp_hwmgr *hwmgr)
return 0;
}
-static int iceland_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+static int iceland_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
struct phm_clock_voltage_dependency_table *allowed_clock_voltage_table,
uint32_t clock, uint32_t *vol)
{
@@ -601,7 +590,7 @@ static int iceland_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU71_Discret
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
uint32_t i;
/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
@@ -749,7 +738,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
result = iceland_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
/* populate graphics levels*/
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddc_dependency_on_sclk, engine_clock,
&graphic_level->MinVddc);
PP_ASSERT_WITH_CODE((0 == result),
@@ -816,7 +805,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
uint32_t level_array_adress = smu_data->smu7_data.dpm_table_start +
offsetof(SMU71_Discrete_DpmTable, GraphicsLevel);
@@ -892,7 +881,7 @@ int iceland_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
/* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_adress,
+ result = smu7_copy_bytes_to_smc(hwmgr, level_array_adress,
(uint8_t *)levels, (uint32_t)level_array_size,
SMC_RAM_END);
@@ -1104,7 +1093,7 @@ static int iceland_populate_single_memory_level(
uint32_t mclk_strobe_mode_threshold = 40000;
if (hwmgr->dyn_state.vddc_dependency_on_mclk != NULL) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddc_dependency_on_mclk, memory_clock, &memory_level->MinVddc);
PP_ASSERT_WITH_CODE((0 == result),
"can not find MinVddc voltage value from memory VDDC voltage dependency table", return result);
@@ -1113,7 +1102,7 @@ static int iceland_populate_single_memory_level(
if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE) {
memory_level->MinVddci = memory_level->MinVddc;
} else if (NULL != hwmgr->dyn_state.vddci_dependency_on_mclk) {
- result = iceland_get_dependecy_volt_by_clk(hwmgr,
+ result = iceland_get_dependency_volt_by_clk(hwmgr,
hwmgr->dyn_state.vddci_dependency_on_mclk,
memory_clock,
&memory_level->MinVddci);
@@ -1218,7 +1207,7 @@ static int iceland_populate_single_memory_level(
int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
int result;
@@ -1257,7 +1246,7 @@ int iceland_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
/* level count will send to smc once at init smc table and never change*/
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ result = smu7_copy_bytes_to_smc(hwmgr,
level_array_adress, (uint8_t *)levels, (uint32_t)level_array_size,
SMC_RAM_END);
@@ -1496,7 +1485,7 @@ static int iceland_populate_memory_timing_parameters(
static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
int result = 0;
SMU71_Discrete_MCArbDramTimingTable arb_regs;
uint32_t i, j;
@@ -1518,7 +1507,7 @@ static int iceland_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
if (0 == result) {
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.arb_table_start,
(uint8_t *)&arb_regs,
sizeof(SMU71_Discrete_MCArbDramTimingTable),
@@ -1534,7 +1523,7 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
{
int result = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
table->GraphicsBootLevel = 0;
table->MemoryBootLevel = 0;
@@ -1572,10 +1561,10 @@ static int iceland_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
return result;
}
-static int iceland_populate_mc_reg_address(struct pp_smumgr *smumgr,
+static int iceland_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
SMU71_Discrete_MCRegisters *mc_reg_table)
{
- const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)smumgr->backend;
+ const struct iceland_smumgr *smu_data = (struct iceland_smumgr *)hwmgr->smu_backend;
uint32_t i, j;
@@ -1612,13 +1601,12 @@ static void iceland_convert_mc_registers(
}
}
-static int iceland_convert_mc_reg_table_entry_to_smc(
- struct pp_smumgr *smumgr,
+static int iceland_convert_mc_reg_table_entry_to_smc(struct pp_hwmgr *hwmgr,
const uint32_t memory_clock,
SMU71_Discrete_MCRegisterSet *mc_reg_table_data
)
{
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
uint32_t i = 0;
for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
@@ -1648,7 +1636,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
res = iceland_convert_mc_reg_table_entry_to_smc(
- hwmgr->smumgr,
+ hwmgr,
data->dpm_table.mclk_table.dpm_levels[i].value,
&mc_regs->data[i]
);
@@ -1662,8 +1650,7 @@ static int iceland_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t address;
int32_t result;
@@ -1682,7 +1669,7 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
address = smu_data->smu7_data.mc_reg_table_start + (uint32_t)offsetof(SMU71_Discrete_MCRegisters, data[0]);
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, address,
+ return smu7_copy_bytes_to_smc(hwmgr, address,
(uint8_t *)&smu_data->mc_regs.data[0],
sizeof(SMU71_Discrete_MCRegisterSet) * data->dpm_table.mclk_table.count,
SMC_RAM_END);
@@ -1691,11 +1678,10 @@ static int iceland_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
memset(&smu_data->mc_regs, 0x00, sizeof(SMU71_Discrete_MCRegisters));
- result = iceland_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ result = iceland_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize MCRegTable for the MC register addresses!", return result;);
@@ -1703,14 +1689,14 @@ static int iceland_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to initialize MCRegTable for driver state!", return result;);
- return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+ return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
(uint8_t *)&smu_data->mc_regs, sizeof(SMU71_Discrete_MCRegisters), SMC_RAM_END);
}
static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
uint8_t count, level;
count = (uint8_t)(hwmgr->dyn_state.vddc_dependency_on_sclk->count);
@@ -1739,7 +1725,7 @@ static int iceland_populate_smc_initial_state(struct pp_hwmgr *hwmgr)
static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
const struct iceland_pt_defaults *defaults = smu_data->power_tune_defaults;
SMU71_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
struct phm_cac_tdp_table *cac_dtp_table = hwmgr->dyn_state.cac_dtp_table;
@@ -1776,7 +1762,7 @@ static int iceland_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_PkgPwrLimit);
CONVERT_FROM_HOST_TO_SMC_US(dpm_table->PPM_TemperatureLimit);
- dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+ dpm_table->BAPM_TEMP_GRADIENT = PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
def1 = defaults->bapmti_r;
def2 = defaults->bapmti_rc;
@@ -1827,7 +1813,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
SMU71_Discrete_DpmTable *table = &(smu_data->smc_state_table);
@@ -1955,7 +1941,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
table->BootMVdd = PP_HOST_TO_SMC_US(table->BootMVdd * VOLTAGE_SCALE);
/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.dpm_table_start +
+ result = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.dpm_table_start +
offsetof(SMU71_Discrete_DpmTable, SystemFlags),
(uint8_t *)&(table->SystemFlags),
sizeof(SMU71_Discrete_DpmTable)-3 * sizeof(SMU71_PIDController),
@@ -1965,7 +1951,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to upload dpm data to SMC memory!", return result;);
/* Upload all ulv setting to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ result = smu7_copy_bytes_to_smc(hwmgr,
smu_data->smu7_data.ulv_setting_starts,
(uint8_t *)&(smu_data->ulv_setting),
sizeof(SMU71_Discrete_Ulv),
@@ -1994,7 +1980,7 @@ int iceland_init_smc_table(struct pp_hwmgr *hwmgr)
*/
int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
SMU71_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
@@ -2064,7 +2050,7 @@ int iceland_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
/* fan_table.FanControl_GL_Flag = 1; */
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
+ res = smu7_copy_bytes_to_smc(hwmgr, smu7_data->fan_table_start, (uint8_t *)&fan_table, (uint32_t)sizeof(fan_table), SMC_RAM_END);
return 0;
}
@@ -2084,7 +2070,7 @@ static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
@@ -2101,7 +2087,7 @@ int iceland_update_sclk_threshold(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.dpm_table_start +
offsetof(SMU71_Discrete_DpmTable,
LowSclkInterruptThreshold),
@@ -2182,13 +2168,13 @@ uint32_t iceland_get_mac_definition(uint32_t value)
int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smumgr->backend);
+ struct smu7_smumgr *smu7_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t tmp;
int result;
bool error = false;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, DpmTable),
&tmp, SMC_RAM_END);
@@ -2199,7 +2185,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
@@ -2212,7 +2198,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, mcRegisterTable),
&tmp, SMC_RAM_END);
@@ -2221,7 +2207,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
smu7_data->mc_reg_table_start = tmp;
}
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, FanTable),
&tmp, SMC_RAM_END);
@@ -2232,7 +2218,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, mcArbDramTimingTable),
&tmp, SMC_RAM_END);
@@ -2244,7 +2230,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, Version),
&tmp, SMC_RAM_END);
@@ -2255,7 +2241,7 @@ int iceland_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU71_FIRMWARE_HEADER_LOCATION +
offsetof(SMU71_Firmware_Header, UlvSettings),
&tmp, SMC_RAM_END);
@@ -2522,7 +2508,7 @@ static int iceland_set_valid_flag(struct iceland_mc_reg_table *table)
int iceland_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smumgr->backend);
+ struct iceland_smumgr *smu_data = (struct iceland_smumgr *)(hwmgr->smu_backend);
pp_atomctrl_mc_reg_table *table;
struct iceland_mc_reg_table *ni_table = &smu_data->mc_reg_table;
uint8_t module_index = iceland_get_memory_modile_index(hwmgr);
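The iceland_smc.c hunks show the knock-on effect at the call sites: smu7_read_smc_sram_dword() and smu7_copy_bytes_to_smc() now take the hwmgr as their first argument, so the hwmgr->smumgr hop disappears. The prototypes below are inferred from those call sites (the real declarations live in the smu7 smumgr headers); the bodies are mocks so the sketch compiles on its own:

#include <stdint.h>

#define SMC_RAM_END 0x40000

struct pp_hwmgr { void *device; void *smu_backend; };

/* Inferred shape: first parameter is the hwmgr, not hwmgr->smumgr. */
static int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t addr,
				    uint32_t *value, uint32_t limit)
{
	(void)hwmgr; (void)limit;
	*value = addr;	/* mock: echo the address back */
	return 0;
}

static int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
				  const uint8_t *src, uint32_t byte_count,
				  uint32_t limit)
{
	(void)hwmgr; (void)src;
	return (smc_addr + byte_count <= limit) ? 0 : -1;	/* mock bound check */
}

/* Caller pattern matching the converted code above. */
static int upload_table(struct pp_hwmgr *hwmgr, const uint8_t *table, uint32_t size)
{
	uint32_t start;
	int ret = smu7_read_smc_sram_dword(hwmgr, 0x20000, &start, SMC_RAM_END);

	if (ret)
		return ret;
	return smu7_copy_bytes_to_smc(hwmgr, start, table, size, SMC_RAM_END);
}

int main(void)
{
	struct pp_hwmgr hwmgr = { 0 };
	uint8_t table[16] = { 0 };

	return upload_table(&hwmgr, table, sizeof(table));
}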
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 0bf2def3b659..78aa1122eacc 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -39,55 +39,55 @@
#define ICELAND_SMC_SIZE 0x20000
-static int iceland_start_smc(struct pp_smumgr *smumgr)
+static int iceland_start_smc(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
return 0;
}
-static void iceland_reset_smc(struct pp_smumgr *smumgr)
+static void iceland_reset_smc(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
}
-static void iceland_stop_smc_clock(struct pp_smumgr *smumgr)
+static void iceland_stop_smc_clock(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 1);
}
-static void iceland_start_smc_clock(struct pp_smumgr *smumgr)
+static void iceland_start_smc_clock(struct pp_hwmgr *hwmgr)
{
- SMUM_WRITE_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0,
ck_disable, 0);
}
-static int iceland_smu_start_smc(struct pp_smumgr *smumgr)
+static int iceland_smu_start_smc(struct pp_hwmgr *hwmgr)
{
/* set SMC instruction start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
/* enable smc clock */
- iceland_start_smc_clock(smumgr);
+ iceland_start_smc_clock(hwmgr);
/* de-assert reset */
- iceland_start_smc(smumgr);
+ iceland_start_smc(hwmgr);
- SMUM_WAIT_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS,
+ PHM_WAIT_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS,
INTERRUPTS_ENABLED, 1);
return 0;
}
-static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
+static int iceland_upload_smc_firmware_data(struct pp_hwmgr *hwmgr,
uint32_t length, const uint8_t *src,
uint32_t limit, uint32_t start_addr)
{
@@ -96,17 +96,17 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_0, start_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_0, start_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 1);
while (byte_count >= 4) {
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_0, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_0, data);
src += 4;
byte_count -= 4;
}
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, 0);
PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
@@ -114,16 +114,16 @@ static int iceland_upload_smc_firmware_data(struct pp_smumgr *smumgr,
}
-static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
+static int iceland_smu_upload_firmware_image(struct pp_hwmgr *hwmgr)
{
uint32_t val;
struct cgs_firmware_info info = {0};
- if (smumgr == NULL || smumgr->device == NULL)
+ if (hwmgr == NULL || hwmgr->device == NULL)
return -EINVAL;
/* load SMC firmware */
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
if (info.image_size & 3) {
@@ -137,56 +137,56 @@ static int iceland_smu_upload_firmware_image(struct pp_smumgr *smumgr)
}
/* wait for smc boot up */
- SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* clear firmware interrupt enable flag */
- val = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ val = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMC_SYSCON_MISC_CNTL);
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMC_SYSCON_MISC_CNTL, val | 1);
/* stop smc clock */
- iceland_stop_smc_clock(smumgr);
+ iceland_stop_smc_clock(hwmgr);
/* reset smc */
- iceland_reset_smc(smumgr);
- iceland_upload_smc_firmware_data(smumgr, info.image_size,
+ iceland_reset_smc(hwmgr);
+ iceland_upload_smc_firmware_data(hwmgr, info.image_size,
(uint8_t *)info.kptr, ICELAND_SMC_SIZE,
info.ucode_start_address);
return 0;
}
-static int iceland_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
+static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr,
uint32_t firmwareType)
{
return 0;
}
-static int iceland_start_smu(struct pp_smumgr *smumgr)
+static int iceland_start_smu(struct pp_hwmgr *hwmgr)
{
int result;
- result = iceland_smu_upload_firmware_image(smumgr);
+ result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(smumgr);
+ result = iceland_smu_start_smc(hwmgr);
if (result)
return result;
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
pr_info("smu not running, upload firmware again \n");
- result = iceland_smu_upload_firmware_image(smumgr);
+ result = iceland_smu_upload_firmware_image(hwmgr);
if (result)
return result;
- result = iceland_smu_start_smc(smumgr);
+ result = iceland_smu_start_smc(hwmgr);
if (result)
return result;
}
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
@@ -198,7 +198,7 @@ static int iceland_start_smu(struct pp_smumgr *smumgr)
* @param hwmgr the address of the powerplay hardware manager.
*/
-static int iceland_smu_init(struct pp_smumgr *smumgr)
+static int iceland_smu_init(struct pp_hwmgr *hwmgr)
{
int i;
struct iceland_smumgr *iceland_priv = NULL;
@@ -208,9 +208,9 @@ static int iceland_smu_init(struct pp_smumgr *smumgr)
if (iceland_priv == NULL)
return -ENOMEM;
- smumgr->backend = iceland_priv;
+ hwmgr->smu_backend = iceland_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU71_MAX_LEVELS_GRAPHICS; i++)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
index 8eae01b37c40..802472530d34 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.h
@@ -39,7 +39,7 @@ struct iceland_pt_defaults {
uint8_t tdc_waterfall_ctl;
uint8_t dte_ambient_temp_base;
uint32_t display_cac;
- uint32_t bamp_temp_gradient;
+ uint32_t bapm_temp_gradient;
uint16_t bapmti_r[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
uint16_t bapmti_rc[SMU71_DTE_ITERATIONS * SMU71_DTE_SOURCES * SMU71_DTE_SINKS];
};
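The header hunk fixes the bamp/bapm misspelling, and the BAPM_TEMP_GRADIENT use in iceland_smc.c changes in the same patch; any designated initializer or accessor naming the field has to move with it or the build breaks. A trivial sketch of that coupling, with a reduced stand-in struct and a placeholder value that is not taken from the driver tables:

#include <stdint.h>

struct iceland_pt_defaults_slim {	/* stand-in for the header's struct */
	uint32_t bapm_temp_gradient;	/* was: bamp_temp_gradient */
};

static const struct iceland_pt_defaults_slim defaults = {
	.bapm_temp_gradient = 0,	/* placeholder value */
};

int main(void)
{
	/* Both the initializer above and this read must use the new name. */
	return (int)defaults.bapm_temp_gradient;
}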
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
index 99a00bd39256..c92ea38d2e15 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smc.c
@@ -148,7 +148,7 @@ static uint16_t scale_fan_gain_settings(uint16_t raw_setting)
static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
@@ -196,7 +196,7 @@ static int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmg
static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn;
@@ -210,7 +210,7 @@ static int polaris10_populate_svi_load_line(struct pp_hwmgr *hwmgr)
static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
@@ -227,11 +227,11 @@ static int polaris10_populate_tdc_limit(struct pp_hwmgr *hwmgr)
static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
const struct polaris10_pt_defaults *defaults = smu_data->power_tune_defaults;
uint32_t temp;
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (smu7_read_smc_sram_dword(hwmgr,
fuse_table_offset +
offsetof(SMU74_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, SMC_RAM_END))
@@ -252,7 +252,7 @@ static int polaris10_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_of
static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
int i;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
@@ -263,7 +263,7 @@ static int polaris10_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
/* TODO: move to hwmgr */
if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
@@ -279,7 +279,7 @@ static int polaris10_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
int i;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
@@ -288,14 +288,9 @@ static int polaris10_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
return 0;
}
-static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
@@ -315,12 +310,12 @@ static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr
static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
uint32_t pm_fuse_table_offset;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU74_Firmware_Header, PmFuseTable),
&pm_fuse_table_offset, SMC_RAM_END))
@@ -358,17 +353,12 @@ static int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr)
"Attempt to populate GnbLPML Failed!",
return -EINVAL);
- if (polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML Min and Max Vid Failed!",
- return -EINVAL);
-
if (polaris10_populate_bapm_vddc_base_leakage_sidd(hwmgr))
PP_ASSERT_WITH_CODE(false,
"Attempt to populate BapmVddCBaseLeakage Hi and Lo "
"Sidd Failed!", return -EINVAL);
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
(uint8_t *)&smu_data->power_tune_table,
(sizeof(struct SMU74_Discrete_PmFuses) - 92), SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
@@ -494,7 +484,6 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
- struct pp_smumgr *smumgr = hwmgr->smumgr;
state->CcPwrDynRm = 0;
state->CcPwrDynRm1 = 0;
@@ -503,7 +492,7 @@ static int polaris10_populate_ulv_level(struct pp_hwmgr *hwmgr,
state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset *
VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
- if (smumgr->chip_id == CHIP_POLARIS12 || smumgr->is_kicker)
+ if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker)
state->VddcPhase = data->vddc_phase_shed_control ^ 0x3;
else
state->VddcPhase = (data->vddc_phase_shed_control) ? 0 : 1;
@@ -525,7 +514,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
struct SMU74_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
int i;
@@ -556,8 +545,7 @@ static int polaris10_populate_smc_link_level(struct pp_hwmgr *hwmgr,
static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
SMU74_Discrete_DpmTable *table)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
uint32_t i, ref_clk;
struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } };
@@ -607,8 +595,7 @@ static void polaris10_get_sclk_range_table(struct pp_hwmgr *hwmgr,
static int polaris10_calculate_sclk_params(struct pp_hwmgr *hwmgr,
uint32_t clock, SMU_SclkSetting *sclk_setting)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
const SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
struct pp_atomctrl_clock_dividers_ai dividers;
uint32_t ref_clock;
@@ -751,9 +738,8 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
*/
int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -838,7 +824,7 @@ int polaris10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
levels[1].pcieDpmLevel = mid_pcie_level_enabled;
}
/* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(smumgr, array, (uint8_t *)levels,
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
(uint32_t)array_size, SMC_RAM_END);
return result;
@@ -880,7 +866,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
if (mclk_stutter_mode_threshold &&
(clock <= mclk_stutter_mode_threshold) &&
- (SMUM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
+ (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL,
STUTTER_ENABLE) & 0x1))
mem_level->StutterEnable = true;
@@ -900,9 +886,8 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
*/
int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &hw_data->dpm_table;
int result;
/* populate MCLK dpm table to SMU7 */
@@ -943,7 +928,7 @@ int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
/* level count will send to smc once at init smc table and never change */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
(uint32_t)array_size, SMC_RAM_END);
return result;
@@ -1201,9 +1186,8 @@ static int polaris10_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr,
static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct SMU74_Discrete_MCArbDramTimingTable arb_regs;
uint32_t i, j;
int result = 0;
@@ -1222,7 +1206,7 @@ static int polaris10_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
}
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.arb_table_start,
(uint8_t *)&arb_regs,
sizeof(SMU74_Discrete_MCArbDramTimingTable),
@@ -1321,9 +1305,8 @@ static int polaris10_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint8_t count, level;
@@ -1354,8 +1337,7 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
{
uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
@@ -1438,7 +1420,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
struct SMU74_Discrete_DpmTable *table)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
uint16_t config;
config = VR_MERGED_WITH_VDDC;
@@ -1482,8 +1464,7 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
int result = 0;
@@ -1534,20 +1515,20 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
}
- result = smu7_read_smc_sram_dword(smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
&tmp, SMC_RAM_END);
- smu7_copy_bytes_to_smc(smumgr,
+ smu7_copy_bytes_to_smc(hwmgr,
tmp,
(uint8_t *)&AVFS_meanNsigma,
sizeof(AVFS_meanNsigma_t),
SMC_RAM_END);
- result = smu7_read_smc_sram_dword(smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
&tmp, SMC_RAM_END);
- smu7_copy_bytes_to_smc(smumgr,
+ smu7_copy_bytes_to_smc(hwmgr,
tmp,
(uint8_t *)&AVFS_SclkOffset,
sizeof(AVFS_Sclk_Offset_t),
@@ -1569,9 +1550,9 @@ static int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return 0 on success; otherwise a negative error code.
*/
-static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
+static int polaris10_init_arb_table_index(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
uint32_t tmp;
int result;
@@ -1583,7 +1564,7 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
* In reality this field should not be in that structure
* but in a soft register.
*/
- result = smu7_read_smc_sram_dword(smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
if (result)
@@ -1592,13 +1573,13 @@ static int polaris10_init_arb_table_index(struct pp_smumgr *smumgr)
tmp &= 0x00FFFFFF;
tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
- return smu7_write_smc_sram_dword(smumgr,
+ return smu7_write_smc_sram_dword(hwmgr,
smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
}
static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1615,7 +1596,7 @@ static void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct SMU74_Discrete_GraphicsLevel *levels =
data->smc_state_table.GraphicsLevel;
unsigned min_level = 1;
@@ -1658,9 +1639,9 @@ static void polaris10_save_default_power_profile(struct pp_hwmgr *hwmgr)
int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
+
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct SMU74_Discrete_DpmTable *table = &(smu_data->smc_state_table);
@@ -1852,7 +1833,7 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime);
/* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ result = smu7_copy_bytes_to_smc(hwmgr,
smu_data->smu7_data.dpm_table_start +
offsetof(SMU74_Discrete_DpmTable, SystemFlags),
(uint8_t *)&(table->SystemFlags),
@@ -1861,7 +1842,7 @@ int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(0 == result,
"Failed to upload dpm data to SMC memory!", return result);
- result = polaris10_init_arb_table_index(hwmgr->smumgr);
+ result = polaris10_init_arb_table_index(hwmgr);
PP_ASSERT_WITH_CODE(0 == result,
"Failed to upload arb data to SMC memory!", return result);
@@ -1888,17 +1869,16 @@ static int polaris10_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
int ret;
- struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
return 0;
- ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
- ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
+ ret = (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs) == 0) ?
0 : -1;
if (!ret)
@@ -1919,7 +1899,7 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
*/
int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
SMU74_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
@@ -2000,20 +1980,20 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
hwmgr->device, CGS_IND_REG__SMC,
CG_MULT_THERMAL_CTRL, TEMP_SEL);
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr, smu_data->smu7_data.fan_table_start,
+ res = smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.fan_table_start,
(uint8_t *)&fan_table, (uint32_t)sizeof(fan_table),
SMC_RAM_END);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
advanceFanControlParameters.ucMinimumPWMLimit);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
- res = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
@@ -2027,7 +2007,7 @@ int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2051,7 +2031,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_UVDDPM) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
return 0;
@@ -2059,7 +2039,7 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2083,7 +2063,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
return 0;
@@ -2091,7 +2071,7 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
@@ -2110,7 +2090,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SAMUDPM_SetEnabledMask,
(uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
return 0;
@@ -2119,7 +2099,7 @@ static int polaris10_update_samu_smc_table(struct pp_hwmgr *hwmgr)
static int polaris10_update_bif_smc_table(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table;
@@ -2157,7 +2137,7 @@ int polaris10_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
@@ -2174,7 +2154,7 @@ int polaris10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.dpm_table_start +
offsetof(SMU74_Discrete_DpmTable,
LowSclkInterruptThreshold),
@@ -2262,13 +2242,13 @@ uint32_t polaris10_get_mac_definition(uint32_t value)
*/
int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
{
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t tmp;
int result;
bool error = false;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU74_Firmware_Header, DpmTable),
&tmp, SMC_RAM_END);
@@ -2278,7 +2258,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU74_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
@@ -2290,7 +2270,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU74_Firmware_Header, mcRegisterTable),
&tmp, SMC_RAM_END);
@@ -2298,7 +2278,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
if (!result)
smu_data->smu7_data.mc_reg_table_start = tmp;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU74_Firmware_Header, FanTable),
&tmp, SMC_RAM_END);
@@ -2308,7 +2288,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU74_Firmware_Header, mcArbDramTimingTable),
&tmp, SMC_RAM_END);
@@ -2318,7 +2298,7 @@ int polaris10_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (0 != result);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION +
offsetof(SMU74_Firmware_Header, Version),
&tmp, SMC_RAM_END);
@@ -2342,7 +2322,7 @@ int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)
- (hwmgr->smumgr->backend);
+ (hwmgr->smu_backend);
struct SMU74_Discrete_GraphicsLevel *levels =
smu_data->smc_state_table.GraphicsLevel;
uint32_t array = smu_data->smu7_data.dpm_table_start +
@@ -2359,6 +2339,6 @@ int polaris10_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
levels[i].DownHyst = request->down_hyst;
}
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
array_size, SMC_RAM_END);
}
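The polaris10_smc.c hunks also show the messaging side of the conversion: smum_send_msg_to_smc() and smum_send_msg_to_smc_with_parameter() take the hwmgr now, and chip_id/is_kicker are read from the hwmgr rather than the smumgr. A compile-only sketch of that calling pattern; the message IDs below are placeholders, and the mock bodies stand in for the real SMC mailbox access:

#include <stdint.h>
#include <stdio.h>

struct pp_hwmgr { uint32_t chip_id; uint8_t is_kicker; };

#define PPSMC_MSG_SetGBDroopSettings	0x1001	/* placeholder ID */
#define PPSMC_MSG_EnableAvfs		0x1002	/* placeholder ID */

static int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	(void)hwmgr;
	printf("msg 0x%x\n", (unsigned int)msg);	/* mock mailbox write */
	return 0;
}

static int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
					       uint16_t msg, uint32_t parm)
{
	(void)hwmgr;
	printf("msg 0x%x parm 0x%x\n", (unsigned int)msg, (unsigned int)parm);
	return 0;
}

int main(void)
{
	struct pp_hwmgr hwmgr = { .chip_id = 0, .is_kicker = 0 };

	/* Mirrors the converted AVFS enable sequence: parameterized
	 * droop settings first, then the plain enable message. */
	smum_send_msg_to_smc_with_parameter(&hwmgr,
			PPSMC_MSG_SetGBDroopSettings, 0);
	return smum_send_msg_to_smc(&hwmgr, PPSMC_MSG_EnableAvfs);
}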
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 75f43dadc56b..22b8ecbf7fce 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -60,21 +60,21 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = {
0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
-static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
+static int polaris10_setup_pwr_virus(struct pp_hwmgr *hwmgr)
{
int i;
int result = -EINVAL;
uint32_t reg, data;
const PWR_Command_Table *pvirus = pwr_virus_table;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
for (i = 0; i < PWR_VIRUS_TABLE_SIZE; i++) {
switch (pvirus->command) {
case PwrCmdWrite:
reg = pvirus->reg;
data = pvirus->data;
- cgs_write_register(smumgr->device, reg, data);
+ cgs_write_register(hwmgr->device, reg, data);
break;
case PwrCmdEnd:
@@ -93,13 +93,13 @@ static int polaris10_setup_pwr_virus(struct pp_smumgr *smumgr)
return result;
}
-static int polaris10_perform_btc(struct pp_smumgr *smumgr)
+static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs.avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
+ if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs.avfs_btc_param)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -107,16 +107,16 @@ static int polaris10_perform_btc(struct pp_smumgr *smumgr)
if (smu_data->avfs.avfs_btc_param > 1) {
/* Soft-Reset to reset the engine before loading uCode */
/* halt */
- cgs_write_register(smumgr->device, mmCP_MEC_CNTL, 0x50000000);
+ cgs_write_register(hwmgr->device, mmCP_MEC_CNTL, 0x50000000);
/* reset everything */
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
- cgs_write_register(smumgr->device, mmGRBM_SOFT_RESET, 0);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0xffffffff);
+ cgs_write_register(hwmgr->device, mmGRBM_SOFT_RESET, 0);
}
return result;
}
-static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
+static int polaris10_setup_graphics_level_structure(struct pp_hwmgr *hwmgr)
{
uint32_t vr_config;
uint32_t dpm_table_start;
@@ -127,7 +127,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_size = sizeof(avfs_graphics_level_polaris10);
u16_boot_mvdd = PP_HOST_TO_SMC_US(1300 * VOLTAGE_SCALE);
- PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_read_smc_sram_dword(hwmgr,
SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, DpmTable),
&dpm_table_start, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] SMU could not communicate starting address of DPM table",
@@ -138,14 +138,14 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
vr_config_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VRConfig);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, vr_config_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, vr_config_address,
(uint8_t *)&vr_config, sizeof(uint32_t), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Problems copying VRConfig value over to SMC",
return -1);
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, GraphicsLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&avfs_graphics_level_polaris10),
graphics_level_size, 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of SCLK DPM table failed!",
@@ -153,7 +153,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, MemoryLevel);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&avfs_memory_level_polaris10), sizeof(avfs_memory_level_polaris10), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of MCLK DPM table failed!",
return -1);
@@ -162,7 +162,7 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
graphics_level_address = dpm_table_start + offsetof(SMU74_Discrete_DpmTable, BootMVdd);
- PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(smumgr, graphics_level_address,
+ PP_ASSERT_WITH_CODE(0 == smu7_copy_bytes_to_smc(hwmgr, graphics_level_address,
(uint8_t *)(&u16_boot_mvdd), sizeof(u16_boot_mvdd), 0x40000),
"[AVFS][Polaris10_SetupGfxLvlStruct] Copying of DPM table failed!",
return -1);
@@ -172,9 +172,9 @@ static int polaris10_setup_graphics_level_structure(struct pp_smumgr *smumgr)
static int
-polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
+polaris10_avfs_event_mgr(struct pp_hwmgr *hwmgr, bool SMU_VFT_INTACT)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
switch (smu_data->avfs.avfs_btc_status) {
case AVFS_BTC_COMPLETED_PREVIOUSLY:
@@ -183,20 +183,20 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
case AVFS_BTC_BOOT: /* Cold Boot State - Post SMU Start */
smu_data->avfs.avfs_btc_status = AVFS_BTC_DPMTABLESETUP_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_graphics_level_structure(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not Copy Graphics Level table over to SMU",
return -EINVAL);
if (smu_data->avfs.avfs_btc_param > 1) {
pr_info("[AVFS][Polaris10_AVFSEventMgr] AC BTC has not been successfully verified on Fiji. There may be in this setting.");
smu_data->avfs.avfs_btc_status = AVFS_BTC_VIRUS_FAIL;
- PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_setup_pwr_virus(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Could not setup Pwr Virus for AVFS ",
return -EINVAL);
}
smu_data->avfs.avfs_btc_status = AVFS_BTC_FAILED;
- PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(smumgr),
+ PP_ASSERT_WITH_CODE(0 == polaris10_perform_btc(hwmgr),
"[AVFS][Polaris10_AVFSEventMgr] Failure at SmuPolaris10_PerformBTC. AVFS Disabled",
return -EINVAL);
smu_data->avfs.avfs_btc_status = AVFS_BTC_ENABLEAVFS;
@@ -215,146 +215,146 @@ polaris10_avfs_event_mgr(struct pp_smumgr *smumgr, bool SMU_VFT_INTACT)
return 0;
}
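
polaris10_avfs_event_mgr() above is a small state machine over avfs_btc_status: the cold-boot case records the next failure state before attempting each step (graphics-level table copy, optional power-virus setup, BTC run), so an error mid-sequence leaves an accurate status behind. A hedged standalone model of that progression, with the transitions paraphrased from the hunk:

#include <stdbool.h>
#include <stdio.h>

enum avfs_btc_status {
	AVFS_BTC_BOOT,
	AVFS_BTC_DPMTABLESETUP_FAILED,
	AVFS_BTC_VIRUS_FAIL,
	AVFS_BTC_FAILED,
	AVFS_BTC_ENABLEAVFS,
};

/* Cold-boot path: pre-set the failure status, try the step, move on
 * only on success; the same shape as the AVFS_BTC_BOOT case above. */
static enum avfs_btc_status avfs_cold_boot(bool setup_ok, bool need_virus,
					   bool virus_ok, bool btc_ok)
{
	enum avfs_btc_status status = AVFS_BTC_DPMTABLESETUP_FAILED;

	if (!setup_ok)
		return status;
	if (need_virus) {
		status = AVFS_BTC_VIRUS_FAIL;
		if (!virus_ok)
			return status;
	}
	status = AVFS_BTC_FAILED;
	if (!btc_ok)
		return status;
	return AVFS_BTC_ENABLEAVFS;
}

int main(void)
{
	printf("status=%d\n", avfs_cold_boot(true, true, true, true));
	return 0;
}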
-static int polaris10_start_smu_in_protection_mode(struct pp_smumgr *smumgr)
+static int polaris10_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* Wait for smc boot up */
- /* SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
+ /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
/* Call Test SMU message with 0x20000 offset to trigger SMU start */
- smu7_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(hwmgr);
/* Wait done bit to be set */
/* Check pass/failed indicator */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0);
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_STATUS, SMU_PASS))
PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1);
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int polaris10_start_smu_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int polaris10_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- /* SMUM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL,
rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Set SMC instruction start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int polaris10_start_smu(struct pp_smumgr *smumgr)
+static int polaris10_start_smu(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+ struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
bool SMU_VFT_INTACT;
/* Only start SMC if SMC RAM is not running */
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
SMU_VFT_INTACT = false;
- smu_data->protected_mode = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
- smu_data->smu7_data.security_hard_key = (uint8_t) (SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
+ smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE));
+ smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL));
/* Check if SMU is running in protected mode */
if (smu_data->protected_mode == 0) {
- result = polaris10_start_smu_in_non_protection_mode(smumgr);
+ result = polaris10_start_smu_in_non_protection_mode(hwmgr);
} else {
- result = polaris10_start_smu_in_protection_mode(smumgr);
+ result = polaris10_start_smu_in_protection_mode(hwmgr);
/* If failed, try with different security Key. */
if (result != 0) {
smu_data->smu7_data.security_hard_key ^= 1;
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
- result = polaris10_start_smu_in_protection_mode(smumgr);
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
+ result = polaris10_start_smu_in_protection_mode(hwmgr);
}
}
if (result != 0)
PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result);
- polaris10_avfs_event_mgr(smumgr, true);
+ polaris10_avfs_event_mgr(hwmgr, true);
} else
SMU_VFT_INTACT = true; /*Driver went offline but SMU was still alive and contains the VFT table */
- polaris10_avfs_event_mgr(smumgr, SMU_VFT_INTACT);
+ polaris10_avfs_event_mgr(hwmgr, SMU_VFT_INTACT);
/* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */
- smu7_read_smc_sram_dword(smumgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
+ smu7_read_smc_sram_dword(hwmgr, SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, SoftRegisters),
&(smu_data->smu7_data.soft_regs_start), 0x40000);
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
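
polaris10_start_smu() encodes a simple recovery policy: if the protected-mode boot fails, toggle security_hard_key, release the cached SMU ucode via cgs_rel_firmware(), and retry once with the other signing key. A standalone sketch of that control flow with a stubbed boot routine:

#include <stdint.h>
#include <stdio.h>

/* Stub standing in for polaris10_start_smu_in_protection_mode(). */
static int boot_protected(uint8_t key)
{
	return key == 1 ? 0 : -1;	/* pretend only key 1 boots */
}

/* Mirrors the retry above: one extra attempt with the hard key
 * toggled; the real code also drops the SMU ucode in between. */
static int start_smu(uint8_t *security_hard_key)
{
	int result = boot_protected(*security_hard_key);

	if (result != 0) {
		*security_hard_key ^= 1;
		result = boot_protected(*security_hard_key);
	}
	return result;
}

int main(void)
{
	uint8_t key = 0;
	int result = start_smu(&key);

	printf("result=%d key=%u\n", result, key);
	return 0;
}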
-static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+static bool polaris10_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
uint32_t efuse;
- efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
efuse &= 0x00000001;
if (efuse)
return true;
@@ -362,7 +362,7 @@ static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
return false;
}
-static int polaris10_smu_init(struct pp_smumgr *smumgr)
+static int polaris10_smu_init(struct pp_hwmgr *hwmgr)
{
struct polaris10_smumgr *smu_data;
int i;
@@ -371,9 +371,9 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
if (smu_data == NULL)
return -ENOMEM;
- smumgr->backend = smu_data;
+ hwmgr->smu_backend = smu_data;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU74_MAX_LEVELS_GRAPHICS; i++)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
index ce0a30388ea1..b98ade676d12 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.c
@@ -48,20 +48,20 @@
#define smnMP1_FIRMWARE_FLAGS 0x3010028
-bool rv_is_smc_ram_running(struct pp_smumgr *smumgr)
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
uint32_t mp1_fw_flags, reg;
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
- cgs_write_register(smumgr->device, reg,
+ cgs_write_register(hwmgr->device, reg,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
- mp1_fw_flags = cgs_read_register(smumgr->device, reg);
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -69,97 +69,97 @@ bool rv_is_smc_ram_running(struct pp_smumgr *smumgr)
return false;
}
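
rv_is_smc_ram_running() reads an SMN-space word through a classic index/data pair: write the SMN address (smnMP1_FIRMWARE_FLAGS, 0x3010028) into PCIE_INDEX2, read the value back from PCIE_DATA2, and test the interrupts-enabled bit. A standalone model of the access pattern; the one-entry register file and the flag mask here are illustrative, not the real hardware layout:

#include <stdint.h>
#include <stdio.h>

#define FW_INTERRUPTS_ENABLED (1u << 0)	/* illustrative mask */

static uint32_t index_reg;			/* PCIE_INDEX2 stand-in */
static uint32_t smn_fw_flags = FW_INTERRUPTS_ENABLED;

static void write_index(uint32_t smn_addr) { index_reg = smn_addr; }

static uint32_t read_data(void)
{
	/* The data register returns whatever SMN word the index
	 * register currently points at; one address modeled here. */
	return index_reg == 0x3010028 ? smn_fw_flags : 0;
}

static int smc_ram_running(void)
{
	write_index(0x3010028);		/* smnMP1_FIRMWARE_FLAGS */
	return !!(read_data() & FW_INTERRUPTS_ENABLED);
}

int main(void)
{
	printf("running=%d\n", smc_ram_running());
	return 0;
}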
-static uint32_t rv_wait_for_response(struct pp_smumgr *smumgr)
+static uint32_t rv_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
- if (!rv_is_smc_ram_running(smumgr))
+ if (!rv_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- smum_wait_for_register_unequal(smumgr, reg,
+ phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(smumgr->device, reg);
+ return cgs_read_register(hwmgr->device, reg);
}
-int rv_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
+int rv_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!rv_is_smc_ram_running(smumgr))
+ if (!rv_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(smumgr->device, reg, msg);
+ cgs_write_register(hwmgr->device, reg, msg);
return 0;
}
-int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- *arg = cgs_read_register(smumgr->device, reg);
+ *arg = cgs_read_register(hwmgr->device, reg);
return 0;
}
-int rv_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int rv_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
- rv_wait_for_response(smumgr);
+ rv_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
- rv_send_msg_to_smc_without_waiting(smumgr, msg);
+ rv_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (rv_wait_for_response(smumgr) == 0)
+ if (rv_wait_for_response(hwmgr) == 0)
printk("Failed to send Message %x.\n", msg);
return 0;
}
-int rv_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int rv_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
- rv_wait_for_response(smumgr);
+ rv_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- rv_send_msg_to_smc_without_waiting(smumgr, msg);
+ rv_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (rv_wait_for_response(smumgr) == 0)
+ if (rv_wait_for_response(hwmgr) == 0)
printk("Failed to send Message %x.\n", msg);
return 0;
}
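
The rv_send_msg_* routines above share one mailbox handshake: drain any previous response from C2PMSG_90, clear it, park the argument in C2PMSG_82 if there is one, post the message to C2PMSG_66, then poll C2PMSG_90 until the firmware writes a non-zero status. A hedged pseudo-driver version of that sequence with a mocked firmware:

#include <stdint.h>
#include <stdio.h>

/* Mock mailbox: msg = C2PMSG_66, arg = C2PMSG_82, resp = C2PMSG_90. */
static uint32_t msg_reg, arg_reg, resp_reg;

/* Pretend firmware: acknowledges as soon as a message is posted. */
static void fw_poll(void) { if (msg_reg) resp_reg = 1; }

static uint32_t wait_for_response(void)
{
	fw_poll();	/* the real code polls the register with a timeout */
	return resp_reg;
}

/* Same handshake as rv_send_msg_to_smc_with_parameter() above. */
static int send_msg_with_parameter(uint16_t msg, uint32_t parameter)
{
	wait_for_response();	/* drain any previous response */
	resp_reg = 0;		/* clear C2PMSG_90 */
	arg_reg = parameter;	/* argument goes in C2PMSG_82 */
	msg_reg = msg;		/* posting to C2PMSG_66 starts the SMU */
	return wait_for_response() == 0 ? -1 : 0;
}

int main(void)
{
	printf("rc=%d\n", send_msg_with_parameter(0x42, 7));
	return 0;
}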
-int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -167,16 +167,16 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
"Invalid SMU Table version!", return -EINVAL;);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -188,11 +188,11 @@ int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
return 0;
}
-int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL;);
@@ -204,17 +204,17 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -223,15 +223,15 @@ int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
return 0;
}
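
Both table movers drive the same three-message protocol: publish the table's DRAM address as two 32-bit halves, then ask the firmware to transfer it in the wanted direction (TransferTableSmu2Dram or TransferTableDram2Smu). A worked example of the split carried by the table_addr_high/table_addr_low fields:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up GPU address; the real one comes from the allocator. */
	uint64_t mc_addr = 0x0000001fc0a00000ull;
	uint32_t addr_high = (uint32_t)(mc_addr >> 32);
	uint32_t addr_low = (uint32_t)(mc_addr & 0xffffffffu);

	/* These values feed PPSMC_MSG_SetDriverDramAddrHigh/Low; the
	 * kernel's upper_32_bits()/lower_32_bits() do the same split. */
	printf("SetDriverDramAddrHigh=0x%08x\n", addr_high);
	printf("SetDriverDramAddrLow =0x%08x\n", addr_low);
	return 0;
}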
-static int rv_verify_smc_interface(struct pp_smumgr *smumgr)
+static int rv_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_read_arg_from_smc(hwmgr,
&smc_driver_if_version),
"Attempt to read SMC IF Version Number Failed!",
return -EINVAL);
@@ -243,9 +243,9 @@ static int rv_verify_smc_interface(struct pp_smumgr *smumgr)
}
/* sdma is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_sdma(struct pp_smumgr *smumgr)
+static int rv_smc_enable_sdma(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_PowerUpSdma),
"Attempt to power up sdma Failed!",
return -EINVAL);
@@ -253,9 +253,9 @@ static int rv_smc_enable_sdma(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smc_disable_sdma(struct pp_smumgr *smumgr)
+static int rv_smc_disable_sdma(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc(hwmgr,
PPSMC_MSG_PowerDownSdma),
"Attempt to power down sdma Failed!",
return -EINVAL);
@@ -264,9 +264,9 @@ static int rv_smc_disable_sdma(struct pp_smumgr *smumgr)
}
/* vcn is disabled by default in vbios, need to re-enable in driver */
-static int rv_smc_enable_vcn(struct pp_smumgr *smumgr)
+static int rv_smc_enable_vcn(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerUpVcn, 0),
"Attempt to power up vcn Failed!",
return -EINVAL);
@@ -274,9 +274,9 @@ static int rv_smc_enable_vcn(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smc_disable_vcn(struct pp_smumgr *smumgr)
+static int rv_smc_disable_vcn(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(!rv_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerDownVcn, 0),
"Attempt to power down vcn Failed!",
return -EINVAL);
@@ -284,38 +284,38 @@ static int rv_smc_disable_vcn(struct pp_smumgr *smumgr)
return 0;
}
-static int rv_smu_fini(struct pp_smumgr *smumgr)
+static int rv_smu_fini(struct pp_hwmgr *hwmgr)
{
struct rv_smumgr *priv =
- (struct rv_smumgr *)(smumgr->backend);
+ (struct rv_smumgr *)(hwmgr->smu_backend);
if (priv) {
- rv_smc_disable_sdma(smumgr);
- rv_smc_disable_vcn(smumgr);
- cgs_free_gpu_mem(smumgr->device,
+ rv_smc_disable_sdma(hwmgr);
+ rv_smc_disable_vcn(hwmgr);
+ cgs_free_gpu_mem(hwmgr->device,
priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
priv->smu_tables.entry[CLOCKTABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
}
return 0;
}
-static int rv_start_smu(struct pp_smumgr *smumgr)
+static int rv_start_smu(struct pp_hwmgr *hwmgr)
{
- if (rv_verify_smc_interface(smumgr))
+ if (rv_verify_smc_interface(hwmgr))
return -EINVAL;
- if (rv_smc_enable_sdma(smumgr))
+ if (rv_smc_enable_sdma(hwmgr))
return -EINVAL;
- if (rv_smc_enable_vcn(smumgr))
+ if (rv_smc_enable_vcn(hwmgr))
return -EINVAL;
return 0;
}
-static int rv_smu_init(struct pp_smumgr *smumgr)
+static int rv_smu_init(struct pp_hwmgr *hwmgr)
{
struct rv_smumgr *priv;
uint64_t mc_addr;
@@ -327,10 +327,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
if (!priv)
return -ENOMEM;
- smumgr->backend = priv;
+ hwmgr->smu_backend = priv;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(Watermarks_t),
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -340,8 +340,8 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[rv_smu_init] Out of memory for wmtable.",
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
return -EINVAL);
priv->smu_tables.entry[WMTABLE].version = 0x01;
@@ -355,7 +355,7 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(DpmClocks_t),
CGS_GPU_MEM_TYPE__GART_CACHEABLE,
PAGE_SIZE,
@@ -365,10 +365,10 @@ static int rv_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[rv_smu_init] Out of memory for CLOCKTABLE.",
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
return -EINVAL);
priv->smu_tables.entry[CLOCKTABLE].version = 0x01;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
index 262c8ded87c0..58888400f1b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/rv_smumgr.h
@@ -51,11 +51,11 @@ struct rv_smumgr {
struct smu_table_array smu_tables;
};
-int rv_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg);
-bool rv_is_smc_ram_running(struct pp_smumgr *smumgr);
-int rv_copy_table_from_smc(struct pp_smumgr *smumgr,
+int rv_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+bool rv_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int rv_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int rv_copy_table_to_smc(struct pp_smumgr *smumgr,
+int rv_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index c49a6f22002f..2ae05bbdb974 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -34,18 +34,18 @@
#define SMU7_SMC_SIZE 0x20000
-static int smu7_set_smc_sram_address(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t limit)
+static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, smc_addr);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
return 0;
}
-int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
+int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
uint32_t data;
uint32_t addr;
@@ -59,7 +59,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
addr = smc_start_address;
while (byte_count >= 4) {
- smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
*dest = PP_SMC_TO_HOST_UL(data);
@@ -69,7 +69,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
}
if (byte_count) {
- smu7_read_smc_sram_dword(smumgr, addr, &data, limit);
+ smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
*pdata = PP_SMC_TO_HOST_UL(data);
/* Cast dest into byte type in dest_byte. This way, we don't overflow if the allocated memory is not 4-byte aligned. */
dest_byte = (uint8_t *)dest;
@@ -81,7 +81,7 @@ int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_addres
}
-int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
int result;
@@ -99,12 +99,12 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
/* Bytes are written into the SMC address space with the MSB first. */
data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
src += 4;
byte_count -= 4;
@@ -115,13 +115,13 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
data = 0;
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- original_data = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
extra_shift = 8 * (4 - byte_count);
@@ -135,53 +135,53 @@ int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
data |= (original_data & ~((~0UL) << extra_shift));
- result = smu7_set_smc_sram_address(smumgr, addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, addr, limit);
if (0 != result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, data);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
}
return 0;
}
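
smu7_copy_bytes_to_smc() packs each 4-byte group MSB-first before writing it through SMC_IND_DATA_11, and the tail case merges the new bytes with the untouched low bytes of the last SMC word. A standalone check of both steps (the tail arithmetic is reconstructed from the extra_shift logic above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same packing expression as the hunk: src[0] lands in the most
 * significant byte of the 32-bit SMC word. */
static uint32_t pack_msb_first(const uint8_t *src)
{
	return src[0] * 0x1000000u + src[1] * 0x10000u +
	       src[2] * 0x100u + src[3];
}

int main(void)
{
	const uint8_t src[4] = { 0xde, 0xad, 0xbe, 0xef };
	uint32_t data = pack_msb_first(src);

	assert(data == 0xdeadbeefu);

	/* Tail case: with byte_count == 2, extra_shift is 16 and the
	 * low half of the original SMC word must be preserved. */
	uint32_t original = 0x11223344u;
	uint32_t tail = (((uint32_t)src[0] << 8) | src[1]) << 16;
	tail |= original & ~(~0u << 16);	/* keep original low bytes */
	assert(tail == 0xdead3344u);

	printf("packed=0x%08x tail=0x%08x\n", data, tail);
	return 0;
}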
-int smu7_program_jump_on_start(struct pp_smumgr *smumgr)
+int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };
- smu7_copy_bytes_to_smc(smumgr, 0x0, data, 4, sizeof(data)+1);
+ smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);
return 0;
}
-bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr)
+bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
- return ((0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
- && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
+ return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
+ && (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}
-int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
int ret;
- if (!smu7_is_smc_ram_running(smumgr))
+ if (!smu7_is_smc_ram_running(hwmgr))
return -EINVAL;
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
pr_info("\n failed to send pre message %x ret is %d \n", msg, ret);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+ ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
pr_info("\n failed to send message %x ret is %d \n", msg, ret);
@@ -189,53 +189,53 @@ int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
return 0;
}
-int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg)
+int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
return 0;
}
-int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- if (!smu7_is_smc_ram_running(smumgr)) {
+ if (!smu7_is_smc_ram_running(hwmgr)) {
return -EINVAL;
}
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
- return smu7_send_msg_to_smc(smumgr, msg);
+ return smu7_send_msg_to_smc(hwmgr, msg);
}
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, parameter);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
- return smu7_send_msg_to_smc_without_waiting(smumgr, msg);
+ return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
}
-int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr)
+int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(smumgr->device, mmSMC_MSG_ARG_0, 0x20000);
+ cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
+ cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
+ PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
- if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
+ if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
pr_info("Failed to send Message.\n");
return 0;
}
-int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr)
+int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr)
{
- if (!smu7_is_smc_ram_running(smumgr))
+ if (!smu7_is_smc_ram_running(hwmgr))
return -EINVAL;
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0);
return 0;
}
@@ -289,29 +289,29 @@ enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
}
-int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
+int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
int result;
- result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
if (result)
return result;
- *value = cgs_read_register(smumgr->device, mmSMC_IND_DATA_11);
+ *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);
return 0;
}
-int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
+int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
int result;
- result = smu7_set_smc_sram_address(smumgr, smc_addr, limit);
+ result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);
if (result)
return result;
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, value);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);
return 0;
}
@@ -354,14 +354,14 @@ static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type)
return result;
}
-static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
+static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
uint32_t fw_type,
struct SMU_Entry *entry)
{
int result = 0;
struct cgs_firmware_info info = {0};
- result = cgs_get_firmware_info(smumgr->device,
+ result = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(fw_type),
&info);
@@ -374,7 +374,7 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
entry->meta_data_addr_low = 0;
/* digest need be excluded out */
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
info.image_size -= 20;
entry->data_size_byte = info.image_size;
entry->num_register_entries = 0;
@@ -389,30 +389,30 @@ static int smu7_populate_single_firmware_entry(struct pp_smumgr *smumgr,
return 0;
}
-int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
+int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_to_load;
int result = 0;
struct SMU_DRAMData_TOC *toc;
- if (!smumgr->reload_fw) {
+ if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
return 0;
}
if (smu_data->soft_regs_start)
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
- smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
0x0);
- if (smumgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
- if (!cgs_is_virtualization_enabled(smumgr->device)) {
- smu7_send_msg_to_smc_with_parameter(smumgr,
+ if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
+ if (!cgs_is_virtualization_enabled(hwmgr->device)) {
+ smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
smu_data->smu_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr,
+ smu7_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_LO,
smu_data->smu_buffer.mc_addr_low);
}
@@ -439,80 +439,79 @@ int smu7_request_smu_load_fw(struct pp_smumgr *smumgr)
toc->num_entries = 0;
toc->structure_version = 1;
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- if (cgs_is_virtualization_enabled(smumgr->device))
- PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(smumgr,
+ if (cgs_is_virtualization_enabled(hwmgr->device))
+ PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
- smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, smu_data->header_buffer.mc_addr_high);
+ smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, smu_data->header_buffer.mc_addr_low);
- if (smu7_send_msg_to_smc_with_parameter(smumgr, PPSMC_MSG_LoadUcodes, fw_to_load))
+ if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load))
pr_err("Fail to Request SMU Load uCode");
return result;
}
/* Check if the FW has been loaded, SMU will not return if loading has not finished. */
-int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type)
+int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type);
uint32_t ret;
- ret = smum_wait_on_indirect_register(smumgr, mmSMC_IND_INDEX_11,
- smu_data->soft_regs_start + smum_get_offsetof(smumgr,
+ ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
+ smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
SMU_SoftRegisters, UcodeLoadStatus),
fw_mask, fw_mask);
-
return ret;
}
-int smu7_reload_firmware(struct pp_smumgr *smumgr)
+int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
- return smumgr->smumgr_funcs->start_smu(smumgr);
+ return hwmgr->smumgr_funcs->start_smu(hwmgr);
}
-static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t length, uint32_t *src, uint32_t limit)
+static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
{
uint32_t byte_count = length;
PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);
- cgs_write_register(smumgr->device, mmSMC_IND_INDEX_11, 0x20000);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
+ cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);
for (; byte_count >= 4; byte_count -= 4)
- cgs_write_register(smumgr->device, mmSMC_IND_DATA_11, *src++);
+ cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);
- SMUM_WRITE_FIELD(smumgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
+ PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);
PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);
@@ -520,41 +519,41 @@ static int smu7_upload_smc_firmware_data(struct pp_smumgr *smumgr, uint32_t leng
}
-int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr)
+int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
{
int result = 0;
- struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
struct cgs_firmware_info info = {0};
if (smu_data->security_hard_key == 1)
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
else
- cgs_get_firmware_info(smumgr->device,
+ cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);
- smumgr->is_kicker = info.is_kicker;
+ hwmgr->is_kicker = info.is_kicker;
- result = smu7_upload_smc_firmware_data(smumgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
+ result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);
return result;
}
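
smu7_upload_smc_firmware_data() streams the whole image through one register by enabling auto-increment: program SMC_IND_INDEX_11 once, set AUTO_INCREMENT_IND_11, and each write to SMC_IND_DATA_11 lands at the next SMC word. A toy model of that burst-write pattern:

#include <stdint.h>
#include <stdio.h>

static uint32_t smc_ram[8];
static uint32_t index_reg;
static int auto_inc;

static void write_data(uint32_t v)
{
	smc_ram[index_reg / 4] = v;
	if (auto_inc)
		index_reg += 4;	/* hardware advances the pointer */
}

int main(void)
{
	const uint32_t image[3] = { 0x11, 0x22, 0x33 };
	uint32_t i;

	index_reg = 0;		/* the hunk writes 0x20000 here */
	auto_inc = 1;		/* AUTO_INCREMENT_IND_11 = 1 */
	for (i = 0; i < 3; i++)
		write_data(image[i]);
	auto_inc = 0;		/* AUTO_INCREMENT_IND_11 = 0 */

	printf("smc_ram[2]=0x%x\n", smc_ram[2]);
	return 0;
}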
-int smu7_init(struct pp_smumgr *smumgr)
+int smu7_init(struct pp_hwmgr *hwmgr)
{
struct smu7_smumgr *smu_data;
uint8_t *internal_buf;
uint64_t mc_addr = 0;
/* Allocate memory for backend private data */
- smu_data = (struct smu7_smumgr *)(smumgr->backend);
+ smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
smu_data->header_buffer.data_size =
((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;
/* Allocate FW image data structure and header buffer and
* send the header buffer address to SMU */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
smu_data->header_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -568,16 +567,16 @@ int smu7_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE((NULL != smu_data->header),
"Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)smu_data->header_buffer.handle);
return -EINVAL);
- if (cgs_is_virtualization_enabled(smumgr->device))
+ if (cgs_is_virtualization_enabled(hwmgr->device))
return 0;
smu_data->smu_buffer.data_size = 200*4096;
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
smu_data->smu_buffer.data_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -591,12 +590,12 @@ int smu7_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE((NULL != internal_buf),
"Out of memory.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)smu_data->smu_buffer.handle);
return -EINVAL);
- if (smum_is_hw_avfs_present(smumgr))
+ if (smum_is_hw_avfs_present(hwmgr))
smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
else
smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
@@ -605,12 +604,10 @@ int smu7_init(struct pp_smumgr *smumgr)
}
-int smu7_smu_fini(struct pp_smumgr *smumgr)
+int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
- if (smumgr->backend) {
- kfree(smumgr->backend);
- smumgr->backend = NULL;
- }
- cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU);
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
+ cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU);
return 0;
}
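
The simplified smu7_smu_fini() is safe because kfree(NULL) is defined as a no-op, so the old if (smumgr->backend) guard added nothing. The userspace analogue demonstrates the same contract:

#include <stdlib.h>

int main(void)
{
	void *backend = NULL;

	/* free(NULL), like kfree(NULL), is specified to do nothing,
	 * so no guard is needed before the call. */
	free(backend);
	return 0;
}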
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index ee5e32d2921e..0b63c5c1043c 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -60,32 +60,32 @@ struct smu7_smumgr {
};
-int smu7_copy_bytes_from_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
uint32_t *dest, uint32_t byte_count, uint32_t limit);
-int smu7_copy_bytes_to_smc(struct pp_smumgr *smumgr, uint32_t smc_start_address,
+int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
const uint8_t *src, uint32_t byte_count, uint32_t limit);
-int smu7_program_jump_on_start(struct pp_smumgr *smumgr);
-bool smu7_is_smc_ram_running(struct pp_smumgr *smumgr);
-int smu7_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, uint16_t msg);
-int smu7_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, uint16_t msg,
+int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
+bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
+int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
+int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_smumgr *smumgr,
+int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter);
-int smu7_send_msg_to_smc_offset(struct pp_smumgr *smumgr);
-int smu7_wait_for_smc_inactive(struct pp_smumgr *smumgr);
+int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
+int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
-int smu7_read_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
uint32_t *value, uint32_t limit);
-int smu7_write_smc_sram_dword(struct pp_smumgr *smumgr, uint32_t smc_addr,
+int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
uint32_t value, uint32_t limit);
-int smu7_request_smu_load_fw(struct pp_smumgr *smumgr);
-int smu7_check_fw_load_finish(struct pp_smumgr *smumgr, uint32_t fw_type);
-int smu7_reload_firmware(struct pp_smumgr *smumgr);
-int smu7_upload_smu_firmware_image(struct pp_smumgr *smumgr);
-int smu7_init(struct pp_smumgr *smumgr);
-int smu7_smu_fini(struct pp_smumgr *smumgr);
+int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr);
+int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type);
+int smu7_reload_firmware(struct pp_hwmgr *hwmgr);
+int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr);
+int smu7_init(struct pp_hwmgr *hwmgr);
+int smu7_smu_fini(struct pp_hwmgr *hwmgr);
#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 3bdf6478de7f..867388456530 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <drm/amdgpu_drm.h>
-#include "pp_instance.h"
#include "smumgr.h"
#include "cgs_common.h"
@@ -46,88 +45,18 @@ MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_smc.bin");
MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin");
-int smum_early_init(struct pp_instance *handle)
+int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr;
-
- if (handle == NULL)
- return -EINVAL;
-
- smumgr = kzalloc(sizeof(struct pp_smumgr), GFP_KERNEL);
- if (smumgr == NULL)
- return -ENOMEM;
-
- smumgr->device = handle->device;
- smumgr->chip_family = handle->chip_family;
- smumgr->chip_id = handle->chip_id;
- smumgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
- smumgr->reload_fw = 1;
- handle->smu_mgr = smumgr;
-
- switch (smumgr->chip_family) {
- case AMDGPU_FAMILY_CZ:
- smumgr->smumgr_funcs = &cz_smu_funcs;
- break;
- case AMDGPU_FAMILY_VI:
- switch (smumgr->chip_id) {
- case CHIP_TOPAZ:
- smumgr->smumgr_funcs = &iceland_smu_funcs;
- break;
- case CHIP_TONGA:
- smumgr->smumgr_funcs = &tonga_smu_funcs;
- break;
- case CHIP_FIJI:
- smumgr->smumgr_funcs = &fiji_smu_funcs;
- break;
- case CHIP_POLARIS11:
- case CHIP_POLARIS10:
- case CHIP_POLARIS12:
- smumgr->smumgr_funcs = &polaris10_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AMDGPU_FAMILY_AI:
- switch (smumgr->chip_id) {
- case CHIP_VEGA10:
- smumgr->smumgr_funcs = &vega10_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- case AMDGPU_FAMILY_RV:
- switch (smumgr->chip_id) {
- case CHIP_RAVEN:
- smumgr->smumgr_funcs = &rv_smu_funcs;
- break;
- default:
- return -EINVAL;
- }
- break;
- default:
- kfree(smumgr);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
-{
- if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable)
- return hwmgr->smumgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->thermal_avfs_enable)
+ return hwmgr->smumgr_funcs->thermal_avfs_enable(hwmgr);
return 0;
}
-int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
- void *input, void *output, void *storage, int result)
+int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table)
- return hwmgr->smumgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->thermal_setup_fan_table)
+ return hwmgr->smumgr_funcs->thermal_setup_fan_table(hwmgr);
return 0;
}
@@ -135,8 +64,8 @@ int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr,
int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->update_sclk_threshold)
- return hwmgr->smumgr->smumgr_funcs->update_sclk_threshold(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->update_sclk_threshold)
+ return hwmgr->smumgr_funcs->update_sclk_threshold(hwmgr);
return 0;
}
@@ -144,163 +73,75 @@ int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr)
int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->update_smc_table)
- return hwmgr->smumgr->smumgr_funcs->update_smc_table(hwmgr, type);
+ if (NULL != hwmgr->smumgr_funcs->update_smc_table)
+ return hwmgr->smumgr_funcs->update_smc_table(hwmgr, type);
return 0;
}
-uint32_t smum_get_offsetof(struct pp_smumgr *smumgr, uint32_t type, uint32_t member)
+uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, uint32_t type, uint32_t member)
{
- if (NULL != smumgr->smumgr_funcs->get_offsetof)
- return smumgr->smumgr_funcs->get_offsetof(type, member);
+ if (NULL != hwmgr->smumgr_funcs->get_offsetof)
+ return hwmgr->smumgr_funcs->get_offsetof(type, member);
return 0;
}
int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->process_firmware_header)
- return hwmgr->smumgr->smumgr_funcs->process_firmware_header(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->process_firmware_header)
+ return hwmgr->smumgr_funcs->process_firmware_header(hwmgr);
return 0;
}
-int smum_get_argument(struct pp_smumgr *smumgr)
+int smum_get_argument(struct pp_hwmgr *hwmgr)
{
- if (NULL != smumgr->smumgr_funcs->get_argument)
- return smumgr->smumgr_funcs->get_argument(smumgr);
+ if (NULL != hwmgr->smumgr_funcs->get_argument)
+ return hwmgr->smumgr_funcs->get_argument(hwmgr);
return 0;
}
-uint32_t smum_get_mac_definition(struct pp_smumgr *smumgr, uint32_t value)
+uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
{
- if (NULL != smumgr->smumgr_funcs->get_mac_definition)
- return smumgr->smumgr_funcs->get_mac_definition(value);
+ if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
+ return hwmgr->smumgr_funcs->get_mac_definition(value);
return 0;
}
-int smum_download_powerplay_table(struct pp_smumgr *smumgr,
- void **table)
+int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table)
{
- if (NULL != smumgr->smumgr_funcs->download_pptable_settings)
- return smumgr->smumgr_funcs->download_pptable_settings(smumgr,
+ if (NULL != hwmgr->smumgr_funcs->download_pptable_settings)
+ return hwmgr->smumgr_funcs->download_pptable_settings(hwmgr,
table);
return 0;
}
-int smum_upload_powerplay_table(struct pp_smumgr *smumgr)
+int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != smumgr->smumgr_funcs->upload_pptable_settings)
- return smumgr->smumgr_funcs->upload_pptable_settings(smumgr);
+ if (NULL != hwmgr->smumgr_funcs->upload_pptable_settings)
+ return hwmgr->smumgr_funcs->upload_pptable_settings(hwmgr);
return 0;
}
-int smum_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
- if (smumgr == NULL || smumgr->smumgr_funcs->send_msg_to_smc == NULL)
+ if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
return -EINVAL;
- return smumgr->smumgr_funcs->send_msg_to_smc(smumgr, msg);
+ return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
}
-int smum_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
- if (smumgr == NULL ||
- smumgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
- return -EINVAL;
- return smumgr->smumgr_funcs->send_msg_to_smc_with_parameter(
- smumgr, msg, parameter);
-}
-
-/*
- * Returns once the part of the register indicated by the mask has
- * reached the given value.
- */
-int smum_wait_on_register(struct pp_smumgr *smumgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- for (i = 0; i < smumgr->usec_timeout; i++) {
- cur_value = cgs_read_register(smumgr->device, index);
- if ((cur_value & mask) == (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic*/
- if (i == smumgr->usec_timeout)
- return -1;
-
- return 0;
-}
-
-int smum_wait_for_register_unequal(struct pp_smumgr *smumgr,
- uint32_t index,
- uint32_t value, uint32_t mask)
-{
- uint32_t i;
- uint32_t cur_value;
-
- if (smumgr == NULL)
+ if (hwmgr == NULL ||
+ hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
return -EINVAL;
-
- for (i = 0; i < smumgr->usec_timeout; i++) {
- cur_value = cgs_read_register(smumgr->device,
- index);
- if ((cur_value & mask) != (value & mask))
- break;
- udelay(1);
- }
-
- /* timeout means wrong logic */
- if (i == smumgr->usec_timeout)
- return -1;
-
- return 0;
-}
-
-
-/*
- * Returns once the part of the register indicated by the mask
- * has reached the given value.The indirect space is described by
- * giving the memory-mapped index of the indirect index register.
- */
-int smum_wait_on_indirect_register(struct pp_smumgr *smumgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return -EINVAL;
-
- cgs_write_register(smumgr->device, indirect_port, index);
- return smum_wait_on_register(smumgr, indirect_port + 1,
- mask, value);
-}
-
-void smum_wait_for_indirect_register_unequal(
- struct pp_smumgr *smumgr,
- uint32_t indirect_port,
- uint32_t index,
- uint32_t value,
- uint32_t mask)
-{
- if (smumgr == NULL || smumgr->device == NULL)
- return;
- cgs_write_register(smumgr->device, indirect_port, index);
- smum_wait_for_register_unequal(smumgr, indirect_port + 1,
- value, mask);
+ return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+ hwmgr, msg, parameter);
}
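
The deleted smum_wait_* helpers all implemented one polling contract: read the register once per microsecond until the masked field matches (or, for the *_unequal variants, stops matching) the expected value, treating an exhausted timeout as a driver logic error. A condensed sketch of that contract, assuming the timeout and device handle are reachable from struct pp_hwmgr the way the replacement phm_wait_* helpers use them:

static int wait_on_register_sketch(struct pp_hwmgr *hwmgr, uint32_t index,
				   uint32_t value, uint32_t mask)
{
	uint32_t i, cur_value;

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) == (value & mask))
			return 0;	/* masked field reached the value */
		udelay(1);
	}
	return -1;	/* timeout means wrong logic */
}
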
int smu_allocate_memory(void *device, uint32_t size,
@@ -316,7 +157,7 @@ int smu_allocate_memory(void *device, uint32_t size,
return -EINVAL;
ret = cgs_alloc_gpu_mem(device, type, size, byte_align,
- 0, 0, (cgs_handle_t *)handle);
+ (cgs_handle_t *)handle);
if (ret)
return -ENOMEM;
@@ -356,24 +197,24 @@ int smu_free_memory(void *device, void *handle)
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->init_smc_table)
- return hwmgr->smumgr->smumgr_funcs->init_smc_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->init_smc_table)
+ return hwmgr->smumgr_funcs->init_smc_table(hwmgr);
return 0;
}
int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->populate_all_graphic_levels)
+ return hwmgr->smumgr_funcs->populate_all_graphic_levels(hwmgr);
return 0;
}
int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->populate_all_memory_levels)
+ return hwmgr->smumgr_funcs->populate_all_memory_levels(hwmgr);
return 0;
}
@@ -381,16 +222,16 @@ int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
/* this interface is needed by CI/VI islands */
int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table)
- return hwmgr->smumgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->initialize_mc_reg_table)
+ return hwmgr->smumgr_funcs->initialize_mc_reg_table(hwmgr);
return 0;
}
bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
{
- if (NULL != hwmgr->smumgr->smumgr_funcs->is_dpm_running)
- return hwmgr->smumgr->smumgr_funcs->is_dpm_running(hwmgr);
+ if (NULL != hwmgr->smumgr_funcs->is_dpm_running)
+ return hwmgr->smumgr_funcs->is_dpm_running(hwmgr);
return true;
}
@@ -398,17 +239,17 @@ bool smum_is_dpm_running(struct pp_hwmgr *hwmgr)
int smum_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
- if (hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels)
- return hwmgr->smumgr->smumgr_funcs->populate_requested_graphic_levels(
+ if (hwmgr->smumgr_funcs->populate_requested_graphic_levels)
+ return hwmgr->smumgr_funcs->populate_requested_graphic_levels(
hwmgr, request);
return 0;
}
-bool smum_is_hw_avfs_present(struct pp_smumgr *smumgr)
+bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr)
{
- if (smumgr->smumgr_funcs->is_hw_avfs_present)
- return smumgr->smumgr_funcs->is_hw_avfs_present(smumgr);
+ if (hwmgr->smumgr_funcs->is_hw_avfs_present)
+ return hwmgr->smumgr_funcs->is_hw_avfs_present(hwmgr);
return false;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
index 65d3a4893958..1f720ccdaf99 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.c
@@ -97,7 +97,7 @@ static const uint8_t tonga_clock_stretch_amount_conversion[2][6] = {
*/
-static int tonga_get_dependecy_volt_by_clk(struct pp_hwmgr *hwmgr,
+static int tonga_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
phm_ppt_v1_clock_voltage_dependency_table *allowed_clock_voltage_table,
uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd)
{
@@ -406,7 +406,7 @@ static int tonga_populate_smc_link_level(struct pp_hwmgr *hwmgr, SMU72_Discrete_
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
uint32_t i;
/* Index (dpm_table->pcie_speed_table.count) is reserved for PCIE boot level. */
@@ -539,7 +539,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
result = tonga_calculate_sclk_params(hwmgr, engine_clock, graphic_level);
/* populate graphics levels*/
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
+ result = tonga_get_dependency_volt_by_clk(hwmgr,
pptable_info->vdd_dep_on_sclk, engine_clock,
&graphic_level->MinVoltage, &mvdd);
PP_ASSERT_WITH_CODE((!result),
@@ -598,7 +598,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr,
int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
struct phm_ppt_v1_pcie_table *pcie_table = pptable_info->pcie_table;
@@ -690,7 +690,7 @@ int tonga_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.GraphicsLevel[1].pcieDpmLevel = mid_pcie_level_enabled;
}
/* the level count is sent to the SMC once at SMC-table init and never changes */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr, level_array_address,
+ result = smu7_copy_bytes_to_smc(hwmgr, level_array_address,
(uint8_t *)levels, (uint32_t)level_array_size,
SMC_RAM_END);
@@ -895,7 +895,7 @@ static int tonga_populate_single_memory_level(
uint32_t mclk_strobe_mode_threshold = 40000;
if (NULL != pptable_info->vdd_dep_on_mclk) {
- result = tonga_get_dependecy_volt_by_clk(hwmgr,
+ result = tonga_get_dependency_volt_by_clk(hwmgr,
pptable_info->vdd_dep_on_mclk,
memory_clock,
&memory_level->MinVoltage, &mvdd);
@@ -1002,7 +1002,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
struct smu7_dpm_table *dpm_table = &data->dpm_table;
int result;
@@ -1048,7 +1048,7 @@ int tonga_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.MemoryLevel[dpm_table->mclk_table.count-1].DisplayWatermark = PPSMC_DISPLAY_WATERMARK_HIGH;
/* the level count is sent to the SMC once at SMC-table init and never changes */
- result = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ result = smu7_copy_bytes_to_smc(hwmgr,
level_array_address, (uint8_t *)levels, (uint32_t)level_array_size,
SMC_RAM_END);
@@ -1090,7 +1090,7 @@ static int tonga_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
{
int result = 0;
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct pp_atomctrl_clock_dividers_vi dividers;
@@ -1454,7 +1454,7 @@ static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
int result = 0;
SMU72_Discrete_MCArbDramTimingTable arb_regs;
uint32_t i, j;
@@ -1475,7 +1475,7 @@ static int tonga_program_memory_timing_parameters(struct pp_hwmgr *hwmgr)
if (!result) {
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.arb_table_start,
(uint8_t *)&arb_regs,
sizeof(SMU72_Discrete_MCArbDramTimingTable),
@@ -1492,7 +1492,7 @@ static int tonga_populate_smc_boot_level(struct pp_hwmgr *hwmgr,
int result = 0;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
table->GraphicsBootLevel = 0;
table->MemoryBootLevel = 0;
@@ -1543,7 +1543,7 @@ static int tonga_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
volt_with_cks, value;
uint16_t clock_freq_u16;
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
volt_offset = 0;
struct phm_ppt_v1_information *table_info =
@@ -1782,9 +1782,9 @@ static int tonga_populate_vr_config(struct pp_hwmgr *hwmgr,
* @param hwmgr the address of the powerplay hardware manager.
* @return always 0
*/
-static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
+static int tonga_init_arb_table_index(struct pp_hwmgr *hwmgr)
{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
uint32_t tmp;
int result;
@@ -1797,7 +1797,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
* In reality this field should not be in that structure
* but in a soft register.
*/
- result = smu7_read_smc_sram_dword(smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
smu_data->smu7_data.arb_table_start, &tmp, SMC_RAM_END);
if (result != 0)
@@ -1806,7 +1806,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
tmp &= 0x00FFFFFF;
tmp |= ((uint32_t)MC_CG_ARB_FREQ_F1) << 24;
- return smu7_write_smc_sram_dword(smumgr,
+ return smu7_write_smc_sram_dword(hwmgr,
smu_data->smu7_data.arb_table_start, tmp, SMC_RAM_END);
}
@@ -1814,7 +1814,7 @@ static int tonga_init_arb_table_index(struct pp_smumgr *smumgr)
static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
SMU72_Discrete_DpmTable *dpm_table = &(smu_data->smc_state_table);
struct phm_ppt_v1_information *table_info =
@@ -1838,7 +1838,7 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
dpm_table->DTEAmbientTempBase = defaults->dte_ambient_temp_base;
dpm_table->BAPM_TEMP_GRADIENT =
- PP_HOST_TO_SMC_UL(defaults->bamp_temp_gradient);
+ PP_HOST_TO_SMC_UL(defaults->bapm_temp_gradient);
pdef1 = defaults->bapmti_r;
pdef2 = defaults->bapmti_rc;
@@ -1861,7 +1861,7 @@ static int tonga_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr)
static int tonga_populate_svi_load_line(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
smu_data->power_tune_table.SviLoadLineEn = defaults->svi_load_line_en;
@@ -1876,7 +1876,7 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
{
uint16_t tdc_limit;
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1897,11 +1897,11 @@ static int tonga_populate_tdc_limit(struct pp_hwmgr *hwmgr)
static int tonga_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset)
{
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
const struct tonga_pt_defaults *defaults = smu_data->power_tune_defaults;
uint32_t temp;
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (smu7_read_smc_sram_dword(hwmgr,
fuse_table_offset +
offsetof(SMU72_Discrete_PmFuses, TdcWaterfallCtl),
(uint32_t *)&temp, SMC_RAM_END))
@@ -1919,7 +1919,7 @@ static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
{
int i;
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
@@ -1930,7 +1930,7 @@ static int tonga_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
static int tonga_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
if ((hwmgr->thermal_controller.advanceFanControlParameters.
usFanOutputSensitivity & (1 << 15)) ||
@@ -1949,7 +1949,7 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
{
int i;
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
/* Currently not used. Set all to zero. */
for (i = 0; i < 16; i++)
@@ -1958,15 +1958,10 @@ static int tonga_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
return 0;
}
-static int tonga_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
-{
- return 0;
-}
-
static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd;
@@ -1987,12 +1982,12 @@ static int tonga_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
uint32_t pm_fuse_table_offset;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_PowerContainment)) {
- if (smu7_read_smc_sram_dword(hwmgr->smumgr,
+ if (smu7_read_smc_sram_dword(hwmgr,
SMU72_FIRMWARE_HEADER_LOCATION +
offsetof(SMU72_Firmware_Header, PmFuseTable),
&pm_fuse_table_offset, SMC_RAM_END))
@@ -2035,13 +2030,6 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
"Attempt to populate GnbLPML Failed !",
return -EINVAL);
- /* DW19 */
- if (tonga_min_max_vgnb_lpml_id_from_bapm_vddc(hwmgr))
- PP_ASSERT_WITH_CODE(false,
- "Attempt to populate GnbLPML "
- "Min and Max Vid Failed !",
- return -EINVAL);
-
/* DW20 */
if (tonga_populate_bapm_vddc_base_leakage_sidd(hwmgr))
PP_ASSERT_WITH_CODE(
@@ -2050,7 +2038,7 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
"Hi and Lo Sidd Failed !",
return -EINVAL);
- if (smu7_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset,
+ if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset,
(uint8_t *)&smu_data->power_tune_table,
sizeof(struct SMU72_Discrete_PmFuses), SMC_RAM_END))
PP_ASSERT_WITH_CODE(false,
@@ -2060,10 +2048,10 @@ static int tonga_populate_pm_fuses(struct pp_hwmgr *hwmgr)
return 0;
}
-static int tonga_populate_mc_reg_address(struct pp_smumgr *smumgr,
+static int tonga_populate_mc_reg_address(struct pp_hwmgr *hwmgr,
SMU72_Discrete_MCRegisters *mc_reg_table)
{
- const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)smumgr->backend;
+ const struct tonga_smumgr *smu_data = (struct tonga_smumgr *)hwmgr->smu_backend;
uint32_t i, j;
@@ -2104,12 +2092,12 @@ static void tonga_convert_mc_registers(
}
static int tonga_convert_mc_reg_table_entry_to_smc(
- struct pp_smumgr *smumgr,
+ struct pp_hwmgr *hwmgr,
const uint32_t memory_clock,
SMU72_Discrete_MCRegisterSet *mc_reg_table_data
)
{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
uint32_t i = 0;
for (i = 0; i < smu_data->mc_reg_table.num_entries; i++) {
@@ -2139,7 +2127,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
res = tonga_convert_mc_reg_table_entry_to_smc(
- hwmgr->smumgr,
+ hwmgr,
data->dpm_table.mclk_table.dpm_levels[i].value,
&mc_regs->data[i]
);
@@ -2153,8 +2141,7 @@ static int tonga_convert_mc_reg_table_to_smc(struct pp_hwmgr *hwmgr,
static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
{
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
uint32_t address;
int32_t result;
@@ -2175,7 +2162,7 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
(uint32_t)offsetof(SMU72_Discrete_MCRegisters, data[0]);
return smu7_copy_bytes_to_smc(
- hwmgr->smumgr, address,
+ hwmgr, address,
(uint8_t *)&smu_data->mc_regs.data[0],
sizeof(SMU72_Discrete_MCRegisterSet) *
data->dpm_table.mclk_table.count,
@@ -2185,11 +2172,10 @@ static int tonga_update_and_upload_mc_reg_table(struct pp_hwmgr *hwmgr)
static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct pp_smumgr *smumgr = hwmgr->smumgr;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
memset(&smu_data->mc_regs, 0x00, sizeof(SMU72_Discrete_MCRegisters));
- result = tonga_populate_mc_reg_address(smumgr, &(smu_data->mc_regs));
+ result = tonga_populate_mc_reg_address(hwmgr, &(smu_data->mc_regs));
PP_ASSERT_WITH_CODE(!result,
"Failed to initialize MCRegTable for the MC register addresses !",
return result;);
@@ -2199,13 +2185,13 @@ static int tonga_populate_initial_mc_reg_table(struct pp_hwmgr *hwmgr)
"Failed to initialize MCRegTable for driver state !",
return result;);
- return smu7_copy_bytes_to_smc(smumgr, smu_data->smu7_data.mc_reg_table_start,
+ return smu7_copy_bytes_to_smc(hwmgr, smu_data->smu7_data.mc_reg_table_start,
(uint8_t *)&smu_data->mc_regs, sizeof(SMU72_Discrete_MCRegisters), SMC_RAM_END);
}
static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2221,7 +2207,7 @@ static void tonga_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
static void tonga_save_default_power_profile(struct pp_hwmgr *hwmgr)
{
- struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct tonga_smumgr *data = (struct tonga_smumgr *)(hwmgr->smu_backend);
struct SMU72_Discrete_GraphicsLevel *levels =
data->smc_state_table.GraphicsLevel;
unsigned min_level = 1;
@@ -2267,7 +2253,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
int result;
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
SMU72_Discrete_DpmTable *table = &(smu_data->smc_state_table);
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2483,7 +2469,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
/* Upload all dpm data to SMC memory (dpm level, dpm level count, etc.) */
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.dpm_table_start + offsetof(SMU72_Discrete_DpmTable, SystemFlags),
(uint8_t *)&(table->SystemFlags),
sizeof(SMU72_Discrete_DpmTable) - 3 * sizeof(SMU72_PIDController),
@@ -2492,7 +2478,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to upload dpm data to SMC memory !", return result;);
- result = tonga_init_arb_table_index(hwmgr->smumgr);
+ result = tonga_init_arb_table_index(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to upload arb data to SMC memory !", return result);
@@ -2521,7 +2507,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
SMU72_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
uint32_t duty100;
uint32_t t_diff1, t_diff2, pwm_diff1, pwm_diff2;
@@ -2600,7 +2586,7 @@ int tonga_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
fan_table.FanControl_GL_Flag = 1;
- res = smu7_copy_bytes_to_smc(hwmgr->smumgr,
+ res = smu7_copy_bytes_to_smc(hwmgr,
smu_data->smu7_data.fan_table_start,
(uint8_t *)&fan_table,
(uint32_t)sizeof(fan_table),
@@ -2625,7 +2611,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
int result = 0;
uint32_t low_sclk_interrupt_threshold = 0;
@@ -2642,7 +2628,7 @@ int tonga_update_sclk_threshold(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold);
result = smu7_copy_bytes_to_smc(
- hwmgr->smumgr,
+ hwmgr,
smu_data->smu7_data.dpm_table_start +
offsetof(SMU72_Discrete_DpmTable,
LowSclkInterruptThreshold),
@@ -2728,7 +2714,7 @@ uint32_t tonga_get_mac_definition(uint32_t value)
static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2753,7 +2739,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_UVDDPM) ||
phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
(uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
return 0;
@@ -2762,7 +2748,7 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *smu_data =
- (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ (struct tonga_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -2784,7 +2770,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
(uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
return 0;
@@ -2792,7 +2778,7 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
{
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
uint32_t mm_boot_level_offset, mm_boot_level_value;
smu_data->smc_state_table.SamuBootLevel = 0;
@@ -2810,7 +2796,7 @@ static int tonga_update_samu_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_StablePState))
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SAMUDPM_SetEnabledMask,
(uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel));
return 0;
@@ -2844,13 +2830,13 @@ int tonga_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type)
int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
{
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
uint32_t tmp;
int result;
bool error = false;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU72_FIRMWARE_HEADER_LOCATION +
offsetof(SMU72_Firmware_Header, DpmTable),
&tmp, SMC_RAM_END);
@@ -2860,7 +2846,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (result != 0);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU72_FIRMWARE_HEADER_LOCATION +
offsetof(SMU72_Firmware_Header, SoftRegisters),
&tmp, SMC_RAM_END);
@@ -2873,7 +2859,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (result != 0);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU72_FIRMWARE_HEADER_LOCATION +
offsetof(SMU72_Firmware_Header, mcRegisterTable),
&tmp, SMC_RAM_END);
@@ -2881,7 +2867,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
if (!result)
smu_data->smu7_data.mc_reg_table_start = tmp;
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU72_FIRMWARE_HEADER_LOCATION +
offsetof(SMU72_Firmware_Header, FanTable),
&tmp, SMC_RAM_END);
@@ -2891,7 +2877,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (result != 0);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU72_FIRMWARE_HEADER_LOCATION +
offsetof(SMU72_Firmware_Header, mcArbDramTimingTable),
&tmp, SMC_RAM_END);
@@ -2901,7 +2887,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
error |= (result != 0);
- result = smu7_read_smc_sram_dword(hwmgr->smumgr,
+ result = smu7_read_smc_sram_dword(hwmgr,
SMU72_FIRMWARE_HEADER_LOCATION +
offsetof(SMU72_Firmware_Header, Version),
&tmp, SMC_RAM_END);
@@ -3170,7 +3156,7 @@ static int tonga_set_valid_flag(struct tonga_mc_reg_table *table)
int tonga_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
{
int result;
- struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smumgr->backend);
+ struct tonga_smumgr *smu_data = (struct tonga_smumgr *)(hwmgr->smu_backend);
pp_atomctrl_mc_reg_table *table;
struct tonga_mc_reg_table *ni_table = &smu_data->mc_reg_table;
uint8_t module_index = tonga_get_memory_modile_index(hwmgr);
@@ -3253,7 +3239,7 @@ int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
struct amd_pp_profile *request)
{
struct tonga_smumgr *smu_data = (struct tonga_smumgr *)
- (hwmgr->smumgr->backend);
+ (hwmgr->smu_backend);
struct SMU72_Discrete_GraphicsLevel *levels =
smu_data->smc_state_table.GraphicsLevel;
uint32_t array = smu_data->smu7_data.dpm_table_start +
@@ -3270,6 +3256,6 @@ int tonga_populate_requested_graphic_levels(struct pp_hwmgr *hwmgr,
levels[i].DownHyst = request->down_hyst;
}
- return smu7_copy_bytes_to_smc(hwmgr->smumgr, array, (uint8_t *)levels,
+ return smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels,
array_size, SMC_RAM_END);
}
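
Nearly every hunk in this file is the same mechanical substitution: the per-ASIC SMU state moves from hwmgr->smumgr->backend to hwmgr->smu_backend. A hypothetical follow-up (not part of this patch) could fold the recurring cast into a single helper:

static inline struct tonga_smumgr *tonga_smu_backend(struct pp_hwmgr *hwmgr)
{
	/* centralizes the (struct tonga_smumgr *) cast repeated above */
	return (struct tonga_smumgr *)hwmgr->smu_backend;
}
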
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
index 962860f13f24..9d6a78a65976 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smc.h
@@ -40,7 +40,7 @@ struct tonga_pt_defaults {
uint8_t tdc_waterfall_ctl;
uint8_t dte_ambient_temp_base;
uint32_t display_cac;
- uint32_t bamp_temp_gradient;
+ uint32_t bapm_temp_gradient;
uint16_t bapmti_r[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
uint16_t bapmti_rc[SMU72_DTE_ITERATIONS * SMU72_DTE_SOURCES * SMU72_DTE_SINKS];
};
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index c35f4c35c9ca..d22cf218cf18 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -37,125 +37,125 @@
#include "smu7_smumgr.h"
-static int tonga_start_in_protection_mode(struct pp_smumgr *smumgr)
+static int tonga_start_in_protection_mode(struct pp_hwmgr *hwmgr)
{
int result;
/* Assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result)
return result;
/* Clear status */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_STATUS, 0);
/* Enable clock */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Set SMU Auto Start */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_INPUT_DATA, AUTO_START, 1);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1);
/**
* Send the Test SMU message with a 0x20000 offset to trigger SMU start
*/
- smu7_send_msg_to_smc_offset(smumgr);
+ smu7_send_msg_to_smc_offset(hwmgr);
/* Wait for done bit to be set */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
SMU_STATUS, SMU_DONE, 0);
/* Check pass/fail indicator */
- if (1 != SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device,
+ if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, SMU_STATUS, SMU_PASS)) {
pr_err("SMU Firmware start failed\n");
return -EINVAL;
}
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return 0;
}
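
The SMUM_* register-field macros become their PHM_* counterparts as the helpers migrate to the hwmgr layer; the semantics are unchanged. A VFPF indirect field write is, roughly, a read-modify-write on an indirect register, along these lines (simplified sketch; the real macros assemble the register and field mask/shift identifiers token-by-token from their arguments):

#define FIELD_WRITE_SKETCH(dev, port, reg, mask, shift, val)		\
	cgs_write_ind_register((dev), (port), (reg),			\
		(cgs_read_ind_register((dev), (port), (reg)) & ~(mask)) | \
		(((val) << (shift)) & (mask)))
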
-static int tonga_start_in_non_protection_mode(struct pp_smumgr *smumgr)
+static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr)
{
int result = 0;
/* wait for smc boot up */
- SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
RCU_UC_EVENTS, boot_seq_done, 0);
/* Clear firmware interrupt enable flag */
- cgs_write_ind_register(smumgr->device, CGS_IND_REG__SMC,
+ cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixFIRMWARE_FLAGS, 0);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 1);
- result = smu7_upload_smu_firmware_image(smumgr);
+ result = smu7_upload_smu_firmware_image(hwmgr);
if (result != 0)
return result;
/* Set SMC instruction start point at 0x0 */
- smu7_program_jump_on_start(smumgr);
+ smu7_program_jump_on_start(hwmgr);
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0);
/* De-assert reset */
- SMUM_WRITE_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMC_SYSCON_RESET_CNTL, rst_reg, 0);
/* Wait for firmware to initialize */
- SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, SMC_IND,
+ PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND,
FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1);
return result;
}
-static int tonga_start_smu(struct pp_smumgr *smumgr)
+static int tonga_start_smu(struct pp_hwmgr *hwmgr)
{
int result;
/* Only start SMC if SMC RAM is not running */
- if (!(smu7_is_smc_ram_running(smumgr) ||
- cgs_is_virtualization_enabled(smumgr->device))) {
+ if (!(smu7_is_smc_ram_running(hwmgr) ||
+ cgs_is_virtualization_enabled(hwmgr->device))) {
/* Check if SMU is running in protected mode */
- if (0 == SMUM_READ_VFPF_INDIRECT_FIELD(smumgr->device, CGS_IND_REG__SMC,
+ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
SMU_FIRMWARE, SMU_MODE)) {
- result = tonga_start_in_non_protection_mode(smumgr);
+ result = tonga_start_in_non_protection_mode(hwmgr);
if (result)
return result;
} else {
- result = tonga_start_in_protection_mode(smumgr);
+ result = tonga_start_in_protection_mode(hwmgr);
if (result)
return result;
}
}
- result = smu7_request_smu_load_fw(smumgr);
+ result = smu7_request_smu_load_fw(hwmgr);
return result;
}
@@ -167,7 +167,7 @@ static int tonga_start_smu(struct pp_smumgr *smumgr)
* @param smcAddress the address in the SMC RAM to access.
* @param value to write to the SMC SRAM.
*/
-static int tonga_smu_init(struct pp_smumgr *smumgr)
+static int tonga_smu_init(struct pp_hwmgr *hwmgr)
{
struct tonga_smumgr *tonga_priv = NULL;
int i;
@@ -176,9 +176,9 @@ static int tonga_smu_init(struct pp_smumgr *smumgr)
if (tonga_priv == NULL)
return -ENOMEM;
- smumgr->backend = tonga_priv;
+ hwmgr->smu_backend = tonga_priv;
- if (smu7_init(smumgr))
+ if (smu7_init(hwmgr))
return -EINVAL;
for (i = 0; i < SMU72_MAX_LEVELS_GRAPHICS; i++)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 408514c965a0..2f979fb86824 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -53,20 +53,20 @@
#define smnMP0_FW_INTF 0x3010104
#define smnMP1_PUB_CTRL 0x3010b14
-static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
+static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
uint32_t mp1_fw_flags, reg;
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2);
- cgs_write_register(smumgr->device, reg,
+ cgs_write_register(hwmgr->device, reg,
(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff)));
reg = soc15_get_register_offset(NBIF_HWID, 0,
mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2);
- mp1_fw_flags = cgs_read_register(smumgr->device, reg);
+ mp1_fw_flags = cgs_read_register(hwmgr->device, reg);
if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
return true;
@@ -80,20 +80,20 @@ static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr)
* @param hwmgr the address of the powerplay hardware manager.
* @return TRUE if the SMC has responded, FALSE otherwise.
*/
-static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
+static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- smum_wait_for_register_unequal(smumgr, reg,
+ phm_wait_for_register_unequal(hwmgr, reg,
0, MP1_C2PMSG_90__CONTENT_MASK);
- return cgs_read_register(smumgr->device, reg);
+ return cgs_read_register(hwmgr->device, reg);
}
/*
@@ -102,43 +102,43 @@ static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr)
* @param msg the message to send.
* @return Always return 0.
*/
-int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr,
+int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr,
uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66);
- cgs_write_register(smumgr->device, reg, msg);
+ cgs_write_register(hwmgr->device, reg, msg);
return 0;
}
/*
* Send a message to the SMC, and wait for its response.
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @param msg the message to send.
* @return Always returns 0.
*/
-int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
+int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
- vega10_wait_for_response(smumgr);
+ vega10_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
- vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(smumgr) != 1)
+ if (vega10_wait_for_response(hwmgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
@@ -146,32 +146,32 @@ int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
/*
* Send a message to the SMC with parameter
- * @param smumgr: the address of the powerplay hardware manager.
+ * @param hwmgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return Always returns 0.
*/
-int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
+int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
uint16_t msg, uint32_t parameter)
{
uint32_t reg;
- if (!vega10_is_smc_ram_running(smumgr))
+ if (!vega10_is_smc_ram_running(hwmgr))
return -EINVAL;
- vega10_wait_for_response(smumgr);
+ vega10_wait_for_response(hwmgr);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90);
- cgs_write_register(smumgr->device, reg, 0);
+ cgs_write_register(hwmgr->device, reg, 0);
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
- if (vega10_wait_for_response(smumgr) != 1)
+ if (vega10_wait_for_response(hwmgr) != 1)
pr_err("Failed to send message: 0x%x\n", msg);
return 0;
@@ -180,51 +180,51 @@ int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr,
/*
* Send a message to the SMC with a parameter, without waiting for a response
- * @param smumgr: the address of the powerplay hardware manager.
+ * @param hwmgr: the address of the powerplay hardware manager.
* @param msg: the message to send.
* @param parameter: the parameter to send
* @return The response that came from the SMC.
*/
int vega10_send_msg_to_smc_with_parameter_without_waiting(
- struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter)
+ struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- cgs_write_register(smumgr->device, reg, parameter);
+ cgs_write_register(hwmgr->device, reg, parameter);
- return vega10_send_msg_to_smc_without_waiting(smumgr, msg);
+ return vega10_send_msg_to_smc_without_waiting(hwmgr, msg);
}
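
All vega10 SMC messaging goes through three MP1 scratch registers: C2PMSG_66 carries the message ID, C2PMSG_82 the optional 32-bit argument, and C2PMSG_90 the response. The handshake above condenses to the sketch below, where write_reg() and poll_response() are stand-ins for the soc15_get_register_offset()/cgs accesses and the unequal-wait helper:

static int vega10_send_sketch(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t arg)
{
	write_reg(hwmgr, C2PMSG_90, 0);		/* clear the previous response */
	write_reg(hwmgr, C2PMSG_82, arg);	/* stage the argument */
	write_reg(hwmgr, C2PMSG_66, msg);	/* writing the ID fires the message */
	return poll_response(hwmgr, C2PMSG_90) == 1 ? 0 : -EIO;	/* 1 == accepted */
}
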
/*
* Retrieve an argument from SMC.
- * @param smumgr the address of the powerplay hardware manager.
+ * @param hwmgr the address of the powerplay hardware manager.
* @param arg pointer to store the argument from SMC.
* @return Always returns 0.
*/
-int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg)
+int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
{
uint32_t reg;
reg = soc15_get_register_offset(MP1_HWID, 0,
mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82);
- *arg = cgs_read_register(smumgr->device, reg);
+ *arg = cgs_read_register(hwmgr->device, reg);
return 0;
}
/*
* Copy table from SMC into driver FB
- * @param smumgr the address of the SMC manager
+ * @param hwmgr the address of the HW manager
* @param table_id the driver's table ID to copy from
*/
-int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -232,16 +232,16 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
@@ -255,14 +255,14 @@ int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
/*
* Copy table from Driver FB into SMC
- * @param smumgr the address of the SMC manager
+ * @param hwmgr the address of the HW manager
* @param table_id the table to copy from
*/
-int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE,
"Invalid SMU Table ID!", return -EINVAL);
@@ -274,17 +274,17 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
memcpy(priv->smu_tables.entry[table_id].table, table,
priv->smu_tables.entry[table_id].size);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
priv->smu_tables.entry[table_id].table_addr_high) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
priv->smu_tables.entry[table_id].table_addr_low) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr,
+ PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
priv->smu_tables.entry[table_id].table_id) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
@@ -293,87 +293,87 @@ int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
return 0;
}
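
Both copy directions share one three-message sequence: tell the SMC where the driver's buffer lives (high then low DRAM address), then issue the transfer command for the given table ID. A sketch of that shared core; the entry type name is assumed from the smu_tables array declared in vega10_smumgr.h:

static int vega10_transfer_table_sketch(struct pp_hwmgr *hwmgr,
					struct smu_table_entry *entry,
					uint16_t transfer_msg)
{
	int ret;

	ret = vega10_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetDriverDramAddrHigh, entry->table_addr_high);
	if (!ret)
		ret = vega10_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetDriverDramAddrLow, entry->table_addr_low);
	if (!ret)	/* PPSMC_MSG_TransferTableSmu2Dram or ..Dram2Smu */
		ret = vega10_send_msg_to_smc_with_parameter(hwmgr,
				transfer_msg, entry->table_id);
	return ret;
}
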
-int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -EINVAL);
- return vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE);
+ return vega10_copy_table_from_smc(hwmgr, avfs_table, AVFSTABLE);
}
-int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table)
{
PP_ASSERT_WITH_CODE(avfs_table,
"No access to SMC AVFS Table",
return -EINVAL);
- return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE);
+ return vega10_copy_table_to_smc(hwmgr, avfs_table, AVFSTABLE);
}
-int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint32_t feature_mask)
{
int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
PPSMC_MSG_DisableSmuFeatures;
- return vega10_send_msg_to_smc_with_parameter(smumgr,
+ return vega10_send_msg_to_smc_with_parameter(hwmgr,
msg, feature_mask);
}
-int vega10_get_smc_features(struct pp_smumgr *smumgr,
+int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
uint32_t *features_enabled)
{
if (features_enabled == NULL)
return -EINVAL;
- if (!vega10_send_msg_to_smc(smumgr,
+ if (!vega10_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetEnabledSmuFeatures)) {
- vega10_read_arg_from_smc(smumgr, features_enabled);
+ vega10_read_arg_from_smc(hwmgr, features_enabled);
return 0;
}
return -EINVAL;
}
-int vega10_set_tools_address(struct pp_smumgr *smumgr)
+int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high ||
priv->smu_tables.entry[TOOLSTABLE].table_addr_low) {
- if (!vega10_send_msg_to_smc_with_parameter(smumgr,
+ if (!vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
priv->smu_tables.entry[TOOLSTABLE].table_addr_high))
- vega10_send_msg_to_smc_with_parameter(smumgr,
+ vega10_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
priv->smu_tables.entry[TOOLSTABLE].table_addr_low);
}
return 0;
}
-static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
+static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
struct cgs_system_info sys_info = {0};
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
+ PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(hwmgr,
PPSMC_MSG_GetDriverIfVersion),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- vega10_read_arg_from_smc(smumgr, &smc_driver_if_version);
+ vega10_read_arg_from_smc(hwmgr, &smc_driver_if_version);
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_DEV;
- cgs_query_system_info(smumgr->device, &sys_info);
+ cgs_query_system_info(hwmgr->device, &sys_info);
dev_id = (uint32_t)sys_info.value;
sys_info.size = sizeof(struct cgs_system_info);
sys_info.info_id = CGS_SYSTEM_INFO_PCIE_REV;
- cgs_query_system_info(smumgr->device, &sys_info);
+ cgs_query_system_info(hwmgr->device, &sys_info);
rev_id = (uint32_t)sys_info.value;
if (!((dev_id == 0x687f) &&
@@ -392,7 +392,7 @@ static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
return 0;
}
-static int vega10_smu_init(struct pp_smumgr *smumgr)
+static int vega10_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv;
uint64_t mc_addr;
@@ -401,7 +401,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
int ret;
struct cgs_firmware_info info = {0};
- ret = cgs_get_firmware_info(smumgr->device,
+ ret = cgs_get_firmware_info(hwmgr->device,
smu7_convert_fw_type_to_cgs(UCODE_ID_SMU),
&info);
if (ret || !info.kptr)
@@ -412,10 +412,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
if (!priv)
return -ENOMEM;
- smumgr->backend = priv;
+ hwmgr->smu_backend = priv;
/* allocate space for pptable */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(PPTable_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -425,8 +425,8 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for pptable.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -441,7 +441,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[PPTABLE].handle = handle;
/* allocate space for watermarks table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(Watermarks_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -451,10 +451,10 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for wmtable.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -469,7 +469,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
priv->smu_tables.entry[WMTABLE].handle = handle;
/* allocate space for AVFS table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(AvfsTable_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -479,12 +479,12 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs table.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -500,7 +500,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
tools_size = 0x19000;
if (tools_size) {
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
tools_size,
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -522,7 +522,7 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
}
/* allocate space for AVFS Fuse table */
- smu_allocate_memory(smumgr->device,
+ smu_allocate_memory(hwmgr->device,
sizeof(AvfsFuseOverride_t),
CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB,
PAGE_SIZE,
@@ -532,16 +532,16 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
PP_ASSERT_WITH_CODE(kaddr,
"[vega10_smu_init] Out of memory for avfs fuse table.",
- kfree(smumgr->backend);
- cgs_free_gpu_mem(smumgr->device,
+ kfree(hwmgr->smu_backend);
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)handle);
return -EINVAL);
@@ -558,36 +558,36 @@ static int vega10_smu_init(struct pp_smumgr *smumgr)
return 0;
}
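
Each allocation failure in vega10_smu_init() re-frees every table allocated before it inside the PP_ASSERT_WITH_CODE cleanup argument, so the unwind code grows with every table added. A hypothetical restructuring (not part of this patch) is the usual kernel goto-unwind idiom; alloc_table()/free_table() are illustrative stand-ins for the smu_allocate_memory()/cgs_free_gpu_mem() pairs:

static int smu_init_unwind_sketch(struct pp_hwmgr *hwmgr)
{
	void *pptable, *wmtable;

	pptable = alloc_table(hwmgr, sizeof(PPTable_t));
	if (!pptable)
		goto err_backend;
	wmtable = alloc_table(hwmgr, sizeof(Watermarks_t));
	if (!wmtable)
		goto err_pptable;
	return 0;		/* further tables would extend the chain */

err_pptable:
	free_table(hwmgr, pptable);
err_backend:
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;
	return -EINVAL;
}
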
-static int vega10_smu_fini(struct pp_smumgr *smumgr)
+static int vega10_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega10_smumgr *priv =
- (struct vega10_smumgr *)(smumgr->backend);
+ (struct vega10_smumgr *)(hwmgr->smu_backend);
if (priv) {
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle);
if (priv->smu_tables.entry[TOOLSTABLE].table)
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle);
- cgs_free_gpu_mem(smumgr->device,
+ cgs_free_gpu_mem(hwmgr->device,
(cgs_handle_t)priv->smu_tables.entry[AVFSFUSETABLE].handle);
- kfree(smumgr->backend);
- smumgr->backend = NULL;
+ kfree(hwmgr->smu_backend);
+ hwmgr->smu_backend = NULL;
}
return 0;
}
-static int vega10_start_smu(struct pp_smumgr *smumgr)
+static int vega10_start_smu(struct pp_hwmgr *hwmgr)
{
- PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr),
+ PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(hwmgr),
"Failed to verify SMC interface!",
return -EINVAL);
- vega10_set_tools_address(smumgr);
+ vega10_set_tools_address(hwmgr);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 821425c1e4e0..0695455b21b2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -52,19 +52,19 @@ struct vega10_smumgr {
struct smu_table_array smu_tables;
};
-int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg);
-int vega10_copy_table_from_smc(struct pp_smumgr *smumgr,
+int vega10_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg);
+int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int vega10_copy_table_to_smc(struct pp_smumgr *smumgr,
+int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
uint8_t *table, int16_t table_id);
-int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
bool enable, uint32_t feature_mask);
-int vega10_get_smc_features(struct pp_smumgr *smumgr,
+int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
uint32_t *features_enabled);
-int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
-int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table);
+int vega10_save_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
+int vega10_restore_vft_table(struct pp_hwmgr *hwmgr, uint8_t *avfs_table);
-int vega10_set_tools_address(struct pp_smumgr *smumgr);
+int vega10_set_tools_address(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 4acbb944bcd2..be16c6390216 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -109,5 +109,3 @@ radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
-
-CFLAGS_radeon_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index e82a99cb2459..ab32830c4e23 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -58,7 +58,7 @@ enum r600_hdmi_iec_status_bits {
static struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
- struct r600_audio_pin status;
+ struct r600_audio_pin status = {};
uint32_t value;
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index fd25361ac681..2fcf805d3a16 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -322,10 +322,10 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
if (rfb->obj) {
radeonfb_destroy_pinned_object(rfb->obj);
rfb->obj = NULL;
+ drm_framebuffer_unregister_private(&rfb->base);
+ drm_framebuffer_cleanup(&rfb->base);
}
drm_fb_helper_fini(&rfbdev->helper);
- drm_framebuffer_unregister_private(&rfb->base);
- drm_framebuffer_cleanup(&rfb->base);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index f6578c96925c..a2ac8ac0930d 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -58,6 +58,10 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+
+static int alloc_pasid(unsigned int bits);
+static void free_pasid(unsigned int pasid);
+
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
/*
@@ -112,6 +116,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.get_vmem_size = get_vmem_size,
.get_gpu_clock_counter = get_gpu_clock_counter,
.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+ .alloc_pasid = alloc_pasid,
+ .free_pasid = free_pasid,
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
.init_pipeline = kgd_init_pipeline,
@@ -341,6 +347,31 @@ static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}
+/*
+ * PASID manager
+ */
+static DEFINE_IDA(pasid_ida);
+
+int alloc_pasid(unsigned int bits)
+{
+ int pasid = -EINVAL;
+
+ for (bits = min(bits, 31U); bits > 0; bits--) {
+ pasid = ida_simple_get(&pasid_ida,
+ 1U << (bits - 1), 1U << bits,
+ GFP_KERNEL);
+ if (pasid != -ENOSPC)
+ break;
+ }
+
+ return pasid;
+}
+
+void free_pasid(unsigned int pasid)
+{
+ ida_simple_remove(&pasid_ida, pasid);
+}
+
static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
{
return (struct radeon_device *)kgd;
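
alloc_pasid() asks the IDA for an id in the window [2^(bits-1), 2^bits) and, whenever that window is exhausted, retries one bit narrower, so a successful result is always nonzero and fits within the caller's width limit. (The kernel loop breaks on any result other than -ENOSPC, so genuine errors are not retried.) A toy userspace model of the same search order, with alloc_in_range() as a hypothetical stand-in for ida_simple_get():

#include <stdio.h>

static unsigned char used[1 << 8];      /* toy id space: 0..255 */

/* Lowest free id in [lo, hi), or -1 when the window is full. */
static int alloc_in_range(unsigned lo, unsigned hi)
{
        for (unsigned id = lo; id < hi; id++) {
                if (!used[id]) {
                        used[id] = 1;
                        return (int)id;
                }
        }
        return -1;
}

int main(void)
{
        unsigned bits = 8;              /* caller wants an 8-bit PASID */
        int pasid = -1;

        /* Same walk as alloc_pasid(): [2^(b-1), 2^b) for b = 8, 7, ... 1 */
        for (; bits > 0; bits--) {
                pasid = alloc_in_range(1u << (bits - 1), 1u << bits);
                if (pasid >= 0)
                        break;
        }
        printf("got pasid %d\n", pasid); /* first call prints 128 */
        return 0;
}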
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index fdce4062901f..815eaa8c394b 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -204,5 +204,5 @@ DEFINE_EVENT(radeon_semaphore_request, radeon_semaphore_wait,
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/radeon
#include <trace/define_trace.h>
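
This hunk pairs with the radeon Makefile change above that drops CFLAGS_radeon_trace_points.o := -I$(src): define_trace.h re-includes the trace header through TRACE_INCLUDE_PATH, and expressing that path relative to include/trace/ removes the need for a per-object include flag. The standard shape of such a header, sketched with a hypothetical mydrv driver:

/* mydrv_trace.h -- the usual tracepoint header boilerplate */
#if !defined(_MYDRV_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _MYDRV_TRACE_H_

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM mydrv

TRACE_EVENT(mydrv_event,
            TP_PROTO(unsigned int id),
            TP_ARGS(id),
            TP_STRUCT__entry(__field(unsigned int, id)),
            TP_fast_assign(__entry->id = id;),
            TP_printk("id=%u", __entry->id)
);

#endif /* _MYDRV_TRACE_H_ */

/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/mydrv
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mydrv_trace
#include <trace/define_trace.h>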
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index bf69bf9086bf..8032da57e409 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -725,8 +725,6 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_device *rdev;
- unsigned i;
- int r;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (ttm->state != tt_unpopulated)
@@ -762,33 +760,13 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
}
#endif
- r = ttm_pool_populate(ttm);
- if (r) {
- return r;
- }
-
- for (i = 0; i < ttm->num_pages; i++) {
- gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (i--) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- gtt->ttm.dma_address[i] = 0;
- }
- ttm_pool_unpopulate(ttm);
- return -EFAULT;
- }
- }
- return 0;
+ return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
- unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
if (gtt && gtt->userptr) {
@@ -815,14 +793,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
#endif
- for (i = 0; i < ttm->num_pages; i++) {
- if (gtt->ttm.dma_address[i]) {
- pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- }
-
- ttm_pool_unpopulate(ttm);
+ ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
}
int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
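
With the open-coded loops gone, radeon's populate/unpopulate paths keep only their driver-specific cases (userptr BOs, shared SG tables, the DMA-pool variants guarded above) and delegate the pool allocation plus DMA mapping to the two helpers introduced further down in this diff in ttm_page_alloc.c; the replacement also moves from the deprecated pci_map_page() to the generic DMA API. A driver-side sketch of the new shape, using hypothetical mydrv_* names in place of the radeon types:

/* Sketch only: assumes the helper signatures added later in this diff. */
static int mydrv_tt_populate(struct device *dev, struct ttm_dma_tt *dma_tt)
{
        /* driver-specific early returns (userptr, SG slaves) go here ... */
        return ttm_populate_and_map_pages(dev, dma_tt);
}

static void mydrv_tt_unpopulate(struct device *dev, struct ttm_dma_tt *dma_tt)
{
        ttm_unmap_and_unpopulate_pages(dev, dma_tt);
}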
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 180ce6296416..d79607a1187c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -440,28 +440,29 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
struct ttm_bo_global *glob = bo->glob;
int ret;
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort: if we fail to allocate memory for the
+ * fences, block for the BO to become idle
+ */
+ reservation_object_wait_timeout_rcu(bo->resv, true, false,
+ 30 * HZ);
+ spin_lock(&glob->lru_lock);
+ goto error;
+ }
+
spin_lock(&glob->lru_lock);
ret = __ttm_bo_reserve(bo, false, true, NULL);
-
if (!ret) {
- if (!ttm_bo_wait(bo, false, true)) {
+ if (reservation_object_test_signaled_rcu(&bo->ttm_resv, true)) {
ttm_bo_del_from_lru(bo);
spin_unlock(&glob->lru_lock);
+ if (bo->resv != &bo->ttm_resv)
+ reservation_object_unlock(&bo->ttm_resv);
ttm_bo_cleanup_memtype_use(bo);
-
return;
}
- ret = ttm_bo_individualize_resv(bo);
- if (ret) {
- /* Last resort, if we fail to allocate memory for the
- * fences block for the BO to become idle and free it.
- */
- spin_unlock(&glob->lru_lock);
- ttm_bo_wait(bo, true, true);
- ttm_bo_cleanup_memtype_use(bo);
- return;
- }
ttm_bo_flush_all_fences(bo);
/*
@@ -474,11 +475,12 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
ttm_bo_add_to_lru(bo);
}
- if (bo->resv != &bo->ttm_resv)
- reservation_object_unlock(&bo->ttm_resv);
__ttm_bo_unreserve(bo);
}
+ if (bo->resv != &bo->ttm_resv)
+ reservation_object_unlock(&bo->ttm_resv);
+error:
kref_get(&bo->list_kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
spin_unlock(&glob->lru_lock);
@@ -555,6 +557,8 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
}
ttm_bo_del_from_lru(bo);
+ if (!list_empty(&bo->ddestroy) && (bo->resv != &bo->ttm_resv))
+ reservation_object_fini(&bo->ttm_resv);
list_del_init(&bo->ddestroy);
kref_put(&bo->list_kref, ttm_bo_ref_bug);
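
The reorder above calls ttm_bo_individualize_resv() before anything else: if copying the fences into the BO's private reservation object fails (an allocation failure), the code now blocks up to 30 seconds for the BO to idle and then queues it on the delayed-destroy list rather than freeing it synchronously; the second hunk makes the reaper finalize that private reservation object when it finally drops the BO. A compilable userspace sketch of the decision flow, where every helper is a stub standing in for the TTM/reservation primitives:

#include <stdbool.h>
#include <stdio.h>

struct bo { bool idle; };

static bool individualize(struct bo *b) { (void)b; return true; } /* pretend ENOMEM never hits */
static bool signaled(struct bo *b)      { return b->idle; }
static void release_now(struct bo *b)   { (void)b; printf("freed immediately\n"); }
static void queue_delayed(struct bo *b) { (void)b; printf("queued for the reaper\n"); }

static void cleanup_refs_or_queue(struct bo *b)
{
        if (!individualize(b)) {
                /* allocation failed: bounded wait for idle, then defer */
                queue_delayed(b);
                return;
        }
        if (signaled(b))
                release_now(b);    /* fast path: nothing left to wait on */
        else
                queue_delayed(b);  /* reaper rechecks the private fences */
}

int main(void)
{
        struct bo busy = { .idle = false }, done = { .idle = true };

        cleanup_refs_or_queue(&busy);
        cleanup_refs_or_queue(&done);
        return 0;
}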
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index c934ad5b3903..78cb99be7146 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -587,7 +587,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long offset, size;
int ret;
- BUG_ON(!list_empty(&bo->swap));
map->virtual = NULL;
map->bo = bo;
if (num_pages > bo->num_pages)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 871599826773..052e1f102113 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -920,6 +920,49 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
+#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ unsigned i;
+ int r;
+
+ r = ttm_pool_populate(&tt->ttm);
+ if (r)
+ return r;
+
+ for (i = 0; i < tt->ttm.num_pages; i++) {
+ tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+ 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, tt->dma_address[i])) {
+ while (i--) {
+ dma_unmap_page(dev, tt->dma_address[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ tt->dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(&tt->ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ttm_populate_and_map_pages);
+
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+{
+ unsigned i;
+
+ for (i = 0; i < tt->ttm.num_pages; i++) {
+ if (tt->dma_address[i]) {
+ dma_unmap_page(dev, tt->dma_address[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ }
+ ttm_pool_unpopulate(&tt->ttm);
+}
+EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
+#endif
+
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
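
ttm_populate_and_map_pages() uses the classic partial-failure unwind: on a mapping error it walks back over only the pages already mapped (the while (i--) loop) before unpopulating, so the tt never survives in a half-mapped state. The same idiom in self-contained form, with map_one()/unmap_one() as hypothetical stand-ins for dma_map_page()/dma_unmap_page():

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 4

static bool map_one(int i)   { return i != 2; }  /* simulate failure at page 2 */
static void unmap_one(int i) { printf("unmapped page %d\n", i); }

static int populate_and_map(void)
{
        int i;

        for (i = 0; i < NPAGES; i++) {
                if (!map_one(i)) {
                        while (i--)          /* unwind only what succeeded */
                                unmap_one(i);
                        return -1;           /* -EFAULT in the kernel helper */
                }
        }
        return 0;
}

int main(void)
{
        if (populate_and_map() != 0)
                printf("mapping failed, fully unwound\n");
        return 0;
}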
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 90ddbdca93bd..06bc14b55e66 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -60,22 +60,15 @@
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
#define FREE_ALL_PAGES (~0U)
-/* times are in msecs */
-#define IS_UNDEFINED (0)
-#define IS_WC (1<<1)
-#define IS_UC (1<<2)
-#define IS_CACHED (1<<3)
-#define IS_DMA32 (1<<4)
enum pool_type {
- POOL_IS_UNDEFINED,
- POOL_IS_WC = IS_WC,
- POOL_IS_UC = IS_UC,
- POOL_IS_CACHED = IS_CACHED,
- POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
- POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
- POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+ IS_UNDEFINED = 0,
+ IS_WC = 1 << 1,
+ IS_UC = 1 << 2,
+ IS_CACHED = 1 << 3,
+ IS_DMA32 = 1 << 4
};
+
/*
* The pool structure. There are usually six pools:
* - generic (not restricted to DMA32):
@@ -86,11 +79,9 @@ enum pool_type {
 * The other ones can be shrunk by the shrinker API if necessary.
* @pools: The 'struct device->dma_pools' link.
* @type: Type of the pool
- * @lock: Protects the inuse_list and free_list from concurrnet access. Must be
+ * @lock: Protects the free_list from concurrent access. Must be
used with irqsave/irqrestore variants because the pool allocator may be called
* from delayed work.
- * @inuse_list: Pool of pages that are in use. The order is very important and
- * it is in the order that the TTM pages that are put back are in.
* @free_list: Pool of pages that are free to be used. No order requirements.
* @dev: The device that is associated with these pools.
* @size: Size used during DMA allocation.
@@ -107,7 +98,6 @@ struct dma_pool {
struct list_head pools; /* The 'struct device->dma_pools link */
enum pool_type type;
spinlock_t lock;
- struct list_head inuse_list;
struct list_head free_list;
struct device *dev;
unsigned size;
@@ -609,7 +599,6 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
sec_pool->pool = pool;
INIT_LIST_HEAD(&pool->free_list);
- INIT_LIST_HEAD(&pool->inuse_list);
INIT_LIST_HEAD(&pool->pools);
spin_lock_init(&pool->lock);
pool->dev = dev;
@@ -882,22 +871,23 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
struct dma_pool *pool;
enum pool_type type;
unsigned i;
- gfp_t gfp_flags;
int ret;
if (ttm->state != tt_unpopulated)
return 0;
type = ttm_to_type(ttm->page_flags, ttm->caching_state);
- if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
- gfp_flags = GFP_USER | GFP_DMA32;
- else
- gfp_flags = GFP_HIGHUSER;
- if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
- gfp_flags |= __GFP_ZERO;
-
pool = ttm_dma_find_pool(dev, type);
if (!pool) {
+ gfp_t gfp_flags;
+
+ if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags = GFP_USER | GFP_DMA32;
+ else
+ gfp_flags = GFP_HIGHUSER;
+ if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
pool = ttm_dma_pool_init(dev, gfp_flags, type);
if (IS_ERR_OR_NULL(pool)) {
return -ENOMEM;