Diffstat (limited to 'drivers/gpu/drm/amd/powerplay')
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c                |  100
 drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c        |    4
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c             |    8
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c           |   96
 drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h           |    5
 drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c  |    8
 drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c            |   12
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c  |    2
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h  |    2
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c             |    6
 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c             |   42
 drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h             |    2
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c           |   56
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c       |    2
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c           | 1055
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h           |   13
 drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c |    6
 drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h          |    2
 drivers/gpu/drm/amd/powerplay/inc/hwmgr.h                    |    7
 drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h    |   23
 20 files changed, 902 insertions(+), 549 deletions(-)
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index d567be49c31b..145e5c403bea 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -221,29 +221,7 @@ static int pp_sw_reset(void *handle)
static int pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
- struct amdgpu_device *adev = handle;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- int ret;
-
- if (!hwmgr || !hwmgr->pm_en)
- return 0;
-
- if (hwmgr->hwmgr_func->gfx_off_control) {
- /* Enable/disable GFX off through SMU */
- ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr,
- state == AMD_PG_STATE_GATE);
- if (ret)
- pr_err("gfx off control failed!\n");
- }
-
- if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) {
- pr_debug("%s was not implemented.\n", __func__);
- return 0;
- }
-
- /* Enable/disable GFX per cu powergating through SMU */
- return hwmgr->hwmgr_func->enable_per_cu_power_gating(hwmgr,
- state == AMD_PG_STATE_GATE);
+ return 0;
}
static int pp_suspend(void *handle)
@@ -1118,17 +1096,17 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
- if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges)
+ if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
mutex_lock(&hwmgr->smu_lock);
ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
- wm_with_clock_ranges);
+ clock_ranges);
mutex_unlock(&hwmgr->smu_lock);
return ret;
@@ -1168,19 +1146,78 @@ static int pp_get_display_mode_validation_clocks(void *handle,
return ret;
}
-static int pp_set_mmhub_powergating_by_smu(void *handle)
+static int pp_dpm_powergate_mmhub(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) {
+ if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
pr_info("%s was not implemented.\n", __func__);
return 0;
}
- return hwmgr->hwmgr_func->set_mmhub_powergating_by_smu(hwmgr);
+ return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
+}
+
+static int pp_dpm_powergate_gfx(void *handle, bool gate)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return 0;
+
+ if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return 0;
+ }
+
+ return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
+}
+
+static int pp_set_powergating_by_smu(void *handle,
+ uint32_t block_type, bool gate)
+{
+ int ret = 0;
+
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ case AMD_IP_BLOCK_TYPE_VCN:
+ pp_dpm_powergate_uvd(handle, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ pp_dpm_powergate_vce(handle, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_GMC:
+ pp_dpm_powergate_mmhub(handle);
+ break;
+ case AMD_IP_BLOCK_TYPE_GFX:
+ ret = pp_dpm_powergate_gfx(handle, gate);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int pp_notify_smu_enable_pwe(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+
+ if (!hwmgr || !hwmgr->pm_en)
+ return -EINVAL;
+
+ if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
+ pr_info("%s was not implemented.\n", __func__);
+ return -EINVAL;
+ }
+
+ mutex_lock(&hwmgr->smu_lock);
+ hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
+ mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
}
static const struct amd_pm_funcs pp_dpm_funcs = {
@@ -1189,8 +1226,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.force_performance_level = pp_dpm_force_performance_level,
.get_performance_level = pp_dpm_get_performance_level,
.get_current_power_state = pp_dpm_get_current_power_state,
- .powergate_vce = pp_dpm_powergate_vce,
- .powergate_uvd = pp_dpm_powergate_uvd,
.dispatch_tasks = pp_dpm_dispatch_tasks,
.set_fan_control_mode = pp_dpm_set_fan_control_mode,
.get_fan_control_mode = pp_dpm_get_fan_control_mode,
@@ -1210,6 +1245,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_vce_clock_state = pp_dpm_get_vce_clock_state,
.switch_power_profile = pp_dpm_switch_power_profile,
.set_clockgating_by_smu = pp_set_clockgating_by_smu,
+ .set_powergating_by_smu = pp_set_powergating_by_smu,
.get_power_profile_mode = pp_get_power_profile_mode,
.set_power_profile_mode = pp_set_power_profile_mode,
.odn_edit_dpm_table = pp_odn_edit_dpm_table,
@@ -1227,5 +1263,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
.display_clock_voltage_request = pp_display_clock_voltage_request,
.get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
- .set_mmhub_powergating_by_smu = pp_set_mmhub_powergating_by_smu,
+ .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index a0bb921fac22..53207e76b0f3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -435,7 +435,7 @@ int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
PHM_FUNC_CHECK(hwmgr);
@@ -443,7 +443,7 @@ int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
return -EINVAL;
return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
- wm_with_clock_ranges);
+ clock_ranges);
}
int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 7047e29755c3..01dc46dc9c8a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -1544,14 +1544,14 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc,
switch (hwmgr->chip_id) {
case CHIP_TONGA:
case CHIP_FIJI:
- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4);
- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4);
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc) / 4;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc) / 4;
return;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
- *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100);
- *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100);
+ *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc) / 100;
+ *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc) / 100;
return;
default:
break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 5325661fedff..d27c1c9df286 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
return 0;
}
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+ struct atom_firmware_info_v3_2 *fw_info)
+{
+ uint32_t frequency = 0;
+
+ boot_values->ulRevision = fw_info->firmware_revision;
+ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+ boot_values->usVddc = fw_info->bootup_vddc_mv;
+ boot_values->usVddci = fw_info->bootup_vddci_mv;
+ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = fw_info->coolingsolution_id;
+ boot_values->ulSocClk = 0;
+ boot_values->ulDCEFClk = 0;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+ boot_values->ulSocClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+ boot_values->ulDCEFClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+ boot_values->ulEClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+ boot_values->ulVClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+ boot_values->ulDClk = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+ struct atom_firmware_info_v3_1 *fw_info)
+{
+ uint32_t frequency = 0;
+
+ boot_values->ulRevision = fw_info->firmware_revision;
+ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+ boot_values->usVddc = fw_info->bootup_vddc_mv;
+ boot_values->usVddci = fw_info->bootup_vddci_mv;
+ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = fw_info->coolingsolution_id;
+ boot_values->ulSocClk = 0;
+ boot_values->ulDCEFClk = 0;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+ boot_values->ulSocClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+ boot_values->ulDCEFClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+ boot_values->ulEClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+ boot_values->ulVClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+ boot_values->ulDClk = frequency;
+}
+
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values)
{
- struct atom_firmware_info_v3_1 *info = NULL;
+ struct atom_firmware_info_v3_2 *fwinfo_3_2;
+ struct atom_firmware_info_v3_1 *fwinfo_3_1;
+ struct atom_common_table_header *info = NULL;
uint16_t ix;
ix = GetIndexIntoMasterDataTable(firmwareinfo);
- info = (struct atom_firmware_info_v3_1 *)
+ info = (struct atom_common_table_header *)
smu_atom_get_data_table(hwmgr->adev,
ix, NULL, NULL, NULL);
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- boot_values->ulRevision = info->firmware_revision;
- boot_values->ulGfxClk = info->bootup_sclk_in10khz;
- boot_values->ulUClk = info->bootup_mclk_in10khz;
- boot_values->usVddc = info->bootup_vddc_mv;
- boot_values->usVddci = info->bootup_vddci_mv;
- boot_values->usMvddc = info->bootup_mvddc_mv;
- boot_values->usVddGfx = info->bootup_vddgfx_mv;
- boot_values->ucCoolingID = info->coolingsolution_id;
- boot_values->ulSocClk = 0;
- boot_values->ulDCEFClk = 0;
+ if ((info->format_revision == 3) && (info->content_revision == 2)) {
+ fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+ pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+ boot_values, fwinfo_3_2);
+ } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+ fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+ pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+ boot_values, fwinfo_3_1);
+ } else {
+ pr_info("Fw info table revision does not match!");
+ return -EINVAL;
+ }
return 0;
}
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
+ param->Vr2_I2C_address = info->Vr2_I2C_address;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe10aa4db5e6..22e21668c93a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
uint32_t ulUClk;
uint32_t ulSocClk;
uint32_t ulDCEFClk;
+ uint32_t ulEClk;
+ uint32_t ulVClk;
+ uint32_t ulDClk;
uint16_t usVddc;
uint16_t usVddci;
uint16_t usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t acggfxclkspreadenabled;
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
+
+ uint8_t Vr2_I2C_address;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
index 35bd9870ab10..4e1fd5393845 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c
@@ -183,10 +183,10 @@ static int get_vddc_lookup_table(
ATOM_Tonga_Voltage_Lookup_Record,
entries, vddc_lookup_pp_tables, i);
record->us_calculated = 0;
- record->us_vdd = atom_record->usVdd;
- record->us_cac_low = atom_record->usCACLow;
- record->us_cac_mid = atom_record->usCACMid;
- record->us_cac_high = atom_record->usCACHigh;
+ record->us_vdd = le16_to_cpu(atom_record->usVdd);
+ record->us_cac_low = le16_to_cpu(atom_record->usCACLow);
+ record->us_cac_mid = le16_to_cpu(atom_record->usCACMid);
+ record->us_cac_high = le16_to_cpu(atom_record->usCACHigh);
}
*lookup_table = table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index d4bc83e81389..a63e00653324 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -993,7 +993,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = latency_required ?
smu10_get_mem_latency(hwmgr,
pclk_vol_table->entries[i].clk) :
@@ -1044,7 +1044,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
clocks->num_levels = 0;
for (i = 0; i < pclk_vol_table->count; i++) {
- clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol;
clocks->num_levels++;
}
@@ -1108,9 +1108,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct smu10_hwmgr *data = hwmgr->backend;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
Watermarks_t *table = &(data->water_marks_table);
int result = 0;
@@ -1126,7 +1127,7 @@ static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
}
-static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr)
+static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
}
@@ -1182,10 +1183,11 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.asic_setup = smu10_setup_asic_task,
.power_state_set = smu10_set_power_state_tasks,
.dynamic_state_management_disable = smu10_disable_dpm_tasks,
- .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu,
+ .powergate_mmhub = smu10_powergate_mmhub,
.smus_notify_pwe = smu10_smus_notify_pwe,
.gfx_off_control = smu10_gfx_off_control,
.display_clock_voltage_request = smu10_display_clock_voltage_request,
+ .powergate_gfx = smu10_gfx_off_control,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 41495621d94a..683b29a99366 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -416,7 +416,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
* Powerplay will only control the static per CU Power Gating.
* Dynamic per CU Power Gating will be done in gfx.
*/
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable)
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
{
struct amdgpu_device *adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
index be7f66d2b234..fc8f8a6acc72 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.h
@@ -33,6 +33,6 @@ int smu7_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate);
int smu7_disable_clock_power_gating(struct pp_hwmgr *hwmgr);
int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
const uint32_t *msg_id);
-int smu7_enable_per_cu_power_gating(struct pp_hwmgr *hwmgr, bool enable);
+int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index b89d6fb8559b..077b79938528 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1578,7 +1578,7 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->current_profile_setting.sclk_up_hyst = 0;
data->current_profile_setting.sclk_down_hyst = 100;
data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
- data->current_profile_setting.bupdate_sclk = 1;
+ data->current_profile_setting.bupdate_mclk = 1;
data->current_profile_setting.mclk_up_hyst = 0;
data->current_profile_setting.mclk_down_hyst = 100;
data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
@@ -3183,7 +3183,7 @@ static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
state_entry->ucPCIEGenLow);
performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
- state_entry->ucPCIELaneHigh);
+ state_entry->ucPCIELaneLow);
performance_level = &(smu7_power_state->performance_levels
[smu7_power_state->performance_level_count++]);
@@ -5044,7 +5044,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
.get_fan_control_mode = smu7_get_fan_control_mode,
.force_clock_level = smu7_force_clock_level,
.print_clock_levels = smu7_print_clock_levels,
- .enable_per_cu_power_gating = smu7_enable_per_cu_power_gating,
+ .powergate_gfx = smu7_powergate_gfx,
.get_sclk_od = smu7_get_sclk_od,
.set_sclk_od = smu7_set_sclk_od,
.get_mclk_od = smu7_get_mclk_od,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index 93a3d022ba47..3effb5583d1f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -652,7 +652,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
}
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
uint32_t i;
struct watermarks *table = wt_table;
@@ -660,49 +660,49 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
if (!table || !wm_with_clock_ranges)
return -EINVAL;
- if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4)
+ if (wm_with_clock_ranges->num_wm_dmif_sets > 4 || wm_with_clock_ranges->num_wm_mcif_sets > 4)
return -EINVAL;
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
table->WatermarkRow[1][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
100);
table->WatermarkRow[1][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[1][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
+ wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
}
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
+ for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
table->WatermarkRow[0][i].MinClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MaxClock =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MinUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].MaxUclk =
cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
+ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
+ 1000);
table->WatermarkRow[0][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
+ wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
index 916cc01e7652..5454289d5226 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h
@@ -107,7 +107,7 @@ int smu_get_voltage_dependency_table_ppt_v1(
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table);
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
#define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 3b8d36df52e9..5e771bc119d6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -55,12 +55,6 @@
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
-#define MEM_FREQ_LOW_LATENCY 25000
-#define MEM_FREQ_HIGH_LATENCY 80000
-#define MEM_LATENCY_HIGH 245
-#define MEM_LATENCY_LOW 35
-#define MEM_LATENCY_ERR 0xFFFF
-
#define mmDF_CS_AON0_DramBaseAddress0 0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
@@ -3223,7 +3217,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
/* Find the lowest MCLK frequency that is within
* the tolerable latency defined in DAL
*/
- latency = 0;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
for (i = 0; i < data->mclk_latency_table.count; i++) {
if ((data->mclk_latency_table.entries[i].latency <= latency) &&
(data->mclk_latency_table.entries[i].frequency >=
@@ -4064,28 +4058,17 @@ static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
table_info->vdd_dep_on_sclk;
uint32_t i;
+ clocks->num_levels = 0;
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
clocks->data[clocks->num_levels].clocks_in_khz =
- dep_table->entries[i].clk;
+ dep_table->entries[i].clk * 10;
clocks->num_levels++;
}
}
}
-static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
- uint32_t clock)
-{
- if (clock >= MEM_FREQ_LOW_LATENCY &&
- clock < MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_HIGH;
- else if (clock >= MEM_FREQ_HIGH_LATENCY)
- return MEM_LATENCY_LOW;
- else
- return MEM_LATENCY_ERR;
-}
-
static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct pp_clock_levels_with_latency *clocks)
{
@@ -4094,26 +4077,22 @@ static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
table_info->vdd_dep_on_mclk;
struct vega10_hwmgr *data = hwmgr->backend;
+ uint32_t j = 0;
uint32_t i;
- clocks->num_levels = 0;
- data->mclk_latency_table.count = 0;
-
for (i = 0; i < dep_table->count; i++) {
if (dep_table->entries[i].clk) {
- clocks->data[clocks->num_levels].clocks_in_khz =
- data->mclk_latency_table.entries
- [data->mclk_latency_table.count].frequency =
- dep_table->entries[i].clk;
- clocks->data[clocks->num_levels].latency_in_us =
- data->mclk_latency_table.entries
- [data->mclk_latency_table.count].latency =
- vega10_get_mem_latency(hwmgr,
- dep_table->entries[i].clk);
- clocks->num_levels++;
- data->mclk_latency_table.count++;
+
+ clocks->data[j].clocks_in_khz =
+ dep_table->entries[i].clk * 10;
+ data->mclk_latency_table.entries[j].frequency =
+ dep_table->entries[i].clk;
+ clocks->data[j].latency_in_us =
+ data->mclk_latency_table.entries[j].latency = 25;
+ j++;
}
}
+ clocks->num_levels = data->mclk_latency_table.count = j;
}
static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
@@ -4126,7 +4105,7 @@ static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4142,7 +4121,7 @@ static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
uint32_t i;
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].latency_in_us = 0;
clocks->num_levels++;
}
@@ -4202,7 +4181,7 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
for (i = 0; i < dep_table->count; i++) {
- clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
+ clocks->data[i].clocks_in_khz = dep_table->entries[i].clk * 10;
clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
entries[dep_table->entries[i].vddInd].us_vdd);
clocks->num_levels++;
@@ -4215,9 +4194,10 @@ static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_range)
{
struct vega10_hwmgr *data = hwmgr->backend;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
int result = 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index dbe4b1f66784..22364875a943 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- int result;
+ int result = 0;
uint32_t num_se = 0;
uint32_t count, data;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 782e2098824d..57492878874f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disallowed_features = 0x0;
data->registry_data.od_state_in_dc_support = 0;
+ data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
data->registry_data.log_avfs_param = 0;
@@ -453,37 +454,30 @@ static int vega12_setup_asic_task(struct pp_hwmgr *hwmgr)
*/
static void vega12_init_dpm_state(struct vega12_dpm_state *dpm_state)
{
- dpm_state->soft_min_level = 0xff;
- dpm_state->soft_max_level = 0xff;
- dpm_state->hard_min_level = 0xff;
- dpm_state->hard_max_level = 0xff;
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
}
-static int vega12_get_number_dpm_level(struct pp_hwmgr *hwmgr,
- PPCLK_e clkID, uint32_t *num_dpm_level)
+static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
+ PPCLK_e clk_id, uint32_t *num_of_levels)
{
- int result;
- /*
- * SMU expects the Clock ID to be in the top 16 bits.
- * Lower 16 bits specify the level however 0xFF is a
- * special argument the returns the total number of levels
- */
- PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | 0xFF)) == 0,
- "[GetNumberDpmLevel] Failed to get DPM levels from SMU for CLKID!",
- return -EINVAL);
-
- result = vega12_read_arg_from_smc(hwmgr, num_dpm_level);
+ int ret = 0;
- PP_ASSERT_WITH_CODE(*num_dpm_level < MAX_REGULAR_DPM_NUMBER,
- "[GetNumberDPMLevel] Number of DPM levels is greater than limit",
- return -EINVAL);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ PP_ASSERT_WITH_CODE(!ret,
+ "[GetNumOfDpmLevel] failed to get dpm levels!",
+ return ret);
- PP_ASSERT_WITH_CODE(*num_dpm_level != 0,
- "[GetNumberDPMLevel] Number of CLK Levels is zero!",
- return -EINVAL);
+ vega12_read_arg_from_smc(hwmgr, num_of_levels);
+ PP_ASSERT_WITH_CODE(*num_of_levels > 0,
+ "[GetNumOfDpmLevel] number of clk levels is invalid!",
+ return -EINVAL);
- return result;
+ return ret;
}
static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
@@ -509,6 +503,31 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
return result;
}
+static int vega12_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
+ struct vega12_single_dpm_table *dpm_table, PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels, clk;
+
+ ret = vega12_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupSingleDpmTable] failed to get clk levels!",
+ return ret);
+
+ dpm_table->count = num_of_levels;
+
+ for (i = 0; i < num_of_levels; i++) {
+ ret = vega12_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupSingleDpmTable] failed to get clk of specific level!",
+ return ret);
+ dpm_table->dpm_levels[i].value = clk;
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+
+ return ret;
+}
+
/*
* This function is to initialize all DPM state tables
* for SMU based on the dependency table.
@@ -519,224 +538,136 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*/
static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
- uint32_t num_levels, i, clock;
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
-
struct vega12_single_dpm_table *dpm_table;
+ int ret = 0;
memset(&data->dpm_table, 0, sizeof(data->dpm_table));
- /* Initialize Sclk DPM and SOC DPM table based on allow Sclk values */
+ /* socclk */
dpm_table = &(data->dpm_table.soc_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_SOCCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_SOCCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for SOCCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* gfxclk */
dpm_table = &(data->dpm_table.gfx_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_GFXCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_GFXCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for GFXCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
- /* Initialize Mclk DPM table based on allow Mclk values */
- dpm_table = &(data->dpm_table.mem_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_UCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_UCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for UCLK!",
- return -EINVAL);
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* eclk */
dpm_table = &(data->dpm_table.eclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_ECLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_ECLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for ECLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* vclk */
dpm_table = &(data->dpm_table.vclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_VCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_VCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for VCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* dclk */
dpm_table = &(data->dpm_table.dclk_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr, PPCLK_DCLK,
- &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
- /* Assume there is no headless Vega12 for now */
+ /* dcefclk */
dpm_table = &(data->dpm_table.dcef_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_DCEFCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DCEFCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DCEFCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
+ return ret);
+ } else {
+ dpm_table->count = 1;
+ dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
}
-
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* pixclk */
dpm_table = &(data->dpm_table.pixel_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_PIXCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_PIXCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PIXCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* dispclk */
dpm_table = &(data->dpm_table.display_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_DISPCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_DISPCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for DISPCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
+ /* phyclk */
dpm_table = &(data->dpm_table.phy_table);
-
- PP_ASSERT_WITH_CODE(vega12_get_number_dpm_level(hwmgr,
- PPCLK_PHYCLK, &num_levels) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
- return -EINVAL);
-
- dpm_table->count = num_levels;
-
- for (i = 0; i < num_levels; i++) {
- PP_ASSERT_WITH_CODE(vega12_get_dpm_frequency_by_index(hwmgr,
- PPCLK_PHYCLK, i, &clock) == 0,
- "[SetupDefaultDPMTables] Failed to get DPM levels from SMU for PHYCLK!",
- return -EINVAL);
-
- dpm_table->dpm_levels[i].value = clock;
- dpm_table->dpm_levels[i].enabled = true;
- }
-
+ if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
+ ret = vega12_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
+ PP_ASSERT_WITH_CODE(!ret,
+ "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
+ return ret);
+ } else
+ dpm_table->count = 0;
vega12_init_dpm_state(&(dpm_table->dpm_state));
/* save a copy of the default DPM table */
@@ -803,6 +734,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+ data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+ data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+ data->vbios_boot_state.vclock = boot_up_values.ulVClk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
@@ -844,6 +778,21 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
return 0;
}
+static void vega12_init_powergate_state(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+
+ data->uvd_power_gated = true;
+ data->vce_power_gated = true;
+
+ if (data->smu_features[GNLD_DPM_UVD].enabled)
+ data->uvd_power_gated = false;
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled)
+ data->vce_power_gated = false;
+}
+
static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
@@ -862,12 +811,11 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? true : false;
data->smu_features[i].enabled = enabled;
data->smu_features[i].supported = enabled;
- PP_ASSERT(
- !data->smu_features[i].allowed || enabled,
- "[EnableAllSMUFeatures] Enabled feature is different from allowed, expected disabled!");
}
}
+ vega12_init_powergate_state(hwmgr);
+
return 0;
}
@@ -923,6 +871,48 @@ static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
return result;
}
+static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
+ PPCLK_e clkid, struct vega12_clock_range *clock)
+{
+ /* AC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max ac clock from SMC!",
+ return -EINVAL);
+ vega12_read_arg_from_smc(hwmgr, &(clock->ACMax));
+
+ /* AC Min */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get min ac clock from SMC!",
+ return -EINVAL);
+ vega12_read_arg_from_smc(hwmgr, &(clock->ACMin));
+
+ /* DC Max */
+ PP_ASSERT_WITH_CODE(
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+ "[GetClockRanges] Failed to get max dc clock from SMC!",
+ return -EINVAL);
+ vega12_read_arg_from_smc(hwmgr, &(clock->DCMax));
+
+ return 0;
+}
+
+static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data =
+ (struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t i;
+
+ for (i = 0; i < PPCLK_COUNT; i++)
+ PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr,
+ i, &(data->clk_range[i])),
+ "Failed to get clk range from SMC!",
+ return -EINVAL);
+
+ return 0;
+}
+
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -950,6 +940,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"Failed to power control set level!",
result = tmp_result);
+ result = vega12_get_all_clock_ranges(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to get all clock ranges!",
+ return result);
+
result = vega12_odn_initialize_default_settings(hwmgr);
PP_ASSERT_WITH_CODE(!result,
"Failed to power control set level!",
@@ -978,76 +973,172 @@ static uint32_t vega12_find_lowest_dpm_level(
break;
}
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
return i;
}
static uint32_t vega12_find_highest_dpm_level(
struct vega12_single_dpm_table *table)
{
- uint32_t i = 0;
+ int32_t i = 0;
+ PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
+ "[FindHighestDPMLevel] DPM Table has too many entries!",
+ return MAX_REGULAR_DPM_NUMBER - 1);
- if (table->count <= MAX_REGULAR_DPM_NUMBER) {
- for (i = table->count; i > 0; i--) {
- if (table->dpm_levels[i - 1].enabled)
- return i - 1;
- }
- } else {
- pr_info("DPM Table Has Too Many Entries!");
- return MAX_REGULAR_DPM_NUMBER - 1;
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
}
- return i;
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return (uint32_t)i;
}
static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
- if (data->smc_state_table.gfx_boot_level !=
- data->dpm_table.gfx_table.dpm_state.soft_min_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinByFreq,
- PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value);
- data->dpm_table.gfx_table.dpm_state.soft_min_level =
- data->smc_state_table.gfx_boot_level;
+ uint32_t min_freq;
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min gfxclk !",
+ return ret);
}
- if (data->smc_state_table.mem_boot_level !=
- data->dpm_table.mem_table.dpm_state.soft_min_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMinByFreq,
- PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value);
- data->dpm_table.mem_table.dpm_state.soft_min_level =
- data->smc_state_table.mem_boot_level;
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min memclk !",
+ return ret);
+
+ min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set hard min memclk !",
+ return ret);
}
- return 0;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min vclk!",
+ return ret);
+
+ min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min dclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min eclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMinByFreq,
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ "Failed to set soft min socclk!",
+ return ret);
+ }
+
+ return ret;
}
static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = hwmgr->backend;
- if (data->smc_state_table.gfx_max_level !=
- data->dpm_table.gfx_table.dpm_state.soft_max_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMaxByFreq,
- /* plus the vale by 1 to align the resolution */
- PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1));
- data->dpm_table.gfx_table.dpm_state.soft_max_level =
- data->smc_state_table.gfx_max_level;
+ uint32_t max_freq;
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
+ max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max gfxclk!",
+ return ret);
}
- if (data->smc_state_table.mem_max_level !=
- data->dpm_table.mem_table.dpm_state.soft_max_level) {
- smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetSoftMaxByFreq,
- /* plus the vale by 1 to align the resolution */
- PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1));
- data->dpm_table.mem_table.dpm_state.soft_max_level =
- data->smc_state_table.mem_max_level;
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max memclk!",
+ return ret);
}
- return 0;
+ if (data->smu_features[GNLD_DPM_UVD].enabled) {
+ max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max vclk!",
+ return ret);
+
+ max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max dclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_VCE].enabled) {
+ max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max eclk!",
+ return ret);
+ }
+
+ if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
+ max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
+
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
+ hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ "Failed to set soft max socclk!",
+ return ret);
+ }
+
+ return ret;
}
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
@@ -1136,8 +1227,8 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
*gfx_freq = 0;
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
return -1);
PP_ASSERT_WITH_CODE(
@@ -1306,9 +1397,9 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
(struct vega12_hwmgr *)(hwmgr->backend);
struct PP_Clocks min_clocks = {0};
struct pp_display_clock_request clock_req;
- uint32_t clk_request;
- if (hwmgr->display_config->num_display > 1)
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync)
vega12_notify_smc_display_change(hwmgr, false);
else
vega12_notify_smc_display_change(hwmgr, true);
@@ -1333,15 +1424,6 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
}
}
- if (data->smu_features[GNLD_DPM_UCLK].enabled) {
- clk_request = (PPCLK_UCLK << 16) | (min_clocks.memoryClock) / 100;
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinByFreq, clk_request) == 0,
- "[PhwVega12_NotifySMCDisplayConfigAfterPowerStateAdjustment] Attempt to set UCLK HardMin Failed!",
- return -1);
- data->dpm_table.mem_table.dpm_state.hard_min_level = min_clocks.memoryClock;
- }
-
return 0;
}
@@ -1350,12 +1432,19 @@ static int vega12_force_dpm_highest(struct pp_hwmgr *hwmgr)
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
- data->smc_state_table.gfx_boot_level =
- data->smc_state_table.gfx_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- data->smc_state_table.mem_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+ uint32_t soft_level;
+
+ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+ soft_level = vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
@@ -1372,13 +1461,19 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data =
(struct vega12_hwmgr *)(hwmgr->backend);
+ uint32_t soft_level;
+
+ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.gfx_boot_level =
- data->smc_state_table.gfx_max_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- data->smc_state_table.mem_max_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_level].value;
+
+ soft_level = vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
+
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_level].value;
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload boot level to highest!",
@@ -1394,17 +1489,6 @@ static int vega12_force_dpm_lowest(struct pp_hwmgr *hwmgr)
static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
- struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- data->smc_state_table.gfx_boot_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.gfx_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.gfx_table));
- data->smc_state_table.mem_boot_level =
- vega12_find_lowest_dpm_level(&(data->dpm_table.mem_table));
- data->smc_state_table.mem_max_level =
- vega12_find_highest_dpm_level(&(data->dpm_table.mem_table));
-
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
"Failed to upload DPM Bootup Levels!",
return -1);
@@ -1412,22 +1496,28 @@ static int vega12_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
"Failed to upload DPM Max Levels!",
return -1);
+
return 0;
}
-#if 0
static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
- struct phm_ppt_v2_information *table_info =
- (struct phm_ppt_v2_information *)(hwmgr->pptable);
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+ struct vega12_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);
- if (table_info->vdd_dep_on_sclk->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
- table_info->vdd_dep_on_socclk->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL &&
- table_info->vdd_dep_on_mclk->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
+ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > VEGA12_UMD_PSTATE_SOCCLK_LEVEL) {
*sclk_mask = VEGA12_UMD_PSTATE_GFXCLK_LEVEL;
- *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA12_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = VEGA12_UMD_PSTATE_SOCCLK_LEVEL;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -1435,13 +1525,13 @@ static int vega12_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
*mclk_mask = 0;
} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
- *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
- *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
- *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
}
+
return 0;
}
-#endif
static void vega12_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
@@ -1465,11 +1555,9 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
int ret = 0;
-#if 0
uint32_t sclk_mask = 0;
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
-#endif
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1485,27 +1573,18 @@ static int vega12_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
-#if 0
ret = vega12_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
if (ret)
return ret;
- vega12_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
- vega12_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
-#endif
+ vega12_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
+ vega12_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
}
-#if 0
- if (!ret) {
- if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
- else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
- vega12_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
- }
-#endif
+
return ret;
}
@@ -1539,24 +1618,14 @@ static int vega12_get_clock_ranges(struct pp_hwmgr *hwmgr,
PPCLK_e clock_select,
bool max)
{
- int result;
- *clock = 0;
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
- if (max) {
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16)) == 0,
- "[GetClockRanges] Failed to get max clock from SMC!",
- return -1);
- result = vega12_read_arg_from_smc(hwmgr, clock);
- } else {
- PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16)) == 0,
- "[GetClockRanges] Failed to get min clock from SMC!",
- return -1);
- result = vega12_read_arg_from_smc(hwmgr, clock);
- }
+ if (max)
+ *clock = data->clk_range[clock_select].ACMax;
+ else
+ *clock = data->clk_range[clock_select].ACMin;
- return result;
+ return 0;
}
static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
@@ -1571,12 +1640,12 @@ static int vega12_get_sclks(struct pp_hwmgr *hwmgr,
return -1;
dpm_table = &(data->dpm_table.gfx_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1603,13 +1672,12 @@ static int vega12_get_memclocks(struct pp_hwmgr *hwmgr,
return -1;
dpm_table = &(data->dpm_table.mem_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_UCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_UCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
- clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
-
+ clocks->data[i].clocks_in_khz = dpm_table->dpm_levels[i].value * 1000;
+ data->mclk_latency_table.entries[i].frequency = dpm_table->dpm_levels[i].value * 100;
clocks->data[i].latency_in_us =
data->mclk_latency_table.entries[i].latency =
vega12_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
@@ -1633,12 +1701,12 @@ static int vega12_get_dcefclocks(struct pp_hwmgr *hwmgr,
dpm_table = &(data->dpm_table.dcef_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1661,12 +1729,12 @@ static int vega12_get_socclocks(struct pp_hwmgr *hwmgr,
dpm_table = &(data->dpm_table.soc_table);
- ucount = (dpm_table->count > VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS) ?
- VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS : dpm_table->count;
+ ucount = (dpm_table->count > MAX_NUM_CLOCKS) ?
+ MAX_NUM_CLOCKS : dpm_table->count;
for (i = 0; i < ucount; i++) {
clocks->data[i].clocks_in_khz =
- dpm_table->dpm_levels[i].value * 100;
+ dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
}
@@ -1713,99 +1781,69 @@ static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
}
static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
+ void *clock_ranges)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
Watermarks_t *table = &(data->smc_state_table.water_marks_table);
- int result = 0;
- uint32_t i;
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;
if (!data->registry_data.disable_water_mark &&
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported) {
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
- table->WatermarkRow[WM_DCEFCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
- }
-
- for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
- table->WatermarkRow[WM_SOCCLK][i].MinClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxClock =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MinUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
- cpu_to_le16((uint16_t)
- (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
- 100);
- table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
- wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
- }
+ smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
data->water_marks_bitmap |= WaterMarksExist;
data->water_marks_bitmap &= ~WaterMarksLoaded;
}
- return result;
+ return 0;
}
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
enum pp_clock_type type, uint32_t mask)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
-
- if (hwmgr->request_dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
- AMD_DPM_FORCED_LEVEL_LOW |
- AMD_DPM_FORCED_LEVEL_HIGH))
- return -EINVAL;
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
switch (type) {
case PP_SCLK:
- data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ data->dpm_table.gfx_table.dpm_state.soft_min_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.gfx_table.dpm_state.soft_max_level =
+ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ ret = vega12_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
- return -EINVAL);
+ return ret);
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ ret = vega12_upload_dpm_max_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
- return -EINVAL);
+ return ret);
break;
case PP_MCLK:
- data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
- data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr),
+ data->dpm_table.mem_table.dpm_state.soft_min_level =
+ data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
+ data->dpm_table.mem_table.dpm_state.soft_max_level =
+ data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
+
+ ret = vega12_upload_dpm_min_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload boot level to lowest!",
- return -EINVAL);
+ return ret);
- PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr),
+ ret = vega12_upload_dpm_max_level(hwmgr);
+ PP_ASSERT_WITH_CODE(!ret,
"Failed to upload dpm max level to highest!",
- return -EINVAL);
+ return ret);
break;
@@ -1838,8 +1876,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
case PP_MCLK:
@@ -1854,8 +1892,8 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
- i, clocks.data[i].clocks_in_khz / 100,
- (clocks.data[i].clocks_in_khz == now) ? "*" : "");
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
case PP_PCIE:
@@ -1867,6 +1905,205 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
return size;
}
+static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *dpm_table;
+ bool vblank_too_short = false;
+ bool disable_mclk_switching;
+ uint32_t i, latency;
+
+ disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
+ !hwmgr->display_config->multi_monitor_in_sync) ||
+ vblank_too_short;
+ latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
+
+ /* gfxclk */
+ dpm_table = &(data->dpm_table.gfx_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* memclk */
+ dpm_table = &(data->dpm_table.mem_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* honour DAL's UCLK Hardmin */
+ if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
+ dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;
+
+ /* Hardmin is dependent on displayconfig */
+ if (disable_mclk_switching) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
+ if (data->mclk_latency_table.entries[i].latency <= latency) {
+ if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+ break;
+ }
+ }
+ }
+ }
+
+ if (hwmgr->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ /* vclk */
+ dpm_table = &(data->dpm_table.vclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* dclk */
+ dpm_table = &(data->dpm_table.dclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* socclk */
+ dpm_table = &(data->dpm_table.soc_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_SOCCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ /* eclk */
+ dpm_table = &(data->dpm_table.eclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
+ if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ }
+
+ if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ }
+
+ return 0;
+}
+
+static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
+ struct vega12_single_dpm_table *dpm_table)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled) {
+ PP_ASSERT_WITH_CODE(dpm_table->count > 0,
+ "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
+ return -EINVAL);
+ PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
+ "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
+ return -EINVAL);
+
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
+ return ret);
+ }
+
+ return ret;
+}
+
+static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays, 0);
+
+ ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
+ &data->dpm_table.mem_table);
+
+ return ret;
+}
+
static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
@@ -1911,6 +2148,9 @@ static void vega12_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ if (data->vce_power_gated == bgate)
+ return;
+
data->vce_power_gated = bgate;
vega12_enable_disable_vce_dpm(hwmgr, !bgate);
}
@@ -1919,6 +2159,9 @@ static void vega12_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ if (data->uvd_power_gated == bgate)
+ return;
+
data->uvd_power_gated = bgate;
vega12_enable_disable_uvd_dpm(hwmgr, !bgate);
}
@@ -2113,6 +2356,10 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.display_clock_voltage_request = vega12_display_clock_voltage_request,
.force_clock_level = vega12_force_clock_level,
.print_clock_levels = vega12_print_clock_levels,
+ .apply_clocks_adjust_rules =
+ vega12_apply_clocks_adjust_rules,
+ .pre_display_config_changed =
+ vega12_pre_display_configuration_changed_task,
.display_config_changed = vega12_display_configuration_changed_task,
.powergate_uvd = vega12_power_gate_uvd,
.powergate_vce = vega12_power_gate_vce,
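
Note (editor's sketch, not part of the patch): with the vega12_hwmgr.c changes above, the forced-level and force_clock_level paths stop writing the smc_state_table boot/max levels and instead record soft limits in dpm_table.*.dpm_state, leaving vega12_upload_dpm_min_level()/vega12_upload_dpm_max_level() to push them to the SMU. Assuming those helpers reuse the message encoding shown earlier for PPSMC_MSG_SetSoftMaxByFreq (clock selector in the high 16 bits, frequency in the low 16), a per-clock upload would look roughly like the following; the helper name is illustrative only:

	static int example_upload_soft_max(struct pp_hwmgr *hwmgr,
			PPCLK_e clk, uint32_t freq)
	{
		/* clock selector in bits 31:16, target frequency in bits 15:0 */
		return smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMaxByFreq,
				(clk << 16) | (freq & 0xffff));
	}
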
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e81ded1ec198..e17237c90eea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
uint32_t mem_clock;
uint32_t soc_clock;
uint32_t dcef_clock;
+ uint32_t eclock;
+ uint32_t dclock;
+ uint32_t vclock;
};
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
@@ -301,6 +304,12 @@ struct vega12_odn_fan_table {
bool force_fan_pwm;
};
+struct vega12_clock_range {
+ uint32_t ACMax;
+ uint32_t ACMin;
+ uint32_t DCMax;
+};
+
struct vega12_hwmgr {
struct vega12_dpm_table dpm_table;
struct vega12_dpm_table golden_dpm_table;
@@ -382,6 +391,8 @@ struct vega12_hwmgr {
uint32_t smu_version;
struct smu_features smu_features[GNLD_FEATURES_MAX];
struct vega12_smc_state_table smc_state_table;
+
+ struct vega12_clock_range clk_range[PPCLK_COUNT];
};
#define VEGA12_DPM2_NEAR_TDP_DEC 10
@@ -432,6 +443,8 @@ struct vega12_hwmgr {
#define VEGA12_UMD_PSTATE_GFXCLK_LEVEL 0x3
#define VEGA12_UMD_PSTATE_SOCCLK_LEVEL 0x3
#define VEGA12_UMD_PSTATE_MCLK_LEVEL 0x2
+#define VEGA12_UMD_PSTATE_UVDCLK_LEVEL 0x3
+#define VEGA12_UMD_PSTATE_VCEMCLK_LEVEL 0x3
int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 888ddca902d8..f4f366b26fd1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -224,11 +224,7 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent;
ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq;
- /* 0xFFFF will disable the ACG feature */
- if (!(hwmgr->feature_mask & PP_ACG_MASK)) {
- ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF;
- ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
- }
+ ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index a202247c9894..429c9c4322da 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -455,7 +455,7 @@ extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ void *clock_ranges);
extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 40c98ca5feb7..b3363f26039a 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -293,8 +293,7 @@ struct pp_hwmgr_func {
int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks);
- int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr,
- struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges);
+ int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges);
int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock);
int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
@@ -302,7 +301,7 @@ struct pp_hwmgr_func {
int (*power_off_asic)(struct pp_hwmgr *hwmgr);
int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
- int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
+ int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable);
int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
@@ -327,7 +326,7 @@ struct pp_hwmgr_func {
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size);
int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
- int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr);
+ int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index 2f8a3b983cce..b6ffd08784e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -412,10 +412,10 @@ typedef struct {
QuadraticInt_t ReservedEquation2;
QuadraticInt_t ReservedEquation3;
- uint16_t MinVoltageUlvGfx;
- uint16_t MinVoltageUlvSoc;
+ uint16_t MinVoltageUlvGfx;
+ uint16_t MinVoltageUlvSoc;
- uint32_t Reserved[14];
+ uint32_t Reserved[14];
@@ -483,9 +483,9 @@ typedef struct {
uint8_t padding8_4;
- uint8_t PllGfxclkSpreadEnabled;
- uint8_t PllGfxclkSpreadPercent;
- uint16_t PllGfxclkSpreadFreq;
+ uint8_t PllGfxclkSpreadEnabled;
+ uint8_t PllGfxclkSpreadPercent;
+ uint16_t PllGfxclkSpreadFreq;
uint8_t UclkSpreadEnabled;
uint8_t UclkSpreadPercent;
@@ -495,11 +495,14 @@ typedef struct {
uint8_t SocclkSpreadPercent;
uint16_t SocclkSpreadFreq;
- uint8_t AcgGfxclkSpreadEnabled;
- uint8_t AcgGfxclkSpreadPercent;
- uint16_t AcgGfxclkSpreadFreq;
+ uint8_t AcgGfxclkSpreadEnabled;
+ uint8_t AcgGfxclkSpreadPercent;
+ uint16_t AcgGfxclkSpreadFreq;
- uint32_t BoardReserved[10];
+ uint8_t Vr2_I2C_address;
+ uint8_t padding_vr2[3];
+
+ uint32_t BoardReserved[9];
uint32_t MmHubPadding[7];
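
Editor's note on the last hunk: the new one-byte Vr2_I2C_address (copied from the VBIOS smc_dpm_table in vega12_processpptables.c above) plus three bytes of padding are carved out of the reserved area, with BoardReserved shrinking from 10 to 9 dwords, so the size of the table shared with the SMU firmware does not change. A standalone illustration of that arithmetic, not part of the patch:

	#include <stdint.h>

	/* Tail of the board-parameters block as it reads after this patch. */
	struct board_params_tail_example {
		uint8_t  Vr2_I2C_address;  /* populated from the VBIOS smc_dpm_table */
		uint8_t  padding_vr2[3];   /* keeps the following dwords aligned */
		uint32_t BoardReserved[9]; /* was BoardReserved[10] before the patch */
		uint32_t MmHubPadding[7];
	};

	/* The 1 + 3 new bytes replace exactly one reserved dword (4 + 36 == 40 bytes,
	 * the space BoardReserved[10] used to occupy), so the overall size holds. */
	_Static_assert(sizeof(struct board_params_tail_example) == 68,
		       "board-parameters tail must stay 68 bytes");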