Diffstat (limited to 'drivers/gpu/drm/amd')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c  63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c  66
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c  57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c  57
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c  6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c  10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c  55
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c  23
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c  9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c  30
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h  5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h  12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c  369
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.h  5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c  67
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c  1
38 files changed, 844 insertions(+), 78 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 58f6b3b92831..4f76beafb2fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3109,7 +3109,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
int r;
DRM_INFO("amdgpu: finishing device.\n");
+ flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -3127,7 +3129,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->firmware.gpu_info_fw = NULL;
}
adev->accel_working = false;
- cancel_delayed_work_sync(&adev->delayed_init_work);
/* free i2c buses */
if (!amdgpu_device_has_dc_support(adev))
amdgpu_i2c_fini(adev);
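
Note on the amdgpu_device.c hunk above: the delayed init work is now flushed
before adev->shutdown is set, instead of being cancelled later in teardown,
so any in-flight late-init work completes before the state it touches is torn
down. A minimal sketch of the pattern, with a hypothetical function name (the
real work item is adev->delayed_init_work):

    #include <linux/workqueue.h>

    static void device_fini_ordering(struct delayed_work *late_init, bool *shutdown)
    {
            /*
             * flush_delayed_work() waits for a queued or already-running
             * handler to finish; cancelling it later in fini could race
             * with a handler that had already started executing.
             */
            flush_delayed_work(late_init);
            *shutdown = true;
            /* ... disable interrupts and free resources ... */
    }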
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index d2dd59a95e8a..3cadb0b76f22 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -514,7 +514,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
* Also, don't allow GTT domain if the BO doesn't have the USWC flag set.
*/
if (adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type <= CHIP_RAVEN &&
+ adev->asic_type < CHIP_RAVEN &&
(adev->flags & AMD_IS_APU) &&
(bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
amdgpu_bo_support_uswc(bo_flags) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 557be89421a8..0ffc9447b573 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -998,10 +998,10 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
/* Navi14 */
- {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
- {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
+ {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
/* Renoir */
{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index c9d1fada6188..e00b46180d2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -454,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
}
ring = &adev->gfx.kiq.ring;
- if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring)
- kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]);
kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index a74ecd449775..0ae0a2715b0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -225,7 +225,7 @@ struct amdgpu_me {
uint32_t num_me;
uint32_t num_pipe_per_me;
uint32_t num_queue_per_pipe;
- void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1];
+ void *mqd_backup[AMDGPU_MAX_GFX_RINGS];
/* These are the resources for which amdgpu takes ownership */
DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 406736a1bd3d..b499a3de8bb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -77,6 +77,7 @@ struct amdgpu_gmc_fault {
struct amdgpu_vmhub {
uint32_t ctx0_ptb_addr_lo32;
uint32_t ctx0_ptb_addr_hi32;
+ uint32_t vm_inv_eng0_sem;
uint32_t vm_inv_eng0_req;
uint32_t vm_inv_eng0_ack;
uint32_t vm_context0_cntl;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 82c46c4faaad..b6db28a570c2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -655,15 +655,19 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return -ENOMEM;
alloc_size = info->read_mmr_reg.count * sizeof(*regs);
- for (i = 0; i < info->read_mmr_reg.count; i++)
+ amdgpu_gfx_off_ctrl(adev, false);
+ for (i = 0; i < info->read_mmr_reg.count; i++) {
if (amdgpu_asic_read_register(adev, se_num, sh_num,
info->read_mmr_reg.dword_offset + i,
&regs[i])) {
DRM_DEBUG_KMS("unallowed offset %#x\n",
info->read_mmr_reg.dword_offset + i);
kfree(regs);
+ amdgpu_gfx_off_ctrl(adev, true);
return -EFAULT;
}
+ }
+ amdgpu_gfx_off_ctrl(adev, true);
n = copy_to_user(out, regs, min(size, alloc_size));
kfree(regs);
return n ? -EFAULT : 0;
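
The amdgpu_kms.c change wraps the whole MMIO read loop in a GFXOFF guard:
reading GC registers while the GFX block is power-gated can hang or return
stale data, so GFXOFF is disallowed for the duration and re-allowed on every
exit path. Condensed sketch of the guard shape from the hunk above
(simplified variable names):

    amdgpu_gfx_off_ctrl(adev, false);            /* ref-counted: disallow GFXOFF */
    for (i = 0; i < count; i++) {
            if (amdgpu_asic_read_register(adev, se_num, sh_num,
                                          offset + i, &regs[i])) {
                    kfree(regs);
                    amdgpu_gfx_off_ctrl(adev, true);   /* balance on error path */
                    return -EFAULT;
            }
    }
    amdgpu_gfx_off_ctrl(adev, true);             /* re-allow GFXOFF */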
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index bbe9ac7e843f..2770cba56a6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -567,7 +567,9 @@ static int psp_xgmi_initialize(struct psp_context *psp)
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
- if (!psp->adev->psp.ta_fw)
+ if (!psp->adev->psp.ta_fw ||
+ !psp->adev->psp.ta_xgmi_ucode_size ||
+ !psp->adev->psp.ta_xgmi_start_addr)
return -ENOENT;
if (!psp->xgmi_context.initialized) {
@@ -756,6 +758,12 @@ static int psp_ras_terminate(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
if (!psp->ras.ras_initialized)
return 0;
@@ -777,6 +785,18 @@ static int psp_ras_initialize(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_ras_ucode_size ||
+ !psp->adev->psp.ta_ras_start_addr) {
+ dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ return 0;
+ }
+
if (!psp->ras.ras_initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
@@ -866,6 +886,18 @@ static int psp_hdcp_initialize(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_hdcp_ucode_size ||
+ !psp->adev->psp.ta_hdcp_start_addr) {
+ dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ return 0;
+ }
+
if (!psp->hdcp_context.hdcp_initialized) {
ret = psp_hdcp_init_shared_buf(psp);
if (ret)
@@ -948,6 +980,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
if (!psp->hdcp_context.hdcp_initialized)
return 0;
@@ -1039,6 +1077,18 @@ static int psp_dtm_initialize(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the initialize in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ if (!psp->adev->psp.ta_dtm_ucode_size ||
+ !psp->adev->psp.ta_dtm_start_addr) {
+ dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ return 0;
+ }
+
if (!psp->dtm_context.dtm_initialized) {
ret = psp_dtm_init_shared_buf(psp);
if (ret)
@@ -1091,6 +1141,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
{
int ret;
+ /*
+ * TODO: bypass the terminate in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
if (!psp->dtm_context.dtm_initialized)
return 0;
@@ -1411,7 +1467,10 @@ out:
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
|| ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G))
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM))
/* skip ucode loading in SRIOV VF */
continue;
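
Each TA (XGMI, RAS, HDCP, DTM) in the amdgpu_psp.c hunks now gets the same
two guards before initialization: bail out early under SR-IOV, and degrade
gracefully when the TA microcode was not shipped in the PSP firmware blob.
A condensed sketch of the shared shape, using RAS as the instance (the
function name here is a hypothetical stand-in for the four per-TA variants):

    static int psp_ta_initialize_sketch(struct psp_context *psp)
    {
            if (amdgpu_sriov_vf(psp->adev))      /* TODO: TAs under SR-IOV */
                    return 0;

            if (!psp->adev->psp.ta_ras_ucode_size ||
                !psp->adev->psp.ta_ras_start_addr) {
                    /* a missing TA is not fatal, just unsupported */
                    dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
                    return 0;
            }
            /* ... init the shared buffer and load the TA as before ... */
            return 0;
    }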
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index f940526c5889..63e734a125fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -473,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __string(ring, sched_job->base.sched->name);
+ __string(ring, sched_job->base.sched->name)
__field(uint64_t, id)
__field(struct dma_fence *, fence)
__field(uint64_t, ctx)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index b4dd89af6f09..e324bfe6c58f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -299,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 703677f2ff6f..46b590af2fd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -216,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 3199e4a5ff12..9d870444d7d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i, j;
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
&adev->vcn.dpg_sram_gpu_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index a93dd3dc0902..ca5f0e7ea1ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
-static void gfx_v10_0_init_csb(struct amdgpu_device *adev)
+static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
{
+ int r;
+
+ if (adev->in_gpu_reset) {
+ r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (!r) {
+ adev->gfx.rlc.funcs->get_csb_buffer(adev,
+ adev->gfx.rlc.cs_ptr);
+ amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
+ }
+
+ amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
+ if (r)
+ return r;
+ }
+
/* csib */
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+
+ return 0;
}
-static void gfx_v10_0_init_pg(struct amdgpu_device *adev)
+static int gfx_v10_0_init_pg(struct amdgpu_device *adev)
{
int i;
+ int r;
- gfx_v10_0_init_csb(adev);
+ r = gfx_v10_0_init_csb(adev);
+ if (r)
+ return r;
for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
/* TODO: init power gating */
- return;
+ return 0;
}
void gfx_v10_0_rlc_stop(struct amdgpu_device *adev)
@@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
r = gfx_v10_0_wait_for_rlc_autoload_complete(adev);
if (r)
return r;
- gfx_v10_0_init_pg(adev);
+
+ r = gfx_v10_0_init_pg(adev);
+ if (r)
+ return r;
/* enable RLC SRM */
gfx_v10_0_rlc_enable_srm(adev);
@@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- gfx_v10_0_init_pg(adev);
+ r = gfx_v10_0_init_pg(adev);
+ if (r)
+ return r;
+
adev->gfx.rlc.funcs->start(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
@@ -2400,7 +2431,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
+static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
@@ -2413,7 +2444,17 @@ static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
- udelay(50);
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt");
+
+ return 0;
}
static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev)
@@ -3114,6 +3155,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
+ int mqd_idx = ring - &adev->gfx.gfx_ring[0];
if (!adev->in_gpu_reset && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
@@ -3125,12 +3167,12 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
#endif
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
} else if (adev->in_gpu_reset) {
/* reset mqd with the backup copy */
- if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS])
- memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd));
+ if (adev->gfx.me.mqd_backup[mqd_idx])
+ memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
/* reset the ring */
ring->wptr = 0;
adev->wb.wb[ring->wptr_offs] = 0;
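
Two things happen in the gfx_v10_0.c hunks: the clear-state buffer is
regenerated after a GPU reset (its contents may not survive), and each gfx
ring now keeps its own MQD backup slot indexed by ring instead of one shared
slot. The CSB refill follows the standard amdgpu sequence for CPU access to
a kernel BO; the generic shape, with hypothetical bo/fill names:

    /* reserve -> kmap -> write -> kunmap -> unreserve */
    r = amdgpu_bo_reserve(bo, false);
    if (r)
            return r;
    r = amdgpu_bo_kmap(bo, (void **)&cpu_ptr);
    if (!r) {
            regenerate_contents(cpu_ptr);   /* e.g. get_csb_buffer() above */
            amdgpu_bo_kunmap(bo);
    }
    amdgpu_bo_unreserve(bo);                /* always unreserve, even if kmap failed */
    if (r)
            return r;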
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 3ebd5c20dfd3..faf2ffce5837 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -704,6 +704,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
@@ -1051,8 +1052,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
- &&((adev->gfx.rlc_fw_version != 106 &&
+ /* Disable GFXOFF on original raven. There are combinations
+ * of sbios and platforms that are not stable.
+ */
+ if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ &&((adev->gfx.rlc_fw_version != 106 &&
adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 9ec4297e61e5..e91bd7945777 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -367,6 +367,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index b4f32d853ca1..b70c7b483c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -356,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(GC, 0,
mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 27f68d32bfec..321f8a997be8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -235,6 +235,29 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
const unsigned eng = 17;
unsigned int i;
+ spin_lock(&adev->gmc.invalidate_lock);
+ /*
+ * It may lose gpuvm invalidate acknowldege state across power-gating
+ * off cycle, add semaphore acquire before invalidation and semaphore
+ * release after invalidation to avoid entering power gated state
+ * to WA the Issue
+ */
+
+ /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) {
+ for (i = 0; i < adev->usec_timeout; i++) {
+ /* a read return value of 1 means the semaphore has been acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (i >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/*
@@ -254,6 +277,17 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
udelay(1);
}
+ /* TODO: The semaphore workaround still needs debugging before it can be enabled for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1)
+ /*
+ * Release the semaphore after the invalidation;
+ * a write of 0 releases it.
+ */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
+ spin_unlock(&adev->gmc.invalidate_lock);
+
if (i < adev->usec_timeout)
return;
@@ -338,6 +372,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+ /*
+ * The hardware may lose the gpuvm invalidate acknowledge state across
+ * a power-gating off cycle. Add a semaphore acquire before the
+ * invalidation and a semaphore release after it, to avoid entering a
+ * power-gated state; this works around the issue.
+ */
+
+ /* TODO: The semaphore workaround still needs debugging before it can be enabled for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ /* a read return value of 1 means the semaphore has been acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -348,6 +396,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+ /* TODO: The semaphore workaround still needs debugging before it can be enabled for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ /*
+ * Release the semaphore after the invalidation;
+ * a write of 0 releases it.
+ */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
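
The gmc_v10_0.c (and matching gmc_v9_0.c) semaphore dance, in isolation: the
driver acquires the per-engine invalidation semaphore (the register reads
back 1 once it is held), issues the invalidation request, polls for the
acknowledge, and then releases the semaphore by writing 0. Holding the
semaphore keeps the hub from power-gating mid-invalidate, which is what
could drop the acknowledge state. Condensed sketch of the CPU path:

    spin_lock(&adev->gmc.invalidate_lock);

    /* acquire: reads back 1 once the semaphore is ours */
    for (i = 0; i < adev->usec_timeout; i++) {
            if (RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng) & 0x1)
                    break;
            udelay(1);
    }

    WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);   /* kick the invalidation */
    /* ... poll hub->vm_inv_eng0_ack for 1 << vmid ... */

    /* release: writing 0 hands the semaphore back, re-allowing power-gating */
    WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

    spin_unlock(&adev->gmc.invalidate_lock);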
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 9f2a893871ec..3c355fb5d2b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -459,6 +459,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
}
spin_lock(&adev->gmc.invalidate_lock);
+
+ /*
+ * The hardware may lose the gpuvm invalidate acknowledge state across
+ * a power-gating off cycle. Add a semaphore acquire before the
+ * invalidation and a semaphore release after it, to avoid entering a
+ * power-gated state; this works around the issue.
+ */
+
+ /* TODO: The semaphore workaround still needs debugging before it can be enabled for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1) {
+ for (j = 0; j < adev->usec_timeout; j++) {
+ /* a read return value of 1 means the semaphore has been acquired */
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
+ if (tmp & 0x1)
+ break;
+ udelay(1);
+ }
+
+ if (j >= adev->usec_timeout)
+ DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
+ }
+
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
/*
@@ -474,7 +497,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
break;
udelay(1);
}
+
+ /* TODO: The semaphore workaround still needs debugging before it can be enabled for GFXHUB as well. */
+ if (vmhub == AMDGPU_MMHUB_0 ||
+ vmhub == AMDGPU_MMHUB_1)
+ /*
+ * Release the semaphore after the invalidation;
+ * a write of 0 releases it.
+ */
+ WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);
+
spin_unlock(&adev->gmc.invalidate_lock);
+
if (j < adev->usec_timeout)
return;
@@ -489,6 +523,20 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0);
unsigned eng = ring->vm_inv_eng;
+ /*
+ * The hardware may lose the gpuvm invalidate acknowledge state across
+ * a power-gating off cycle. Add a semaphore acquire before the
+ * invalidation and a semaphore release after it, to avoid entering a
+ * power-gated state; this works around the issue.
+ */
+
+ /* TODO: The semaphore workaround still needs debugging before it can be enabled for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ /* a read return value of 1 means the semaphore has been acquired */
+ amdgpu_ring_emit_reg_wait(ring,
+ hub->vm_inv_eng0_sem + eng, 0x1, 0x1);
+
amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
lower_32_bits(pd_addr));
@@ -499,6 +547,15 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
hub->vm_inv_eng0_ack + eng,
req, 1 << vmid);
+ /* TODO: The semaphore workaround still needs debugging before it can be enabled for GFXHUB as well. */
+ if (ring->funcs->vmhub == AMDGPU_MMHUB_0 ||
+ ring->funcs->vmhub == AMDGPU_MMHUB_1)
+ /*
+ * Release the semaphore after the invalidation;
+ * a write of 0 releases it.
+ */
+ amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);
+
return pd_addr;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 6965e1e6fa9e..28105e4af507 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -420,6 +420,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 945533634711..a7cb185d639a 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -348,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev)
hub->ctx0_ptb_addr_hi32 =
SOC15_REG_OFFSET(MMHUB, 0,
mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM);
hub->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ);
hub->vm_inv_eng0_ack =
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 2c5adfe803a2..66efe2f7bd76 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -504,6 +504,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev)
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) +
i * MMHUB_INSTANCE_REGISTER_OFFSET;
+ hub[i]->vm_inv_eng0_sem =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ mmVML2VC0_VM_INVALIDATE_ENG0_SEM) +
+ i * MMHUB_INSTANCE_REGISTER_OFFSET;
hub[i]->vm_inv_eng0_req =
SOC15_REG_OFFSET(MMHUB, 0,
mmVML2VC0_VM_INVALIDATE_ENG0_REQ) +
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index af68f9815f28..0ba66bef5746 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -40,6 +40,7 @@
#include "gc/gc_10_1_0_sh_mask.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
#include "soc15.h"
#include "soc15_common.h"
@@ -156,8 +157,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev)
static bool nv_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
- /* TODO: will implement it when SMU header is available */
- return false;
+ u32 *dw_ptr;
+ u32 i, length_dw;
+
+ if (bios == NULL)
+ return false;
+ if (length_bytes == 0)
+ return false;
+ /* APU vbios image is part of sbios image */
+ if (adev->flags & AMD_IS_APU)
+ return false;
+
+ dw_ptr = (u32 *)bios;
+ length_dw = ALIGN(length_bytes, 4) / 4;
+
+ /* set rom index to 0 */
+ WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ /* read out the rom data */
+ for (i = 0; i < length_dw; i++)
+ dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+
+ return true;
}
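
nv_read_bios_from_rom() uses the index/data register pair exposed by SMUIO:
ROM_INDEX is written once with the starting dword offset, then each read of
ROM_DATA returns the next dword while the index auto-increments. Note the
loop rounds length_bytes up to whole dwords, so the destination buffer is
assumed to be sized in dword multiples. The core of the read, as above:

    WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);   /* start of ROM */
    for (i = 0; i < length_dw; i++)                       /* auto-incrementing */
            dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));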
static struct soc15_allowed_register_entry nv_allowed_read_registers[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 57bb5f9e08b2..88ae27a5a03d 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev)
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
si_ih_disable_interrupts(adev);
- WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8);
+ /* set dummy read address to dummy page address */
+ WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 9af6c6ffbfa2..57af489a5de3 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -28,8 +28,8 @@
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
-#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4
-#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1
+#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6
+#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3
extern const struct amd_ip_funcs soc15_common_ip_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 03083e5f731a..93edf9193a7b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -293,7 +293,7 @@ static int vcn_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
- int i;
+ int i, j;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
@@ -305,8 +305,8 @@ static int vcn_v2_5_hw_fini(void *handle)
ring->sched.ready = false;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst[i].ring_enc[i];
+ for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ ring = &adev->vcn.inst[i].ring_enc[j];
ring->sched.ready = false;
}
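
The vcn_v2_5.c fix is a loop-variable aliasing bug: the inner encoder-ring
loop reused i, which both mis-indexed ring_enc[] with the instance counter
and clobbered the outer loop's progress. Illustrative shape with
hypothetical names:

    /* buggy: inner loop reuses 'i', corrupting the outer iteration */
    for (i = 0; i < num_inst; ++i)
            for (i = 0; i < num_enc_rings; ++i)
                    inst[i].ring_enc[i].sched.ready = false;

    /* fixed: each loop owns its index */
    for (i = 0; i < num_inst; ++i)
            for (j = 0; j < num_enc_rings; ++j)
                    inst[i].ring_enc[j].sched.ready = false;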
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index caba9ecac723..7aac9568d3be 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -719,7 +719,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
*/
if (adev->flags & AMD_IS_APU &&
adev->asic_type >= CHIP_CARRIZO &&
- adev->asic_type <= CHIP_RAVEN)
+ adev->asic_type < CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 49cf39711dfc..2bf8534c18fb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -36,7 +36,9 @@
#include "dc_link_ddc.h"
#include "i2caux_interface.h"
-
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
/* #define TRACE_DPCD */
#ifdef TRACE_DPCD
@@ -147,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+ amdgpu_dm_connector->debugfs_dpcd_address = 0;
+ amdgpu_dm_connector->debugfs_dpcd_size = 0;
+#endif
+
return drm_dp_mst_connector_late_register(connector, port);
}
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 1e2da4d37567..40b546c75fc2 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -591,10 +591,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
smu_table->power_play_table = smu_table->hardcode_pptable;
smu_table->power_play_table_size = size;
+ /*
+ * A special hw_fini action (for Navi1x, the DPM disablement will be
+ * skipped) may be needed for custom pptable uploading.
+ */
+ smu->uploading_custom_pp_table = true;
+
ret = smu_reset(smu);
if (ret)
pr_info("smu reset failed, ret = %d\n", ret);
+ smu->uploading_custom_pp_table = false;
+
failed:
mutex_unlock(&smu->mutex);
return ret;
@@ -719,6 +727,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
vega20_set_ppt_funcs(smu);
break;
case CHIP_NAVI10:
@@ -727,6 +736,7 @@ static int smu_set_funcs(struct amdgpu_device *adev)
navi10_set_ppt_funcs(smu);
break;
case CHIP_ARCTURUS:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
arcturus_set_ppt_funcs(smu);
/* OD is not supported on Arcturus */
smu->od_enabled = false;
@@ -1068,10 +1078,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
if (adev->asic_type != CHIP_ARCTURUS) {
- ret = smu_override_pcie_parameters(smu);
- if (ret)
- return ret;
-
ret = smu_notify_display_change(smu);
if (ret)
return ret;
@@ -1100,6 +1106,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ if (adev->asic_type != CHIP_ARCTURUS) {
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+ }
+
ret = smu_set_default_od_settings(smu, initialize);
if (ret)
return ret;
@@ -1109,7 +1121,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
if (ret)
return ret;
- ret = smu_get_power_limit(smu, &smu->default_power_limit, true, false);
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
if (ret)
return ret;
}
@@ -1293,10 +1305,25 @@ static int smu_hw_fini(void *handle)
return ret;
}
- ret = smu_stop_dpms(smu);
- if (ret) {
- pr_warn("Fail to stop Dpms!\n");
- return ret;
+ /*
+ * For custom pptable uploading, skip the DPM features
+ * disable process on Navi1x ASICs.
+ * - The gfx-related features are under the control of the
+ * RLC on those ASICs. An RLC reinitialization would be
+ * needed to re-enable them, which would take considerably
+ * more effort.
+ *
+ * - SMU firmware can handle the DPM reenablement
+ * properly.
+ */
+ if (!smu->uploading_custom_pp_table ||
+ !((adev->asic_type >= CHIP_NAVI10) &&
+ (adev->asic_type <= CHIP_NAVI12))) {
+ ret = smu_stop_dpms(smu);
+ if (ret) {
+ pr_warn("Fail to stop Dpms!\n");
+ return ret;
+ }
}
kfree(table_context->driver_pptable);
@@ -2511,3 +2538,13 @@ int smu_get_dpm_clock_table(struct smu_context *smu,
return ret;
}
+
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
+{
+ uint32_t ret = 0;
+
+ if (smu->ppt_funcs->get_pptable_power_limit)
+ ret = smu->ppt_funcs->get_pptable_power_limit(smu);
+
+ return ret;
+}
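
The amdgpu_smu.c changes tie together: smu_sys_set_pp_table() brackets the
smu_reset() with the new uploading_custom_pp_table flag, and smu_hw_fini()
consults that flag to skip smu_stop_dpms() on Navi1x, where the gfx DPM
features are owned by the RLC and the SMU firmware handles re-enablement
itself. The flag-scoped reset, condensed from the hunks above:

    smu->uploading_custom_pp_table = true;   /* tell hw_fini why we are resetting */
    ret = smu_reset(smu);                    /* hw_fini + hw_init round trip */
    if (ret)
            pr_info("smu reset failed, ret = %d\n", ret);
    smu->uploading_custom_pp_table = false;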
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 3099ac256bd3..58c7c4a3053e 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -1261,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu,
static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1292,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
@@ -2070,6 +2063,13 @@ static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
i2c_del_adapter(control);
}
+static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2160,6 +2160,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .get_pptable_power_limit = arcturus_get_pptable_power_limit,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index a24beaa4fb01..d2909c91d65b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr)
int hwmgr_early_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev;
+
if (!hwmgr)
return -EINVAL;
@@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_init_workload_prority(hwmgr);
hwmgr->gfxoff_state_changed_by_workload = false;
+ adev = hwmgr->adev;
+
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &ci_smu_funcs;
ci_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK |
@@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
smu7_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_CZ:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->od_enabled = false;
hwmgr->smumgr_funcs = &smu8_smu_funcs;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
smu8_init_function_pointers(hwmgr);
break;
case AMDGPU_FAMILY_VI:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
switch (hwmgr->chip_id) {
case CHIP_TOPAZ:
@@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
case AMDGPU_FAMILY_AI:
switch (hwmgr->chip_id) {
case CHIP_VEGA10:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega10_smu_funcs;
vega10_hwmgr_init(hwmgr);
@@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
vega12_hwmgr_init(hwmgr);
break;
case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
hwmgr->feature_mask &= ~PP_GFXOFF_MASK;
hwmgr->smumgr_funcs = &vega20_smu_funcs;
vega20_hwmgr_init(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index c805c6fcdba2..f73dff68e799 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3477,18 +3477,31 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int i;
u32 tmp = 0;
if (!query)
return -EINVAL;
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
- *query = tmp;
+ /*
+ * PPSMC_MSG_GetCurrPkgPwr is not supported on:
+ * - Hawaii
+ * - Bonaire
+ * - Fiji
+ * - Tonga
+ */
+ if ((adev->asic_type != CHIP_HAWAII) &&
+ (adev->asic_type != CHIP_BONAIRE) &&
+ (adev->asic_type != CHIP_FIJI) &&
+ (adev->asic_type != CHIP_TONGA)) {
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
+ tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ *query = tmp;
- if (tmp != 0)
- return 0;
+ if (tmp != 0)
+ return 0;
+ }
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
@@ -3969,6 +3982,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
"Failed to populate and upload SCLK MCLK DPM levels!",
result = tmp_result);
+ /*
+ * If a custom pp table is loaded, set the DPMTABLE_OD_UPDATE_VDDC flag.
+ * That effectively disables the AVFS feature.
+ */
+ if (hwmgr->hardcode_pp_table != NULL)
+ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
+
tmp_result = smu7_update_avfs(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to update avfs voltages!",
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 8120e7587585..031e0c22fcc7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -261,7 +261,6 @@ struct smu_table_context
struct smu_table *tables;
struct smu_table memory_pool;
uint8_t thermal_controller_type;
- uint16_t TDPODLimit;
void *overdrive_table;
};
@@ -391,6 +390,7 @@ struct smu_context
uint32_t smc_if_version;
+ bool uploading_custom_pp_table;
};
struct i2c_adapter;
@@ -548,6 +548,7 @@ struct pptable_funcs {
int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max);
int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max);
int (*override_pcie_parameters)(struct smu_context *smu);
+ uint32_t (*get_pptable_power_limit)(struct smu_context *smu);
};
int smu_load_microcode(struct smu_context *smu);
@@ -717,4 +718,6 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
int smu_get_dpm_clock_table(struct smu_context *smu,
struct dpm_clocks *clock_table);
+uint32_t smu_get_pptable_power_limit(struct smu_context *smu);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index fd6ec9033d06..606149085683 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -48,6 +48,8 @@
#define SMU11_TOOL_SIZE 0x19000
+#define MAX_PCIE_CONF 2
+
#define CLK_MAP(clk, index) \
[SMU_##clk] = {1, (index)}
@@ -88,6 +90,11 @@ struct smu_11_0_dpm_table {
uint32_t max; /* MHz */
};
+struct smu_11_0_pcie_table {
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+};
+
struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table soc_table;
struct smu_11_0_dpm_table gfx_table;
@@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables {
struct smu_11_0_dpm_table display_table;
struct smu_11_0_dpm_table phy_table;
struct smu_11_0_dpm_table fclk_table;
+ struct smu_11_0_pcie_table pcie_table;
};
struct smu_11_0_dpm_context {
@@ -250,4 +258,8 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
int smu_v11_0_override_pcie_parameters(struct smu_context *smu);
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size);
+
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu);
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index 86cdc3393eac..b2f96a101124 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table
struct smu_11_0_power_saving_clock_table power_saving_clock;
struct smu_11_0_overdrive_table overdrive_table;
+#ifndef SMU_11_0_PARTIAL_PPTABLE
PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+#endif
} __attribute__((packed));
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 354f70978f82..aaec884d63ed 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -36,6 +36,7 @@
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -599,6 +600,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
PPTable_t *driver_ppt = NULL;
+ int i;
driver_ppt = table_context->driver_pptable;
@@ -629,6 +631,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0];
dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1];
+ for (i = 0; i < MAX_PCIE_CONF; i++) {
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i];
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i];
+ }
+
return 0;
}
@@ -691,13 +698,29 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+{
+ return od_table->cap[feature];
+}
+
+
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
+ uint16_t *curve_settings;
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
uint32_t freq_values[3] = {0};
uint32_t mark_index = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ uint32_t gen_speed, lane_width;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ struct amdgpu_device *adev = smu->adev;
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
switch (clk_type) {
case SMU_GFXCLK:
@@ -748,6 +771,69 @@ static int navi10_print_clk_levels(struct smu_context *smu,
}
break;
+ case SMU_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+ (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+ "*" : "");
+ break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ break;
+ size += sprintf(buf + size, "OD_SCLK:\n");
+ size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ break;
+ size += sprintf(buf + size, "OD_MCLK:\n");
+ size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ break;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ break;
+ size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ curve_settings = &od_table->GfxclkFreq1;
+ break;
+ case 1:
+ curve_settings = &od_table->GfxclkFreq2;
+ break;
+ case 2:
+ curve_settings = &od_table->GfxclkFreq3;
+ break;
+ default:
+ break;
+ }
+ size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
default:
break;
}
@@ -773,6 +859,12 @@ static int navi10_force_clk_levels(struct smu_context *smu,
case SMU_UCLK:
case SMU_DCEFCLK:
case SMU_FCLK:
+ /* There are only 2 levels for fine-grained DPM */
+ if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ soft_max_level = (soft_max_level >= 1 ? 1 : 0);
+ soft_min_level = (soft_min_level >= 1 ? 1 : 0);
+ }
+
ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
if (ret)
return size;
@@ -1582,17 +1674,22 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
return ret;
}
+static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
+}
+
static int navi10_get_power_limit(struct smu_context *smu,
uint32_t *limit,
- bool asic_default)
+ bool cap)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
uint32_t asic_default_power_limit = 0;
int ret = 0;
int power_src;
- if (!smu->default_power_limit ||
- !smu->power_limit) {
+ if (!smu->power_limit) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
@@ -1615,17 +1712,11 @@ static int navi10_get_power_limit(struct smu_context *smu,
pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
- if (smu->od_enabled) {
- asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit);
- asic_default_power_limit /= 100;
- }
-
- smu->default_power_limit = asic_default_power_limit;
smu->power_limit = asic_default_power_limit;
}
- if (asic_default)
- *limit = smu->default_power_limit;
+ if (cap)
+ *limit = smu_v11_0_get_max_power_limit(smu);
else
*limit = smu->power_limit;
@@ -1640,6 +1731,9 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
int ret, i;
uint32_t smu_pcie_arg;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+
for (i = 0; i < NUM_LINK_LEVELS; i++) {
smu_pcie_arg = (i << 16) |
((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
@@ -1648,11 +1742,260 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
smu_pcie_arg);
+
+ if (ret)
+ return ret;
+
+ if (pptable->PcieGenSpeed[i] > pcie_gen_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
+ if (pptable->PcieLaneCount[i] > pcie_width_cap)
+ dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
+ }
+
+ return 0;
+}
+
+static inline void navi10_dump_od_table(OverDriveTable_t *od_table) {
+ pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
+ pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1);
+ pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2);
+ pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3);
+ pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax);
+ pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct);
+}
+
+static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value)
+{
+ if (value < od_table->min[setting]) {
+ pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]);
+ return -EINVAL;
+ }
+ if (value > od_table->max[setting]) {
+ pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int navi10_setup_od_limits(struct smu_context *smu) {
+ struct smu_11_0_overdrive_table *overdrive_table = NULL;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+
+ if (!smu->smu_table.power_play_table) {
+ pr_err("powerplay table uninitialized!\n");
+ return -ENOENT;
+ }
+ powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
+ overdrive_table = &powerplay_table->overdrive_table;
+ if (!smu->od_settings) {
+ smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
+ } else {
+ memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
+ }
+ return 0;
+}
+
+static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
+ OverDriveTable_t *od_table;
+ int ret = 0;
+
+ ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = navi10_setup_od_limits(smu);
+ if (ret) {
+ pr_err("Failed to retrieve board OD limits\n");
+ return ret;
+ }
+
}
+ od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
+ if (od_table) {
+ navi10_dump_od_table(od_table);
+ }
+
+ return ret;
+}
+
+static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
+ int i;
+ int ret = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table;
+ struct smu_11_0_overdrive_table *od_settings;
+ enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting;
+ uint16_t *freq_ptr, *voltage_ptr;
+ od_table = (OverDriveTable_t *)table_context->overdrive_table;
+
+ if (!smu->od_enabled) {
+ pr_warn("OverDrive is not enabled!\n");
+ return -EINVAL;
+ }
+
+ if (!smu->od_settings) {
+ pr_err("OD board limits are not set!\n");
+ return -ENOENT;
+ }
+
+ od_settings = smu->od_settings;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ pr_warn("GFXCLK_LIMITS not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (!table_context->overdrive_table) {
+ pr_err("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+ switch (input[i]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN;
+ freq_ptr = &od_table->GfxclkFmin;
+ if (input[i + 1] > od_table->GfxclkFmax) {
+ pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmax);
+ return -EINVAL;
+ }
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX;
+ freq_ptr = &od_table->GfxclkFmax;
+ if (input[i + 1] < od_table->GfxclkFmin) {
+ pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n",
+ input[i + 1],
+ od_table->GfxclkFmin);
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]);
+ pr_info("Supported indices: [0:min,1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[i + 1];
+ }
+ break;
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ pr_warn("UCLK_MAX not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 2) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (input[0] != 1) {
+ pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]);
+ pr_info("Supported indices: [1:max]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]);
+ if (ret)
+ return ret;
+ od_table->UclkFmax = input[1];
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ navi10_dump_od_table(od_table);
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ // no lock needed because smu_od_edit_dpm_table has it
+ ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ false);
+ if (ret) {
+ return ret;
+ }
+ break;
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ pr_warn("GFXCLK_CURVE not supported!\n");
+ return -ENOTSUPP;
+ }
+ if (size < 3) {
+ pr_info("invalid number of parameters: %d\n", size);
+ return -EINVAL;
+ }
+ if (!od_table) {
+ pr_info("Overdrive is not initialized\n");
+ return -EINVAL;
+ }
+
+ switch (input[0]) {
+ case 0:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1;
+ freq_ptr = &od_table->GfxclkFreq1;
+ voltage_ptr = &od_table->GfxclkVolt1;
+ break;
+ case 1:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2;
+ freq_ptr = &od_table->GfxclkFreq2;
+ voltage_ptr = &od_table->GfxclkVolt2;
+ break;
+ case 2:
+ freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3;
+ voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3;
+ freq_ptr = &od_table->GfxclkFreq3;
+ voltage_ptr = &od_table->GfxclkVolt3;
+ break;
+ default:
+ pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]);
+ pr_info("Supported indices: [0, 1, 2]\n");
+ return -EINVAL;
+ }
+ ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]);
+ if (ret)
+ return ret;
+ // Allow setting zero to disable the OverDrive VDDC curve
+ if (input[2] != 0) {
+ ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]);
+ if (ret)
+ return ret;
+ *freq_ptr = input[1];
+ *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE;
+ pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr);
+ } else {
+ // If setting 0, disable all voltage curve settings
+ od_table->GfxclkVolt1 = 0;
+ od_table->GfxclkVolt2 = 0;
+ od_table->GfxclkVolt3 = 0;
+ }
+ navi10_dump_od_table(od_table);
+ break;
+ default:
+ return -ENOSYS;
+ }
return ret;
}
+static int navi10_run_btc(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ if (ret)
+ pr_err("RunBtc failed!\n");
+
+ return ret;
+}
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
@@ -1742,6 +2085,10 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
+ .set_default_od_settings = navi10_set_default_od_settings,
+ .od_edit_dpm_table = navi10_od_edit_dpm_table,
+ .get_pptable_power_limit = navi10_get_pptable_power_limit,
+ .run_btc = navi10_run_btc,
};
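
Among the navi10_ppt.c additions, navi10_update_pcie_parameters() packs one
override per link level into a single 32-bit message argument: the level
index in bits 16 and up, the capped gen speed in bits 8-15, and the capped
lane count in bits 0-7. It also mirrors the applied caps back into the
dpm_context pcie_table so pp_dpm_pcie reports what was actually programmed.
Equivalent encoding, using min_t() in place of the ternaries above:

    uint32_t gen  = min_t(uint32_t, pptable->PcieGenSpeed[i],  pcie_gen_cap);
    uint32_t lane = min_t(uint32_t, pptable->PcieLaneCount[i], pcie_width_cap);
    uint32_t smu_pcie_arg = (i << 16) | (gen << 8) | lane;

    ret = smu_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters,
                                      smu_pcie_arg);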
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index a37e37c5f105..ec03c7992f6d 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -33,6 +33,11 @@
#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717)
#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448)
+#define NAVI10_VOLTAGE_SCALE (4)
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index e859bb1132ac..fc9679ea2368 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -24,6 +24,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#define SMU_11_0_PARTIAL_PPTABLE
+
#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -31,6 +33,7 @@
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
+#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
#include "amd_pcie.h"
@@ -1045,13 +1048,44 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
return 0;
}
+uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) {
+ uint32_t od_limit, max_power_limit;
+ struct smu_11_0_powerplay_table *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+ powerplay_table = table_context->power_play_table;
+
+ max_power_limit = smu_get_pptable_power_limit(smu);
+
+ if (!max_power_limit) {
+ // If we couldn't get the table limit, fall back on first-read value
+ if (!smu->default_power_limit)
+ smu->default_power_limit = smu->power_limit;
+ max_power_limit = smu->default_power_limit;
+ }
+
+ if (smu->od_enabled) {
+ od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+
+ pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
+
+ max_power_limit *= (100 + od_limit);
+ max_power_limit /= 100;
+ }
+
+ return max_power_limit;
+}
+
int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
int ret = 0;
+ uint32_t max_power_limit;
- if (n > smu->default_power_limit) {
- pr_err("New power limit is over the max allowed %d\n",
- smu->default_power_limit);
+ max_power_limit = smu_v11_0_get_max_power_limit(smu);
+
+ if (n > max_power_limit) {
+ pr_err("New power limit (%d) is over the max allowed %d\n",
+ n,
+ max_power_limit);
return -EINVAL;
}
@@ -1779,3 +1813,30 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
return ret;
}
+
+int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (initialize) {
+ if (table_context->overdrive_table) {
+ return -EINVAL;
+ }
+ table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
+ if (!table_context->overdrive_table) {
+ return -ENOMEM;
+ }
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export overdrive table!\n");
+ return ret;
+ }
+ }
+ ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import overdrive table!\n");
+ return ret;
+ }
+ return ret;
+}
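
smu_v11_0_get_max_power_limit() derives the user-settable ceiling from the
pptable's stock limit plus the OD power-percentage headroom:
max = stock * (100 + od_pct) / 100. For example, a 225 W
SocketPowerLimitAc[PPT0] with an ODSETTING_POWERPERCENTAGE max of 20 yields
a 270 W cap (hypothetical values). A standalone check of the arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* mirrors the integer math in smu_v11_0_get_max_power_limit() */
    static uint32_t max_power_limit(uint32_t stock_limit, uint32_t od_pct)
    {
            return stock_limit * (100 + od_pct) / 100;
    }

    int main(void)
    {
            printf("%u W\n", max_power_limit(225, 20));   /* prints "270 W" */
            return 0;
    }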
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 5b21386f558d..0b4892833808 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -466,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
sizeof(PPTable_t));
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
- table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
return 0;
}