author | Sean Paul <seanpaul@chromium.org> | 2018-10-24 14:26:04 -0400
---|---|---
committer | Sean Paul <seanpaul@chromium.org> | 2018-10-24 14:26:04 -0400
commit | 6542e9adc0da1e23d81ff9314265a029b961906d (patch) |
tree | 35ba360a7150d8b042a9fd75ab54ef83b34a2b95 /drivers/gpu/drm/amd |
parent | 2b02a05bdc3a62d36e0d0b015351897109e25991 (diff) |
parent | f2bfc71aee75feff33ca659322b72ffeed5a243d (diff) |
download | talos-op-linux-6542e9adc0da1e23d81ff9314265a029b961906d.tar.gz, talos-op-linux-6542e9adc0da1e23d81ff9314265a029b961906d.zip |
Merge drm/drm-next into drm-misc-next
4.19 is out, Lyude asked for a backmerge, and it's been a while. All
very good reasons on their own :-)
Signed-off-by: Sean Paul <seanpaul@chromium.org>
Diffstat (limited to 'drivers/gpu/drm/amd')
183 files changed, 4021 insertions, 1974 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6cb35e3dab30..d0102cfc8efb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -81,6 +81,23 @@
 #include "amdgpu_bo_list.h"
 #include "amdgpu_gem.h"

+#define MAX_GPU_INSTANCE		16
+
+struct amdgpu_gpu_instance
+{
+	struct amdgpu_device		*adev;
+	int				mgpu_fan_enabled;
+};
+
+struct amdgpu_mgpu_info
+{
+	struct amdgpu_gpu_instance	gpu_ins[MAX_GPU_INSTANCE];
+	struct mutex			mutex;
+	uint32_t			num_gpu;
+	uint32_t			num_dgpu;
+	uint32_t			num_apu;
+};
+
 /*
  * Modules parameters.
  */
@@ -134,6 +151,7 @@ extern int amdgpu_compute_multipipe;
 extern int amdgpu_gpu_recovery;
 extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
+extern struct amdgpu_mgpu_info mgpu_info;

 #ifdef CONFIG_DRM_AMDGPU_SI
 extern int amdgpu_si_support;
@@ -146,6 +164,7 @@ extern int amdgpu_cik_support;
 #define AMDGPU_DEFAULT_GTT_SIZE_MB		3072ULL /* 3GB by default */
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
+#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
 /* AMDGPU_IB_POOL_SIZE must be a power of 2 */
 #define AMDGPU_IB_POOL_SIZE			16
 #define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
@@ -408,16 +427,25 @@ typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
 	AMDGPU_DOORBELL64_GFX_RING0               = 0x8b,

 	/*
-	 * Other graphics doorbells can be allocated here: from 0x8c to 0xef
+	 * Other graphics doorbells can be allocated here: from 0x8c to 0xdf
 	 * Graphics voltage island aperture 1
-	 * default non-graphics QWORD index is 0xF0 - 0xFF inclusive
+	 * default non-graphics QWORD index is 0xe0 - 0xFF inclusive
 	 */

-	/* sDMA engines */
-	AMDGPU_DOORBELL64_sDMA_ENGINE0            = 0xF0,
-	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xF1,
-	AMDGPU_DOORBELL64_sDMA_ENGINE1            = 0xF2,
-	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xF3,
+	/* sDMA engines reserved from 0xE0 - 0xEF */
+	AMDGPU_DOORBELL64_sDMA_ENGINE0            = 0xE0,
+	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xE1,
+	AMDGPU_DOORBELL64_sDMA_ENGINE1            = 0xE8,
+	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xE9,
+
+	/* For vega10 sriov, the sdma doorbell must be fixed as follows
+	 * to keep the same setting as the host driver, or conflicts
+	 * will happen
+	 */
+	AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0            = 0xF0,
+	AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE0     = 0xF1,
+	AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1            = 0xF2,
+	AMDGPU_VEGA10_DOORBELL64_sDMA_HI_PRI_ENGINE1     = 0xF3,

 	/* Interrupt handler */
 	AMDGPU_DOORBELL64_IH                      = 0xF4,  /* For legacy interrupt ring buffer */
@@ -588,31 +616,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
  */
 void amdgpu_test_moves(struct amdgpu_device *adev);

-
-/*
- * amdgpu smumgr functions
- */
-struct amdgpu_smumgr_funcs {
-	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
-	int (*request_smu_load_fw)(struct amdgpu_device *adev);
-	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
-};
-
-/*
- * amdgpu smumgr
- */
-struct amdgpu_smumgr {
-	struct amdgpu_bo *toc_buf;
-	struct amdgpu_bo *smu_buf;
-	/* asic priv smu data */
-	void *priv;
-	spinlock_t smu_lock;
-	/* smumgr functions */
-	const struct amdgpu_smumgr_funcs *smumgr_funcs;
-	/* ucode loading complete flag */
-	uint32_t fw_flags;
-};
-
 /*
  * ASIC specific register table accessible by UMD
  */
@@ -948,9 +951,6 @@ struct amdgpu_device {
 	u32 cg_flags;
 	u32 pg_flags;

-	/* amdgpu smumgr */
-	struct amdgpu_smumgr smu;
-
 	/* gfx */
 	struct amdgpu_gfx gfx;

@@ -1015,6 +1015,9 @@ struct amdgpu_device {
 	bool has_hw_reset;
 	u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
+	/* s3/s4 mask */
+	bool in_suspend;
+
 	/* record last mm index being written through WREG32*/
 	unsigned long last_mm_index;
 	bool in_gpu_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 6488e90ec948..7f0afc526419 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -359,7 +359,9 @@ out:
  *
  * Checks the acpi event and if it matches an atif event,
  * handles it.
- * Returns NOTIFY code
+ *
+ * Returns:
+ * NOTIFY_BAD or NOTIFY_DONE, depending on the event.
  */
 static int amdgpu_atif_handler(struct amdgpu_device *adev,
 			       struct acpi_bus_event *event)
@@ -373,11 +375,16 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 	if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
 		return NOTIFY_DONE;

+	/* Is this actually our event? */
 	if (!atif ||
 	    !atif->notification_cfg.enabled ||
-	    event->type != atif->notification_cfg.command_code)
-		/* Not our event */
-		return NOTIFY_DONE;
+	    event->type != atif->notification_cfg.command_code) {
+		/* These events will generate keypresses otherwise */
+		if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
+			return NOTIFY_BAD;
+		else
+			return NOTIFY_DONE;
+	}

 	if (atif->functions.sbios_requests) {
 		struct atif_sbios_requests req;
@@ -386,7 +393,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 		count = amdgpu_atif_get_sbios_requests(atif, &req);

 		if (count <= 0)
-			return NOTIFY_DONE;
+			return NOTIFY_BAD;

 		DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 0f9947edb12a..c31a8849e9f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -76,6 +76,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
 		break;
 	case CHIP_VEGA10:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
 		break;
@@ -123,7 +124,7 @@ static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,

 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 {
-	int i;
+	int i, n;
 	int last_valid_bit;

 	if (adev->kfd) {
 		struct kgd2kfd_shared_resources gpu_resources = {
@@ -162,7 +163,15 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 				&gpu_resources.doorbell_physical_address,
 				&gpu_resources.doorbell_aperture_size,
 				&gpu_resources.doorbell_start_offset);
-		if (adev->asic_type >= CHIP_VEGA10) {
+
+		if (adev->asic_type < CHIP_VEGA10) {
+			kgd2kfd->device_init(adev->kfd, &gpu_resources);
+			return;
+		}
+
+		n = (adev->asic_type < CHIP_VEGA20) ? 2 : 8;
+
+		for (i = 0; i < n; i += 2) {
 			/* On SOC15 the BIF is involved in routing
 			 * doorbells using the low 12 bits of the
 			 * address. Communicate the assignments to
@@ -170,20 +179,31 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
 			 * process in case of 64-bit doorbells so we
 			 * can use each doorbell assignment twice.
 			 */
-			gpu_resources.sdma_doorbell[0][0] =
-				AMDGPU_DOORBELL64_sDMA_ENGINE0;
-			gpu_resources.sdma_doorbell[0][1] =
-				AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
-			gpu_resources.sdma_doorbell[1][0] =
-				AMDGPU_DOORBELL64_sDMA_ENGINE1;
-			gpu_resources.sdma_doorbell[1][1] =
-				AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
-			/* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
-			 * SDMA, IH and VCN. So don't use them for the CP.
-			 */
-			gpu_resources.reserved_doorbell_mask = 0x1f0;
-			gpu_resources.reserved_doorbell_val = 0x0f0;
+			if (adev->asic_type == CHIP_VEGA10) {
+				gpu_resources.sdma_doorbell[0][i] =
+					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+				gpu_resources.sdma_doorbell[0][i+1] =
+					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+				gpu_resources.sdma_doorbell[1][i] =
+					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+				gpu_resources.sdma_doorbell[1][i+1] =
+					AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+			} else {
+				gpu_resources.sdma_doorbell[0][i] =
+					AMDGPU_DOORBELL64_sDMA_ENGINE0 + (i >> 1);
+				gpu_resources.sdma_doorbell[0][i+1] =
+					AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200 + (i >> 1);
+				gpu_resources.sdma_doorbell[1][i] =
+					AMDGPU_DOORBELL64_sDMA_ENGINE1 + (i >> 1);
+				gpu_resources.sdma_doorbell[1][i+1] =
+					AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200 + (i >> 1);
+			}
 		}
+		/* Doorbells 0x0e0-0ff and 0x2e0-2ff are reserved for
+		 * SDMA, IH and VCN. So don't use them for the CP.
+		 */
+		gpu_resources.reserved_doorbell_mask = 0x1e0;
+		gpu_resources.reserved_doorbell_val = 0x0e0;

 		kgd2kfd->device_init(adev->kfd, &gpu_resources);
 	}
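The assignment loop above packs two user queues onto each doorbell index: even queues use the engine's base index, odd queues use the same index in the second 64-bit doorbell page at +0x200, and each queue pair advances the index by one (`i >> 1`). A minimal standalone sketch of that arithmetic, using illustrative constants that mirror the non-Vega10 enum values above (this is a demonstration, not driver code):

```c
#include <stdio.h>

/* Values mirror AMDGPU_DOORBELL64_sDMA_ENGINE0/1 from the diff above. */
#define SDMA_ENGINE0 0xE0
#define SDMA_ENGINE1 0xE8

int main(void)
{
	unsigned int doorbell[2][8];
	int i, n = 8; /* Vega20: 8 queues per engine; pre-Vega20 uses 2 */

	for (i = 0; i < n; i += 2) {
		doorbell[0][i]     = SDMA_ENGINE0 + (i >> 1);          /* page 1 */
		doorbell[0][i + 1] = SDMA_ENGINE0 + 0x200 + (i >> 1);  /* page 2 */
		doorbell[1][i]     = SDMA_ENGINE1 + (i >> 1);
		doorbell[1][i + 1] = SDMA_ENGINE1 + 0x200 + (i >> 1);
	}

	for (i = 0; i < n; i++)
		printf("engine0 q%d -> 0x%03x, engine1 q%d -> 0x%03x\n",
		       i, doorbell[0][i], i, doorbell[1][i]);
	return 0;
}
```

For n = 8 this yields indices 0xE0-0xE3 plus 0x2E0-0x2E3 per engine, which is exactly why the reserved mask/value above widen from 0x1f0/0x0f0 to 0x1e0/0x0e0.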
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 056fc6ef6c63..8e0d4f7196b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -174,7 +174,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
 				    struct amdgpu_vm *vm);
 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
-uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 		struct kgd_dev *kgd, uint64_t va, uint64_t size,
 		void *vm, struct kgd_mem **mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b2e45c8e2e0d..244d9834a381 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -142,7 +142,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
 static void set_scratch_backing_va(struct kgd_dev *kgd,
 					uint64_t va, uint32_t vmid);
 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint32_t page_table_base);
+		uint64_t page_table_base);
 static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
 static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
 static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
@@ -874,7 +874,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 }

 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint32_t page_table_base)
+		uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);

@@ -882,7 +882,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		pr_err("trying to set page table base for wrong VMID\n");
 		return;
 	}
-	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
+	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
+	       lower_32_bits(page_table_base));
 }

 static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index ea7c18ce7754..9f149914ad6c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -45,8 +45,6 @@ enum hqd_dequeue_request_type {
 	RESET_WAVES
 };

-struct vi_sdma_mqd;
-
 /*
  * Register access functions
  */
@@ -100,7 +98,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
 static void set_scratch_backing_va(struct kgd_dev *kgd,
 					uint64_t va, uint32_t vmid);
 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint32_t page_table_base);
+		uint64_t page_table_base);
 static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
 static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);

@@ -282,7 +280,8 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)

 	lock_srbm(kgd, mec, pipe, 0, 0);

-	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);
+	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
+			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

 	unlock_srbm(kgd);

@@ -834,7 +833,7 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 }

 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint32_t page_table_base)
+		uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);

@@ -842,7 +841,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
 		pr_err("trying to set page table base for wrong VMID\n");
 		return;
 	}
-	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
+	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
+	       lower_32_bits(page_table_base));
 }

 static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index c9176537550b..42cb4c4e0929 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -138,7 +138,7 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
 static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
 		uint8_t vmid);
 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint32_t page_table_base);
+		uint64_t page_table_base);
 static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
 static void set_scratch_backing_va(struct kgd_dev *kgd,
 					uint64_t va, uint32_t vmid);
@@ -1013,11 +1013,10 @@ static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
 }

 static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
-		uint32_t page_table_base)
+		uint64_t page_table_base)
 {
 	struct amdgpu_device *adev = get_amdgpu_device(kgd);
-	uint64_t base = (uint64_t)page_table_base << PAGE_SHIFT |
-		AMDGPU_PTE_VALID;
+	uint64_t base = page_table_base | AMDGPU_PTE_VALID;

 	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
 		pr_err("trying to set page table base for wrong VMID %u\n",
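All three kfd2kgd backends widen set_vm_context_page_table_base() from a 32-bit page-frame number to a full 64-bit address: GFXv7/v8 can only program the 32-bit VM_CONTEXT8 register, so they now truncate explicitly with lower_32_bits(), while GFXv9 consumes the whole address. A hedged standalone illustration of the split — the macros here are local stand-ins for the kernel's lower_32_bits()/upper_32_bits() helpers:

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel helpers of the same name. */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)((n) >> 32))

int main(void)
{
	/* A page-table base above 4GB: representable only now that the
	 * interface takes uint64_t instead of uint32_t. */
	uint64_t page_table_base = (0x1234ULL << 32) | 0x5678000ULL;

	/* GFXv7/v8: the VM_CONTEXT8_PAGE_TABLE_BASE_ADDR register is
	 * 32 bits wide, so only the low half is programmed. */
	printf("v7/v8 write: 0x%08x\n",
	       (unsigned)lower_32_bits(page_table_base));

	/* GFXv9 keeps the full 64-bit address, OR'ed with the valid bit. */
	printf("v9 base:     0x%016llx\n",
	       (unsigned long long)(page_table_base | 1 /* PTE_VALID */));
	return 0;
}
```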
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 6ee9dc476c86..df0a059565f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1131,11 +1131,15 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm)
 	amdgpu_vm_release_compute(adev, avm);
 }

-uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
+uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm)
 {
 	struct amdgpu_vm *avm = (struct amdgpu_vm *)vm;
+	struct amdgpu_bo *pd = avm->root.base.bo;
+	struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);

-	return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+	if (adev->asic_type < CHIP_VEGA10)
+		return avm->pd_phys_addr >> AMDGPU_GPU_PAGE_SHIFT;
+	return avm->pd_phys_addr;
 }

 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f5fb93795a69..dd9a4fb9ce39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -826,21 +826,13 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	struct drm_minor *minor = adev->ddev->primary;
 	struct dentry *ent, *root = minor->debugfs_root;
-	unsigned i, j;
+	unsigned int i;

 	for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
 		ent = debugfs_create_file(debugfs_regs_names[i],
 					  S_IFREG | S_IRUGO, root,
 					  adev, debugfs_regs[i]);
-		if (IS_ERR(ent)) {
-			for (j = 0; j < i; j++) {
-				debugfs_remove(adev->debugfs_regs[i]);
-				adev->debugfs_regs[i] = NULL;
-			}
-			return PTR_ERR(ent);
-		}
-
-		if (!i)
+		if (!i && !IS_ERR_OR_NULL(ent))
 			i_size_write(ent->d_inode, adev->rmmio_size);
 		adev->debugfs_regs[i] = ent;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bd79d0a31942..1e4dd09a5072 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1525,6 +1525,92 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	return 0;
 }

+static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.sw)
+			continue;
+		if (adev->ip_blocks[i].status.hw)
+			continue;
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
+			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+			if (r) {
+				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+					  adev->ip_blocks[i].version->funcs->name, r);
+				return r;
+			}
+			adev->ip_blocks[i].status.hw = true;
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.sw)
+			continue;
+		if (adev->ip_blocks[i].status.hw)
+			continue;
+		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+		if (r) {
+			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+				  adev->ip_blocks[i].version->funcs->name, r);
+			return r;
+		}
+		adev->ip_blocks[i].status.hw = true;
+	}
+
+	return 0;
+}
+
+static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
+{
+	int r = 0;
+	int i;
+
+	if (adev->asic_type >= CHIP_VEGA10) {
+		for (i = 0; i < adev->num_ip_blocks; i++) {
+			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
+				if (adev->in_gpu_reset || adev->in_suspend) {
+					if (amdgpu_sriov_vf(adev) && adev->in_gpu_reset)
+						break; /* sriov gpu reset, psp needs to do hw_init before IH because of hw limit */
+					r = adev->ip_blocks[i].version->funcs->resume(adev);
+					if (r) {
+						DRM_ERROR("resume of IP block <%s> failed %d\n",
+							  adev->ip_blocks[i].version->funcs->name, r);
+						return r;
+					}
+				} else {
+					r = adev->ip_blocks[i].version->funcs->hw_init(adev);
+					if (r) {
+						DRM_ERROR("hw_init of IP block <%s> failed %d\n",
+							  adev->ip_blocks[i].version->funcs->name, r);
+						return r;
+					}
+				}
+				adev->ip_blocks[i].status.hw = true;
+			}
+		}
+	}
+
+	if (adev->powerplay.pp_funcs->load_firmware) {
+		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+		if (r) {
+			pr_err("firmware loading failed\n");
+			return r;
+		}
+	}
+
+	return 0;
+}
+
 /**
  * amdgpu_device_ip_init - run init for hardware IPs
  *
@@ -1581,19 +1667,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 		}
 	}

-	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.sw)
-			continue;
-		if (adev->ip_blocks[i].status.hw)
-			continue;
-		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
-		if (r) {
-			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
-			return r;
-		}
-		adev->ip_blocks[i].status.hw = true;
-	}
+	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
+	if (r)
+		return r;
+
+	r = amdgpu_device_ip_hw_init_phase1(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_device_fw_loading(adev);
+	if (r)
+		return r;
+
+	r = amdgpu_device_ip_hw_init_phase2(adev);
+	if (r)
+		return r;

 	amdgpu_xgmi_add_device(adev);
 	amdgpu_amdkfd_device_init(adev);
@@ -1656,7 +1744,7 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,

 	for (j = 0; j < adev->num_ip_blocks; j++) {
 		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
-		if (!adev->ip_blocks[i].status.valid)
+		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1686,7 +1774,7 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power

 	for (j = 0; j < adev->num_ip_blocks; j++) {
 		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
-		if (!adev->ip_blocks[i].status.valid)
+		if (!adev->ip_blocks[i].status.late_initialized)
 			continue;
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -1723,7 +1811,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 	int i = 0, r;

 	for (i = 0; i < adev->num_ip_blocks; i++) {
-		if (!adev->ip_blocks[i].status.valid)
+		if (!adev->ip_blocks[i].status.hw)
 			continue;
 		if (adev->ip_blocks[i].version->funcs->late_init) {
 			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
@@ -1732,8 +1820,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 				  adev->ip_blocks[i].version->funcs->name, r);
 				return r;
 			}
-			adev->ip_blocks[i].status.late_initialized = true;
 		}
+		adev->ip_blocks[i].status.late_initialized = true;
 	}

 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
@@ -1803,6 +1891,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 			continue;

 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+			amdgpu_ucode_free_bo(adev);
 			amdgpu_free_static_csa(adev);
 			amdgpu_device_wb_fini(adev);
 			amdgpu_device_vram_scratch_fini(adev);
@@ -1833,6 +1922,43 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 	return 0;
 }

+static int amdgpu_device_enable_mgpu_fan_boost(void)
+{
+	struct amdgpu_gpu_instance *gpu_ins;
+	struct amdgpu_device *adev;
+	int i, ret = 0;
+
+	mutex_lock(&mgpu_info.mutex);
+
+	/*
+	 * MGPU fan boost feature should be enabled
+	 * only when there are two or more dGPUs in
+	 * the system
+	 */
+	if (mgpu_info.num_dgpu < 2)
+		goto out;
+
+	for (i = 0; i < mgpu_info.num_dgpu; i++) {
+		gpu_ins = &(mgpu_info.gpu_ins[i]);
+		adev = gpu_ins->adev;
+		if (!(adev->flags & AMD_IS_APU) &&
+		    !gpu_ins->mgpu_fan_enabled &&
+		    adev->powerplay.pp_funcs &&
+		    adev->powerplay.pp_funcs->enable_mgpu_fan_boost) {
+			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
+			if (ret)
+				break;
+
+			gpu_ins->mgpu_fan_enabled = 1;
+		}
+	}
+
+out:
+	mutex_unlock(&mgpu_info.mutex);
+
+	return ret;
+}
+
 /**
  * amdgpu_device_ip_late_init_func_handler - work handler for ib test
  *
@@ -1847,6 +1973,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
 		DRM_ERROR("ib ring test failed (%d).\n", r);
+
+	r = amdgpu_device_enable_mgpu_fan_boost();
+	if (r)
+		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
 }

 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2082,7 +2212,8 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
 			continue;
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
-		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
 			continue;
 		r = adev->ip_blocks[i].version->funcs->resume(adev);
 		if (r) {
@@ -2114,6 +2245,11 @@ static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
 	r = amdgpu_device_ip_resume_phase1(adev);
 	if (r)
 		return r;
+
+	r = amdgpu_device_fw_loading(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_device_ip_resume_phase2(adev);

 	return r;
@@ -2608,6 +2744,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;

+	adev->in_suspend = true;
 	drm_kms_helper_poll_disable(dev);

 	if (fbcon)
@@ -2793,6 +2930,8 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 #ifdef CONFIG_PM
 	dev->dev->power.disable_depth--;
 #endif
+	adev->in_suspend = false;
+
 	return 0;
 }

@@ -3061,6 +3200,10 @@ retry:
 	if (r)
 		goto out;

+	r = amdgpu_device_fw_loading(adev);
+	if (r)
+		return r;
+
 	r = amdgpu_device_ip_resume_phase2(adev);
 	if (r)
 		goto out;
@@ -3117,6 +3260,10 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	/* we need recover gart prior to run SMC/CP/SDMA resume */
 	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);

+	r = amdgpu_device_fw_loading(adev);
+	if (r)
+		return r;
+
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_device_ip_reinit_late_sriov(adev);
 	if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index ff24e1cc5b65..f972cd156795 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -278,6 +278,9 @@ enum amdgpu_pcie_gen {
 #define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
 		((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))

+#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
+		((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
+
 #define amdgpu_dpm_get_sclk(adev, l) \
 		((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))

@@ -357,6 +360,10 @@ enum amdgpu_pcie_gen {
 		((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
 			(adev)->powerplay.pp_handle, type, parameter, size))

+#define amdgpu_dpm_enable_mgpu_fan_boost(adev) \
+		((adev)->powerplay.pp_funcs->enable_mgpu_fan_boost(\
+			(adev)->powerplay.pp_handle))
+
 struct amdgpu_dpm {
 	struct amdgpu_ps	*ps;
 	/* number of valid power states */
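The amdgpu_device.c rework above splits hardware bring-up into three ordered steps: phase 1 initializes only the COMMON and IH blocks (so register access and interrupts work before any microcode exists), the firmware step then hands microcode to the PSP (or to the SMU via load_firmware), and phase 2 initializes the remaining blocks, which may depend on loaded microcode. A simplified, hypothetical model of that dispatch order — the names and block types here are illustrative stand-ins, not the driver's own structures:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the IP-block bookkeeping. */
enum blk_type { BLK_COMMON, BLK_IH, BLK_PSP, BLK_GFX };

struct ip_block {
	enum blk_type type;
	bool sw_ready, hw_done;
};

static void hw_init(struct ip_block *b) { b->hw_done = true; }

static void init_all(struct ip_block *blk, int n)
{
	/* Phase 1: only COMMON and IH, mirroring
	 * amdgpu_device_ip_hw_init_phase1(). */
	for (int i = 0; i < n; i++)
		if (blk[i].sw_ready && !blk[i].hw_done &&
		    (blk[i].type == BLK_COMMON || blk[i].type == BLK_IH))
			hw_init(&blk[i]);

	puts("load firmware here (PSP/SMU step)");

	/* Phase 2: everything still pending. */
	for (int i = 0; i < n; i++)
		if (blk[i].sw_ready && !blk[i].hw_done)
			hw_init(&blk[i]);
}

int main(void)
{
	struct ip_block blocks[] = {
		{ BLK_COMMON, true, false }, { BLK_IH, true, false },
		{ BLK_PSP, true, false }, { BLK_GFX, true, false },
	};
	init_all(blocks, 4);
	return 0;
}
```

The same ordering explains the resume-path hunks: PSP is skipped in resume_phase2 because amdgpu_device_fw_loading() now resumes it between the two phases.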
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 723f0f7754bd..28781414d71c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -127,6 +127,9 @@ int amdgpu_compute_multipipe = -1;
 int amdgpu_gpu_recovery = -1; /* auto */
 int amdgpu_emu_mode = 0;
 uint amdgpu_smu_memory_pool_size = 0;
+struct amdgpu_mgpu_info mgpu_info = {
+	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
+};

 /**
  * DOC: vramlimit (int)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 176f28777f5e..5448cf27654e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -196,6 +196,19 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
 }

 /**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+	mod_timer(&ring->fence_drv.fallback_timer,
+		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
  * amdgpu_fence_process - check for fence activity
  *
  * @ring: pointer to struct amdgpu_ring
@@ -203,8 +216,10 @@ int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
  * Checks the current fence value and calculates the last
  * signalled fence value. Wakes the fence queue if the
  * sequence number has increased.
+ *
+ * Returns true if fence was processed
  */
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+bool amdgpu_fence_process(struct amdgpu_ring *ring)
 {
 	struct amdgpu_fence_driver *drv = &ring->fence_drv;
 	uint32_t seq, last_seq;
@@ -216,8 +231,12 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)

 	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

+	if (del_timer(&ring->fence_drv.fallback_timer) &&
+	    seq != ring->fence_drv.sync_seq)
+		amdgpu_fence_schedule_fallback(ring);
+
 	if (unlikely(seq == last_seq))
-		return;
+		return false;

 	last_seq &= drv->num_fences_mask;
 	seq &= drv->num_fences_mask;
@@ -244,6 +263,24 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
 		dma_fence_put(fence);
 	} while (last_seq != seq);
+
+	return true;
+}
+
+/**
+ * amdgpu_fence_fallback - fallback for hardware interrupts
+ *
+ * @t: timer context used to obtain the ring
+ *
+ * Checks for fence activity.
+ */
+static void amdgpu_fence_fallback(struct timer_list *t)
+{
+	struct amdgpu_ring *ring = from_timer(ring, t,
+					      fence_drv.fallback_timer);
+
+	if (amdgpu_fence_process(ring))
+		DRM_WARN("Fence fallback timer expired on ring %s\n",
+			 ring->name);
+}

 /**
@@ -393,6 +430,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 	atomic_set(&ring->fence_drv.last_seq, 0);
 	ring->fence_drv.initialized = false;

+	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
+
 	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
 	spin_lock_init(&ring->fence_drv.lock);
 	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
@@ -468,6 +507,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 		drm_sched_fini(&ring->sched);
+		del_timer_sync(&ring->fence_drv.fallback_timer);
 		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
 			dma_fence_put(ring->fence_drv.fences[j]);
 		kfree(ring->fence_drv.fences);
@@ -561,6 +601,27 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 }

 /**
+ * amdgpu_fence_enable_signaling - enable signalling on fence
+ * @fence: fence
+ *
+ * This function is called with fence_queue lock held, and adds a callback
+ * to fence_queue that checks if this fence is signaled, and if so it
+ * signals the fence and removes itself.
+ */
+static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	struct amdgpu_ring *ring = fence->ring;
+
+	if (!timer_pending(&ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(ring);
+
+	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+
+	return true;
+}
+
+/**
  * amdgpu_fence_free - free up the fence memory
  *
  * @rcu: RCU callback head
@@ -590,6 +651,7 @@ static void amdgpu_fence_release(struct dma_fence *f)
 static const struct dma_fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
+	.enable_signaling = amdgpu_fence_enable_signaling,
 	.release = amdgpu_fence_release,
 };
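The fence code gains a fallback timer: enable_signaling arms it when someone starts waiting, amdgpu_fence_process() cancels or re-arms it as fences retire, and if an EOP interrupt is lost the timer fires and walks the ring anyway. A toy userspace model of that pattern — no real timers or interrupts, just the state transitions:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model: last_seq tracks processed fences, sync_seq emitted ones;
 * timer_armed stands in for the kernel timer_list. */
struct ring {
	unsigned int last_seq, sync_seq;
	bool timer_armed;
};

static void schedule_fallback(struct ring *r) { r->timer_armed = true; }

static bool fence_process(struct ring *r)
{
	r->timer_armed = false;        /* del_timer() */
	if (r->last_seq == r->sync_seq)
		return false;          /* nothing new signaled */
	r->last_seq = r->sync_seq;
	return true;
}

static void fallback_timer_fired(struct ring *r)
{
	if (fence_process(r))
		puts("fallback timer expired: IRQ was lost, recovered");
}

int main(void)
{
	struct ring r = { .last_seq = 0, .sync_seq = 1 };

	schedule_fallback(&r);   /* enable_signaling arms the timer */
	/* ...suppose the EOP interrupt never arrives... */
	fallback_timer_fired(&r);
	return 0;
}
```

This is also why AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2, i.e. half a second) was added to amdgpu.h earlier in this diff.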
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index f172e92c463c..b61b5c11aead 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -297,8 +297,7 @@ struct amdgpu_gfx {
 	/* reset mask */
 	uint32_t			grbm_soft_reset;
 	uint32_t			srbm_soft_reset;
-	/* s3/s4 mask */
-	bool				in_suspend;
+
 	/* NGG */
 	struct amdgpu_ngg		ngg;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 9a5b252784a1..d73367cab4f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -146,6 +146,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 {
 	const uint64_t four_gb = 0x100000000ULL;
 	u64 size_af, size_bf;
+	/* To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START */
+	u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);

 	mc->gart_size += adev->pm.smu_prv_buffer_size;

@@ -153,7 +155,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 	 * the GART base on a 4GB boundary as well.
 	 */
 	size_bf = mc->fb_start;
-	size_af = adev->gmc.mc_mask + 1 - ALIGN(mc->fb_end + 1, four_gb);
+	size_af = max_mc_address + 1 - ALIGN(mc->fb_end + 1, four_gb);

 	if (mc->gart_size > max(size_bf, size_af)) {
 		dev_warn(adev->dev, "limiting GART\n");
@@ -164,7 +166,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 	    (size_af < mc->gart_size))
 		mc->gart_start = 0;
 	else
-		mc->gart_start = mc->mc_mask - mc->gart_size + 1;
+		mc->gart_start = max_mc_address - mc->gart_size + 1;

 	mc->gart_start &= ~(four_gb - 1);
 	mc->gart_end = mc->gart_start + mc->gart_size - 1;
@@ -200,16 +202,13 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
 	}

 	if (size_bf > size_af) {
-		mc->agp_start = mc->fb_start > mc->gart_start ?
-			mc->gart_end + 1 : 0;
+		mc->agp_start = (mc->fb_start - size_bf) & sixteen_gb_mask;
 		mc->agp_size = size_bf;
 	} else {
-		mc->agp_start = (mc->fb_start > mc->gart_start ?
-			mc->fb_end : mc->gart_end) + 1,
+		mc->agp_start = ALIGN(mc->fb_end + 1, sixteen_gb);
 		mc->agp_size = size_af;
 	}

-	mc->agp_start = ALIGN(mc->agp_start, sixteen_gb);
 	mc->agp_end = mc->agp_start + mc->agp_size - 1;
 	dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
 		 mc->agp_size >> 20, mc->agp_start, mc->agp_end);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 47817e00f54f..b8963b725dfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -354,6 +354,14 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
 		if (!ring || !ring->ready)
 			continue;

+		/* skip IB tests for KIQ in general for the below reasons:
+		 * 1. We never submit IBs to the KIQ
+		 * 2. KIQ doesn't use the EOP interrupts,
+		 *    we use some other CP interrupt.
+		 */
+		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+			continue;
+
 		/* MM engine need more time */
 		if (ring->funcs->type == AMDGPU_RING_TYPE_UVD ||
 		    ring->funcs->type == AMDGPU_RING_TYPE_VCE ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3a072a7a39f0..df9b173c3d0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -574,7 +574,7 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 		/* skip over VMID 0, since it is the system VM */
 		for (j = 1; j < id_mgr->num_ids; ++j) {
 			amdgpu_vmid_reset(adev, i, j);
-			amdgpu_sync_create(&id_mgr->ids[i].active);
+			amdgpu_sync_create(&id_mgr->ids[j].active);
 			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
 		}
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 4ed86218cef3..8af67f649660 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -24,46 +24,21 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 #include "amdgpu_ih.h"
-#include "amdgpu_amdkfd.h"
-
-/**
- * amdgpu_ih_ring_alloc - allocate memory for the IH ring
- *
- * @adev: amdgpu_device pointer
- *
- * Allocate a ring buffer for the interrupt controller.
- * Returns 0 for success, errors for failure.
- */
-static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
-{
-	int r;
-
-	/* Allocate ring buffer */
-	if (adev->irq.ih.ring_obj == NULL) {
-		r = amdgpu_bo_create_kernel(adev, adev->irq.ih.ring_size,
-					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
-					    &adev->irq.ih.ring_obj,
-					    &adev->irq.ih.gpu_addr,
-					    (void **)&adev->irq.ih.ring);
-		if (r) {
-			DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
-			return r;
-		}
-	}
-	return 0;
-}

 /**
  * amdgpu_ih_ring_init - initialize the IH state
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to initialize
+ * @ring_size: ring size to allocate
+ * @use_bus_addr: true when we can use dma_alloc_coherent
  *
  * Initializes the IH state and allocates a buffer
  * for the IH ring buffer.
  * Returns 0 for success, errors for failure.
  */
-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
-			bool use_bus_addr)
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+			unsigned ring_size, bool use_bus_addr)
 {
 	u32 rb_bufsz;
 	int r;
@@ -71,70 +46,76 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
 	/* Align ring size */
 	rb_bufsz = order_base_2(ring_size / 4);
 	ring_size = (1 << rb_bufsz) * 4;
-	adev->irq.ih.ring_size = ring_size;
-	adev->irq.ih.ptr_mask = adev->irq.ih.ring_size - 1;
-	adev->irq.ih.rptr = 0;
-	adev->irq.ih.use_bus_addr = use_bus_addr;
-
-	if (adev->irq.ih.use_bus_addr) {
-		if (!adev->irq.ih.ring) {
-			/* add 8 bytes for the rptr/wptr shadows and
-			 * add them to the end of the ring allocation.
-			 */
-			adev->irq.ih.ring = pci_alloc_consistent(adev->pdev,
-								 adev->irq.ih.ring_size + 8,
-								 &adev->irq.ih.rb_dma_addr);
-			if (adev->irq.ih.ring == NULL)
-				return -ENOMEM;
-			memset((void *)adev->irq.ih.ring, 0, adev->irq.ih.ring_size + 8);
-			adev->irq.ih.wptr_offs = (adev->irq.ih.ring_size / 4) + 0;
-			adev->irq.ih.rptr_offs = (adev->irq.ih.ring_size / 4) + 1;
-		}
-		return 0;
+	ih->ring_size = ring_size;
+	ih->ptr_mask = ih->ring_size - 1;
+	ih->rptr = 0;
+	ih->use_bus_addr = use_bus_addr;
+
+	if (use_bus_addr) {
+		if (ih->ring)
+			return 0;
+
+		/* add 8 bytes for the rptr/wptr shadows and
+		 * add them to the end of the ring allocation.
+		 */
+		ih->ring = dma_alloc_coherent(adev->dev, ih->ring_size + 8,
+					      &ih->rb_dma_addr, GFP_KERNEL);
+		if (ih->ring == NULL)
+			return -ENOMEM;
+
+		memset((void *)ih->ring, 0, ih->ring_size + 8);
+		ih->wptr_offs = (ih->ring_size / 4) + 0;
+		ih->rptr_offs = (ih->ring_size / 4) + 1;
 	} else {
-		r = amdgpu_device_wb_get(adev, &adev->irq.ih.wptr_offs);
+		r = amdgpu_device_wb_get(adev, &ih->wptr_offs);
+		if (r)
+			return r;
+
+		r = amdgpu_device_wb_get(adev, &ih->rptr_offs);
 		if (r) {
-			dev_err(adev->dev, "(%d) ih wptr_offs wb alloc failed\n", r);
+			amdgpu_device_wb_free(adev, ih->wptr_offs);
 			return r;
 		}

-		r = amdgpu_device_wb_get(adev, &adev->irq.ih.rptr_offs);
+		r = amdgpu_bo_create_kernel(adev, ih->ring_size, PAGE_SIZE,
+					    AMDGPU_GEM_DOMAIN_GTT,
+					    &ih->ring_obj, &ih->gpu_addr,
+					    (void **)&ih->ring);
 		if (r) {
-			amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
-			dev_err(adev->dev, "(%d) ih rptr_offs wb alloc failed\n", r);
+			amdgpu_device_wb_free(adev, ih->rptr_offs);
+			amdgpu_device_wb_free(adev, ih->wptr_offs);
 			return r;
 		}
-
-		return amdgpu_ih_ring_alloc(adev);
 	}
+
 	return 0;
 }
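Two things happen in ring_init above: the allocation moves from the deprecated pci_alloc_consistent() to dma_alloc_coherent(), and the long-standing trick of allocating 8 extra bytes is kept, so the last two dwords of the buffer can serve as write/read-pointer shadows. A minimal sketch of that shadow layout — plain malloc stands in for the DMA allocation:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Ring of ring_size bytes plus 8 extra bytes: the two trailing
	 * dwords (indices ring_size/4 and ring_size/4 + 1) hold the
	 * wptr/rptr shadows, exactly as in amdgpu_ih_ring_init(). */
	unsigned int ring_size = 4096;           /* bytes, power of two */
	uint32_t *ring = calloc(ring_size / 4 + 2, sizeof(uint32_t));
	unsigned int wptr_offs = (ring_size / 4) + 0;
	unsigned int rptr_offs = (ring_size / 4) + 1;

	if (!ring)
		return 1;
	ring[wptr_offs] = 0x40;                  /* hardware-updated shadow */
	printf("wptr shadow at dword %u = 0x%x, rptr shadow at dword %u\n",
	       wptr_offs, ring[wptr_offs], rptr_offs);
	free(ring);
	return 0;
}
```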
/**
 * amdgpu_ih_ring_fini - tear down the IH state
 *
 * @adev: amdgpu_device pointer
+ * @ih: ih ring to tear down
 *
 * Tears down the IH state and frees buffer
 * used for the IH ring buffer.
 */
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
 {
-	if (adev->irq.ih.use_bus_addr) {
-		if (adev->irq.ih.ring) {
-			/* add 8 bytes for the rptr/wptr shadows and
-			 * add them to the end of the ring allocation.
-			 */
-			pci_free_consistent(adev->pdev, adev->irq.ih.ring_size + 8,
-					    (void *)adev->irq.ih.ring,
-					    adev->irq.ih.rb_dma_addr);
-			adev->irq.ih.ring = NULL;
-		}
+	if (ih->use_bus_addr) {
+		if (!ih->ring)
+			return;
+
+		/* add 8 bytes for the rptr/wptr shadows and
+		 * add them to the end of the ring allocation.
+		 */
+		dma_free_coherent(adev->dev, ih->ring_size + 8,
+				  (void *)ih->ring, ih->rb_dma_addr);
+		ih->ring = NULL;
 	} else {
-		amdgpu_bo_free_kernel(&adev->irq.ih.ring_obj,
-				      &adev->irq.ih.gpu_addr,
-				      (void **)&adev->irq.ih.ring);
-		amdgpu_device_wb_free(adev, adev->irq.ih.wptr_offs);
-		amdgpu_device_wb_free(adev, adev->irq.ih.rptr_offs);
+		amdgpu_bo_free_kernel(&ih->ring_obj, &ih->gpu_addr,
+				      (void **)&ih->ring);
+		amdgpu_device_wb_free(adev, ih->wptr_offs);
+		amdgpu_device_wb_free(adev, ih->rptr_offs);
 	}
 }
@@ -142,56 +123,43 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev)
  * amdgpu_ih_process - interrupt handler
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
  *
  * Interrupt handler (VI), walk the IH ring.
  * Returns irq process return code.
  */
-int amdgpu_ih_process(struct amdgpu_device *adev)
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+		      void (*callback)(struct amdgpu_device *adev,
+				       struct amdgpu_ih_ring *ih))
 {
-	struct amdgpu_iv_entry entry;
 	u32 wptr;

-	if (!adev->irq.ih.enabled || adev->shutdown)
+	if (!ih->enabled || adev->shutdown)
 		return IRQ_NONE;

 	wptr = amdgpu_ih_get_wptr(adev);

restart_ih:
 	/* is somebody else already processing irqs? */
-	if (atomic_xchg(&adev->irq.ih.lock, 1))
+	if (atomic_xchg(&ih->lock, 1))
 		return IRQ_NONE;

-	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, adev->irq.ih.rptr, wptr);
+	DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);

 	/* Order reading of wptr vs. reading of IH ring data */
 	rmb();

-	while (adev->irq.ih.rptr != wptr) {
-		u32 ring_index = adev->irq.ih.rptr >> 2;
-
-		/* Prescreening of high-frequency interrupts */
-		if (!amdgpu_ih_prescreen_iv(adev)) {
-			adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
-			continue;
-		}
-
-		/* Before dispatching irq to IP blocks, send it to amdkfd */
-		amdgpu_amdkfd_interrupt(adev,
-				(const void *) &adev->irq.ih.ring[ring_index]);
-
-		entry.iv_entry = (const uint32_t *)
-			&adev->irq.ih.ring[ring_index];
-		amdgpu_ih_decode_iv(adev, &entry);
-		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
-
-		amdgpu_irq_dispatch(adev, &entry);
+	while (ih->rptr != wptr) {
+		callback(adev, ih);
+		ih->rptr &= ih->ptr_mask;
 	}
+
 	amdgpu_ih_set_rptr(adev);
-	atomic_set(&adev->irq.ih.lock, 0);
+	atomic_set(&ih->lock, 0);

 	/* make sure wptr hasn't changed while processing */
 	wptr = amdgpu_ih_get_wptr(adev);
-	if (wptr != adev->irq.ih.rptr)
+	if (wptr != ih->rptr)
 		goto restart_ih;

 	return IRQ_HANDLED;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 0d5b3f5201d2..9ce8c93ec19b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,12 +24,8 @@
 #ifndef __AMDGPU_IH_H__
 #define __AMDGPU_IH_H__

-#include "soc15_ih_clientid.h"
-
 struct amdgpu_device;
-
-#define AMDGPU_IH_CLIENTID_LEGACY	0
-#define AMDGPU_IH_CLIENTID_MAX		SOC15_IH_CLIENTID_MAX
+struct amdgpu_iv_entry;

 /*
  * R6xx+ IH ring
@@ -51,22 +47,6 @@ struct amdgpu_ih_ring {
 	dma_addr_t		rb_dma_addr; /* only used when use_bus_addr = true */
 };

-#define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
-
-struct amdgpu_iv_entry {
-	unsigned client_id;
-	unsigned src_id;
-	unsigned ring_id;
-	unsigned vmid;
-	unsigned vmid_src;
-	uint64_t timestamp;
-	unsigned timestamp_src;
-	unsigned pasid;
-	unsigned pasid_src;
-	unsigned src_data[AMDGPU_IH_SRC_DATA_MAX_SIZE_DW];
-	const uint32_t *iv_entry;
-};
-
 /* provided by the ih block */
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
@@ -82,9 +62,11 @@ struct amdgpu_ih_funcs {
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))

-int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
-			bool use_bus_addr);
-void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
-int amdgpu_ih_process(struct amdgpu_device *adev);
+int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+			unsigned ring_size, bool use_bus_addr);
+void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
+int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
+		      void (*callback)(struct amdgpu_device *adev,
+				       struct amdgpu_ih_ring *ih));

 #endif
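After this refactor the generic walker in amdgpu_ih_process() only advances rptr toward wptr; decoding and dispatching an entry is delegated to a caller-supplied callback (amdgpu_irq_callback in the next file). A toy model of that callback-driven loop — entry size is reduced to one dword for brevity; real IH entries are larger:

```c
#include <stdio.h>

/* Toy ring: rptr/wptr are byte offsets, ptr_mask wraps them. */
struct ih_ring {
	unsigned int rptr, ptr_mask;
	unsigned int ring[8];
};

typedef void (*ih_callback)(struct ih_ring *ih);

static void process(struct ih_ring *ih, unsigned int wptr, ih_callback cb)
{
	while (ih->rptr != wptr) {
		cb(ih);                    /* decode + dispatch one entry */
		ih->rptr = (ih->rptr + 4) & ih->ptr_mask; /* one-dword entry */
	}
}

static void print_entry(struct ih_ring *ih)
{
	printf("entry @%u = 0x%x\n", ih->rptr >> 2, ih->ring[ih->rptr >> 2]);
}

int main(void)
{
	struct ih_ring ih = { .rptr = 0, .ptr_mask = 31,
			      .ring = { 0xa, 0xb, 0xc } };

	process(&ih, 12, print_entry);  /* wptr = 3 entries * 4 bytes */
	return 0;
}
```

Passing the ring explicitly (instead of hardcoding adev->irq.ih) is what later allows multiple IH rings per device.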
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index b927e8798534..52c17f6219a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -51,6 +51,7 @@
 #include "atom.h"
 #include "amdgpu_connectors.h"
 #include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"

 #include <linux/pm_runtime.h>

@@ -123,7 +124,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 	int r;

 	spin_lock_irqsave(&adev->irq.lock, irqflags);
-	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
 		if (!adev->irq.client[i].sources)
 			continue;

@@ -147,6 +148,34 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 }

 /**
+ * amdgpu_irq_callback - callback from the IH ring
+ *
+ * @adev: amdgpu device pointer
+ * @ih: amdgpu ih ring
+ *
+ * Callback from IH ring processing to handle the entry at the current position
+ * and advance the read pointer.
+ */
+static void amdgpu_irq_callback(struct amdgpu_device *adev,
+				struct amdgpu_ih_ring *ih)
+{
+	u32 ring_index = ih->rptr >> 2;
+	struct amdgpu_iv_entry entry;
+
+	/* Prescreening of high-frequency interrupts */
+	if (!amdgpu_ih_prescreen_iv(adev))
+		return;
+
+	/* Before dispatching irq to IP blocks, send it to amdkfd */
+	amdgpu_amdkfd_interrupt(adev, (const void *) &ih->ring[ring_index]);
+
+	entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
+	amdgpu_ih_decode_iv(adev, &entry);
+
+	amdgpu_irq_dispatch(adev, &entry);
+}
+
+/**
  * amdgpu_irq_handler - IRQ handler
  *
  * @irq: IRQ number (unused)
@@ -163,7 +192,7 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg)
 	struct amdgpu_device *adev = dev->dev_private;
 	irqreturn_t ret;

-	ret = amdgpu_ih_process(adev);
+	ret = amdgpu_ih_process(adev, &adev->irq.ih, amdgpu_irq_callback);
 	if (ret == IRQ_HANDLED)
 		pm_runtime_mark_last_busy(dev->dev);
 	return ret;
@@ -273,7 +302,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 		cancel_work_sync(&adev->reset_work);
 	}

-	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
 		if (!adev->irq.client[i].sources)
 			continue;

@@ -313,7 +342,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
 		      unsigned client_id, unsigned src_id,
 		      struct amdgpu_irq_src *source)
 {
-	if (client_id >= AMDGPU_IH_CLIENTID_MAX)
+	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
 		return -EINVAL;

 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
@@ -367,7 +396,7 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,

 	trace_amdgpu_iv(entry);

-	if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+	if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
 		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
 		return;
 	}
@@ -440,7 +469,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
 	int i, j, k;

-	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+	for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
 		if (!adev->irq.client[i].sources)
 			continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index 3375ad778edc..f6ce171cb8aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -25,19 +25,38 @@
 #define __AMDGPU_IRQ_H__

 #include <linux/irqdomain.h>
+#include "soc15_ih_clientid.h"
 #include "amdgpu_ih.h"

-#define AMDGPU_MAX_IRQ_SRC_ID	0x100
+#define AMDGPU_MAX_IRQ_SRC_ID		0x100
 #define AMDGPU_MAX_IRQ_CLIENT_ID	0x100

+#define AMDGPU_IRQ_CLIENTID_LEGACY	0
+#define AMDGPU_IRQ_CLIENTID_MAX		SOC15_IH_CLIENTID_MAX
+
+#define AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW	4
+
 struct amdgpu_device;
-struct amdgpu_iv_entry;

 enum amdgpu_interrupt_state {
 	AMDGPU_IRQ_STATE_DISABLE,
 	AMDGPU_IRQ_STATE_ENABLE,
 };

+struct amdgpu_iv_entry {
+	unsigned		client_id;
+	unsigned		src_id;
+	unsigned		ring_id;
+	unsigned		vmid;
+	unsigned		vmid_src;
+	uint64_t		timestamp;
+	unsigned		timestamp_src;
+	unsigned		pasid;
+	unsigned		pasid_src;
+	unsigned		src_data[AMDGPU_IRQ_SRC_DATA_MAX_SIZE_DW];
+	const uint32_t		*iv_entry;
+};
+
 struct amdgpu_irq_src {
 	unsigned				num_types;
 	atomic_t				*enabled_types;
@@ -63,7 +82,7 @@ struct amdgpu_irq {
 	bool				installed;
 	spinlock_t			lock;
 	/* interrupt sources */
-	struct amdgpu_irq_client	client[AMDGPU_IH_CLIENTID_MAX];
+	struct amdgpu_irq_client	client[AMDGPU_IRQ_CLIENTID_MAX];

 	/* status, etc. */
 	bool				msi_enabled; /* msi enabled */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index a64056dadc58..81732a84c2ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -40,6 +40,30 @@
 #include "amdgpu_gem.h"
 #include "amdgpu_display.h"

+static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
+{
+	struct amdgpu_gpu_instance *gpu_instance;
+	int i;
+
+	mutex_lock(&mgpu_info.mutex);
+
+	for (i = 0; i < mgpu_info.num_gpu; i++) {
+		gpu_instance = &(mgpu_info.gpu_ins[i]);
+		if (gpu_instance->adev == adev) {
+			mgpu_info.gpu_ins[i] =
+				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
+			mgpu_info.num_gpu--;
+			if (adev->flags & AMD_IS_APU)
+				mgpu_info.num_apu--;
+			else
+				mgpu_info.num_dgpu--;
+			break;
+		}
+	}
+
+	mutex_unlock(&mgpu_info.mutex);
+}
+
 /**
  * amdgpu_driver_unload_kms - Main unload function for KMS.
  *
@@ -55,6 +79,8 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (adev == NULL)
 		return;

+	amdgpu_unregister_gpu_instance(adev);
+
 	if (adev->rmmio == NULL)
 		goto done_free;

@@ -75,6 +101,31 @@ done_free:
 	dev->dev_private = NULL;
 }

+static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
+{
+	struct amdgpu_gpu_instance *gpu_instance;
+
+	mutex_lock(&mgpu_info.mutex);
+
+	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
+		DRM_ERROR("Cannot register more gpu instance\n");
+		mutex_unlock(&mgpu_info.mutex);
+		return;
+	}
+
+	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
+	gpu_instance->adev = adev;
+	gpu_instance->mgpu_fan_enabled = 0;
+
+	mgpu_info.num_gpu++;
+	if (adev->flags & AMD_IS_APU)
+		mgpu_info.num_apu++;
+	else
+		mgpu_info.num_dgpu++;
+
+	mutex_unlock(&mgpu_info.mutex);
+}
+
 /**
  * amdgpu_driver_load_kms - Main load function for KMS.
  *
@@ -169,6 +220,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 		pm_runtime_put_autosuspend(dev->dev);
 	}

+	amdgpu_register_gpu_instance(adev);
out:
 	if (r) {
 		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
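amdgpu_register_gpu_instance()/amdgpu_unregister_gpu_instance() above keep the global mgpu_info array dense by replacing a departing entry with the last one, so the fan-boost loop never has to skip holes. A standalone sketch of that swap-remove bookkeeping (locking omitted for brevity):

```c
#include <stdio.h>

#define MAX_GPU_INSTANCE 16

struct instance { int id; };

static struct instance gpu_ins[MAX_GPU_INSTANCE];
static int num_gpu;

static void reg(int id) { gpu_ins[num_gpu++].id = id; }

/* Swap-remove: overwrite the departing slot with the last entry,
 * mirroring amdgpu_unregister_gpu_instance(). */
static void unreg(int id)
{
	for (int i = 0; i < num_gpu; i++) {
		if (gpu_ins[i].id == id) {
			gpu_ins[i] = gpu_ins[num_gpu - 1];
			num_gpu--;
			break;
		}
	}
}

int main(void)
{
	reg(0); reg(1); reg(2);
	unreg(1);                      /* slot 1 now holds old slot 2 */
	for (int i = 0; i < num_gpu; i++)
		printf("slot %d -> gpu %d\n", i, gpu_ins[i].id);
	return 0;
}
```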
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8c334fc808c2..94055a485e01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1120,12 +1120,19 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	int err;
 	u32 value;
+	u32 pwm_mode;

 	/* Can't adjust fan when the card is off */
 	if ((adev->flags & AMD_IS_PX) &&
 	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;

+	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
+		pr_info("manual fan speed control should be enabled first\n");
+		return -EINVAL;
+	}
+
 	err = kstrtou32(buf, 10, &value);
 	if (err)
 		return err;
@@ -1187,6 +1194,148 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 	return sprintf(buf, "%i\n", speed);
 }

+static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	u32 min_rpm = 0;
+	u32 size = sizeof(min_rpm);
+	int r;
+
+	if (!adev->powerplay.pp_funcs->read_sensor)
+		return -EINVAL;
+
+	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
+				   (void *)&min_rpm, &size);
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	u32 max_rpm = 0;
+	u32 size = sizeof(max_rpm);
+	int r;
+
+	if (!adev->powerplay.pp_funcs->read_sensor)
+		return -EINVAL;
+
+	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+				   (void *)&max_rpm, &size);
+	if (r)
+		return r;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	u32 rpm = 0;
+
+	/* Can't adjust fan when the card is off */
+	if ((adev->flags & AMD_IS_PX) &&
+	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
+		if (err)
+			return err;
+	}
+
+	return sprintf(buf, "%i\n", rpm);
+}
+
+static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	u32 value;
+	u32 pwm_mode;
+
+	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+	if (pwm_mode != AMD_FAN_CTRL_MANUAL)
+		return -ENODATA;
+
+	/* Can't adjust fan when the card is off */
+	if ((adev->flags & AMD_IS_PX) &&
+	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	err = kstrtou32(buf, 10, &value);
+	if (err)
+		return err;
+
+	if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
+		if (err)
+			return err;
+	}
+
+	return count;
+}
+
+static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	u32 pwm_mode = 0;
+
+	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+		return -EINVAL;
+
+	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
+	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
+}
+
+static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t count)
+{
+	struct amdgpu_device *adev = dev_get_drvdata(dev);
+	int err;
+	int value;
+	u32 pwm_mode;
+
+	/* Can't adjust fan when the card is off */
+	if ((adev->flags & AMD_IS_PX) &&
+	    (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+		return -EINVAL;
+
+	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+		return -EINVAL;
+
+	err = kstrtoint(buf, 10, &value);
+	if (err)
+		return err;
+
+	if (value == 0)
+		pwm_mode = AMD_FAN_CTRL_AUTO;
+	else if (value == 1)
+		pwm_mode = AMD_FAN_CTRL_MANUAL;
+	else
+		return -EINVAL;
+
+	amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+
+	return count;
+}
+
 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 					struct device_attribute *attr,
 					char *buf)
@@ -1406,8 +1555,16 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
  *
  * - pwm1_max: pulse width modulation fan control maximum level (255)
  *
+ * - fan1_min: minimum fan speed, in RPM
+ *
+ * - fan1_max: maximum fan speed, in RPM
+ *
  * - fan1_input: fan speed in RPM
  *
+ * - fan[1-*]_target: desired fan speed, in RPM
+ *
+ * - fan[1-*]_enable: enable or disable the sensor. 1: enable, 0: disable
+ *
  * You can use hwmon tools like sensors to view this information on your system.
  *
  */
@@ -1420,6 +1577,10 @@ static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_
 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
+static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
@@ -1438,6 +1599,10 @@ static struct attribute *hwmon_attributes[] = {
 	&sensor_dev_attr_pwm1_min.dev_attr.attr,
 	&sensor_dev_attr_pwm1_max.dev_attr.attr,
 	&sensor_dev_attr_fan1_input.dev_attr.attr,
+	&sensor_dev_attr_fan1_min.dev_attr.attr,
+	&sensor_dev_attr_fan1_max.dev_attr.attr,
+	&sensor_dev_attr_fan1_target.dev_attr.attr,
+	&sensor_dev_attr_fan1_enable.dev_attr.attr,
 	&sensor_dev_attr_in0_input.dev_attr.attr,
 	&sensor_dev_attr_in0_label.dev_attr.attr,
 	&sensor_dev_attr_in1_input.dev_attr.attr,
@@ -1456,13 +1621,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	umode_t effective_mode = attr->mode;

-	/* Skip fan attributes if fan is not present */
 	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
-	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr))
+	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
 		return 0;

 	/* Skip limit attributes if DPM is not enabled */
@@ -1472,7 +1640,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	    attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
 	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
-	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
+	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
 		return 0;

 	/* mask fan attributes if we have no bindings for this asic to expose */
@@ -1497,10 +1670,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 	/* hide max/min values if we can't both query and manage the fan */
 	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
 	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+	    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
 	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 		return 0;

+	if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+	     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+	    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+		return 0;
+
 	/* only APUs have vddnb */
 	if (!(adev->flags & AMD_IS_APU) &&
 	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
@@ -1976,6 +2157,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
 {
 	uint32_t value;
+	uint64_t value64;
 	uint32_t query = 0;
 	int size;

@@ -2014,6 +2196,10 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
 	seq_printf(m, "GPU Load: %u %%\n", value);
 	seq_printf(m, "\n");

+	/* SMC feature mask */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
+				    (void *)&value64, &size))
+		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
+
 	/* UVD clocks */
 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
 		if (!value) {
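The new fan1_* attributes are ordinary hwmon files, so they can be read like any other sensor. A small illustrative reader — the hwmon0 index is an assumption and differs per machine, and a missing file simply means the attribute was hidden by hwmon_attributes_visible():

```c
#include <stdio.h>

int main(void)
{
	const char *files[] = { "fan1_input", "fan1_min", "fan1_max",
				"fan1_target", "fan1_enable" };
	char path[128];
	long v;

	for (int i = 0; i < 5; i++) {
		/* hwmon0 is a placeholder; enumerate /sys/class/hwmon
		 * and match the "amdgpu" name file in practice. */
		snprintf(path, sizeof(path),
			 "/sys/class/hwmon/hwmon0/%s", files[i]);
		FILE *f = fopen(path, "r");
		if (!f)
			continue;      /* attribute hidden or no fan */
		if (fscanf(f, "%ld", &v) == 1)
			printf("%s = %ld\n", files[i], v);
		fclose(f);
	}
	return 0;
}
```

Writing 1 to fan1_enable switches to manual control, after which fan1_target accepts an RPM value; the set_fan1_target handler above rejects writes while the controller is still in automatic mode.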
num_fences_mask; spinlock_t lock; struct dma_fence **fences; @@ -96,7 +97,7 @@ void amdgpu_fence_driver_resume(struct amdgpu_device *adev); int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, unsigned flags); int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s); -void amdgpu_fence_process(struct amdgpu_ring *ring); +bool amdgpu_fence_process(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring, uint32_t wait_seq, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index d17503f0df8e..500113ec65ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -46,10 +46,6 @@ struct amdgpu_sdma_instance { struct amdgpu_sdma { struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; -#ifdef CONFIG_DRM_AMDGPU_SI - //SI DMA has a difference trap irq number for the second engine - struct amdgpu_irq_src trap_irq_1; -#endif struct amdgpu_irq_src trap_irq; struct amdgpu_irq_src illegal_inst_irq; int num_instances; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 2e87414422f9..e9bf70e2ac51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -103,7 +103,7 @@ TRACE_EVENT(amdgpu_iv, __entry->src_data[2] = iv->src_data[2]; __entry->src_data[3] = iv->src_data[3]; ), - TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x\n", + TP_printk("client_id:%u src_id:%u ring:%u vmid:%u timestamp: %llu pasid:%u src_data: %08x %08x %08x %08x", __entry->client_id, __entry->src_id, __entry->ring_id, __entry->vmid, __entry->timestamp, __entry->pasid, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c index b160b958e5fe..f212402570a5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace_points.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 +// SPDX-License-Identifier: MIT /* Copyright Red Hat Inc 2010. * * Permission is hereby granted, free of charge, to any person obtaining a diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 1fa8bc337859..7b33867036e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -297,10 +297,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_POLARIS11: case CHIP_POLARIS12: case CHIP_VEGAM: - if (!load_type) - return AMDGPU_FW_LOAD_DIRECT; - else - return AMDGPU_FW_LOAD_SMU; + return AMDGPU_FW_LOAD_SMU; case CHIP_VEGA10: case CHIP_RAVEN: case CHIP_VEGA12: @@ -423,32 +420,41 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode, return 0; } -int amdgpu_ucode_init_bo(struct amdgpu_device *adev) +int amdgpu_ucode_create_bo(struct amdgpu_device *adev) { - uint64_t fw_offset = 0; - int i, err; - struct amdgpu_firmware_info *ucode = NULL; - const struct common_firmware_header *header = NULL; - - if (!adev->firmware.fw_size) { - dev_warn(adev->dev, "No ip firmware need to load\n"); - return 0; - } - - if (!adev->in_gpu_reset) { - err = amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE, - amdgpu_sriov_vf(adev) ? 
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, - &adev->firmware.fw_buf, - &adev->firmware.fw_buf_mc, - &adev->firmware.fw_buf_ptr); - if (err) { + if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) { + amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE, + amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, + &adev->firmware.fw_buf, + &adev->firmware.fw_buf_mc, + &adev->firmware.fw_buf_ptr); + if (!adev->firmware.fw_buf) { dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n"); - goto failed; + return -ENOMEM; + } else if (amdgpu_sriov_vf(adev)) { + memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size); + } } + return 0; +} + +void amdgpu_ucode_free_bo(struct amdgpu_device *adev) +{ + if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) + amdgpu_bo_free_kernel(&adev->firmware.fw_buf, + &adev->firmware.fw_buf_mc, + &adev->firmware.fw_buf_ptr); +} - memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size); +int amdgpu_ucode_init_bo(struct amdgpu_device *adev) +{ + uint64_t fw_offset = 0; + int i; + struct amdgpu_firmware_info *ucode = NULL; + /* for bare metal, the ucode is allocated in GTT, so there is no need to refill the BO on reset/suspend */ + if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend)) + return 0; /* * if SMU loaded firmware, it needn't add SMC, UVD, and VCE * ucode info here @@ -465,7 +471,6 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; if (ucode->fw) { - header = (const struct common_firmware_header *)ucode->fw->data; amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset, adev->firmware.fw_buf_ptr + fw_offset); if (i == AMDGPU_UCODE_ID_CP_MEC1 && @@ -480,33 +485,4 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) } } return 0; - -failed: - if (err) - adev->firmware.load_type = AMDGPU_FW_LOAD_DIRECT; - - return err; -} - -int amdgpu_ucode_fini_bo(struct amdgpu_device *adev) -{ - int i; - struct amdgpu_firmware_info *ucode = NULL; - - if (!adev->firmware.fw_size) - return 0; - - for (i = 0; i < adev->firmware.max_ucodes; i++) { - ucode = &adev->firmware.ucode[i]; - if (ucode->fw) { - ucode->mc_addr = 0; - ucode->kaddr = NULL; - } - } - - amdgpu_bo_free_kernel(&adev->firmware.fw_buf, - &adev->firmware.fw_buf_mc, - &adev->firmware.fw_buf_ptr); - - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 8f3f1117728c..aa6641b944a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -276,8 +276,10 @@ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr); int amdgpu_ucode_validate(const struct firmware *fw); bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr, uint16_t hdr_major, uint16_t hdr_minor); + int amdgpu_ucode_init_bo(struct amdgpu_device *adev); -int amdgpu_ucode_fini_bo(struct amdgpu_device *adev); +int amdgpu_ucode_create_bo(struct amdgpu_device *adev); +void amdgpu_ucode_free_bo(struct amdgpu_device *adev); enum amdgpu_firmware_load_type amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 0cc5190f4f36..5f3f54073818 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) { int i; +
cancel_delayed_work_sync(&adev->vce.idle_work); + if (adev->vce.vcpu_bo == NULL) return 0; @@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev) if (i == AMDGPU_MAX_VCE_HANDLES) return 0; - cancel_delayed_work_sync(&adev->vce.idle_work); /* TODO: suspending running encoding sessions isn't supported */ return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index a73674f9a0f5..27da13df2f11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -36,6 +36,7 @@ #include "soc15_common.h" #include "vcn/vcn_1_0_offset.h" +#include "vcn/vcn_1_0_sh_mask.h" /* 1 second timeout */ #define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) @@ -120,8 +121,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) version_major, version_minor, family_id); } - bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE - + AMDGPU_VCN_SESSION_SIZE * 40; + bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, @@ -162,11 +162,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev) unsigned size; void *ptr; + cancel_delayed_work_sync(&adev->vcn.idle_work); + if (adev->vcn.vcpu_bo == NULL) return 0; - cancel_delayed_work_sync(&adev->vcn.idle_work); - size = amdgpu_bo_size(adev->vcn.vcpu_bo); ptr = adev->vcn.cpu_addr; @@ -212,18 +212,161 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev) return 0; } +static int amdgpu_vcn_pause_dpg_mode(struct amdgpu_device *adev, + struct dpg_pause_state *new_state) +{ + int ret_code; + uint32_t reg_data = 0; + uint32_t reg_data2 = 0; + struct amdgpu_ring *ring; + + /* pause/unpause if state is changed */ + if (adev->vcn.pause_state.fw_based != new_state->fw_based) { + DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d", + adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg, + new_state->fw_based, new_state->jpeg); + + reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + ret_code = 0; + + if (!(reg_data & UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK)) + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + + if (!ret_code) { + /* pause DPG non-jpeg */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code); + + /* Restore */ + ring = &adev->vcn.ring_enc[0]; + WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); + WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + + ring = &adev->vcn.ring_enc[1]; + WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + + ring = &adev->vcn.ring_dec; + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, + 
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2)); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + } + } else { + /* unpause dpg non-jpeg, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); + } + adev->vcn.pause_state.fw_based = new_state->fw_based; + } + + /* pause/unpause if state is changed */ + if (adev->vcn.pause_state.jpeg != new_state->jpeg) { + DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d", + adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg, + new_state->fw_based, new_state->jpeg); + + reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK); + + if (new_state->jpeg == VCN_DPG_STATE__PAUSE) { + ret_code = 0; + + if (!(reg_data & UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK)) + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + + if (!ret_code) { + /* Make sure JPRG Snoop is disabled before sending the pause */ + reg_data2 = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS); + reg_data2 |= UVD_POWER_STATUS__JRBC_SNOOP_DIS_MASK; + WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, reg_data2); + + /* pause DPG jpeg */ + reg_data |= UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_DPG_PAUSE, + UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK, ret_code); + + /* Restore */ + ring = &adev->vcn.ring_jpeg; + WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, + UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | + UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, ring->wptr); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, ring->wptr); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, + UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); + + ring = &adev->vcn.ring_dec; + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, + RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2)); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + } + } else { + /* unpause dpg jpeg, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data); + } + adev->vcn.pause_state.jpeg = new_state->jpeg; + } + + return 0; +} + static void amdgpu_vcn_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, vcn.idle_work.work); - unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec); - unsigned i; + unsigned int fences = 0; + unsigned int i; for (i = 0; i < adev->vcn.num_enc_rings; ++i) { fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); } + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + struct dpg_pause_state new_state; + + if (fences) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; + + if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg)) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + else + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; + + amdgpu_vcn_pause_dpg_mode(adev, &new_state); + } + fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg); + fences += 
amdgpu_fence_count_emitted(&adev->vcn.ring_dec); if (fences == 0) { amdgpu_gfx_off_ctrl(adev, true); @@ -250,6 +393,22 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE); } + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + struct dpg_pause_state new_state; + + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) + new_state.fw_based = VCN_DPG_STATE__PAUSE; + else + new_state.fw_based = adev->vcn.pause_state.fw_based; + + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) + new_state.jpeg = VCN_DPG_STATE__PAUSE; + else + new_state.jpeg = adev->vcn.pause_state.jpeg; + + amdgpu_vcn_pause_dpg_mode(adev, &new_state); + } } void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring) @@ -264,7 +423,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD); + WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", @@ -272,11 +431,11 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) return r; } amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID)); + tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9)); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -616,7 +775,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0xCAFEDEAD); + WREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { @@ -626,12 +785,12 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring) } amdgpu_ring_write(ring, - PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0, 0, 0)); + PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID)); + tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9)); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -665,7 +824,7 @@ static int amdgpu_vcn_jpeg_set_reg(struct amdgpu_ring *ring, uint32_t handle, ib = &job->ibs[0]; - ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH), 0, 0, PACKETJ_TYPE0); + ib->ptr[0] = PACKETJ(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9), 0, 0, PACKETJ_TYPE0); ib->ptr[1] = 0xDEADBEEF; for (i = 2; i < 16; i += 2) { ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); @@ -714,7 +873,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = 0; for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_JPEG_PITCH)); + tmp = RREG32(SOC15_REG_OFFSET(UVD, 0, mmUVD_SCRATCH9)); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 0b0b8638d73f..a0ad19af9080 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -24,9 +24,9 @@ #ifndef __AMDGPU_VCN_H__ #define __AMDGPU_VCN_H__ -#define AMDGPU_VCN_STACK_SIZE (200*1024) -#define AMDGPU_VCN_HEAP_SIZE (256*1024) -#define AMDGPU_VCN_SESSION_SIZE (50*1024) +#define AMDGPU_VCN_STACK_SIZE (128*1024) 
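Together with AMDGPU_VCN_CONTEXT_SIZE defined on the next line, this constant drives the VCPU BO sizing in amdgpu_vcn_sw_init() above: the old heap/session bookkeeping collapses into a single context region. A minimal standalone sketch of that computation follows; vcn_vcpu_bo_size() and its parameter names are hypothetical and for illustration only, while the arithmetic mirrors the bo_size hunk above (assuming the kernel's AMDGPU_GPU_PAGE_ALIGN macro and the two defines from this header).

/* Sketch: VCPU BO = stack + one context region, plus the page-aligned
 * ucode image (with its 8-byte tail) when the PSP does not load the
 * firmware. Mirrors the bo_size computation in amdgpu_vcn_sw_init(). */
static unsigned int vcn_vcpu_bo_size(unsigned int ucode_size_bytes,
				     bool psp_loads_fw)
{
	unsigned int bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_CONTEXT_SIZE;

	if (!psp_loads_fw)
		bo_size += AMDGPU_GPU_PAGE_ALIGN(ucode_size_bytes + 8);
	return bo_size;
}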
+#define AMDGPU_VCN_CONTEXT_SIZE (512*1024) + #define AMDGPU_VCN_FIRMWARE_OFFSET 256 #define AMDGPU_VCN_MAX_ENC_RINGS 3 @@ -56,6 +56,16 @@ enum engine_status_constants { UVD_STATUS__RBC_BUSY = 0x1, }; +enum internal_dpg_state { + VCN_DPG_STATE__UNPAUSE = 0, + VCN_DPG_STATE__PAUSE, +}; + +struct dpg_pause_state { + enum internal_dpg_state fw_based; + enum internal_dpg_state jpeg; +}; + struct amdgpu_vcn { struct amdgpu_bo *vcpu_bo; void *cpu_addr; @@ -69,6 +79,8 @@ struct amdgpu_vcn { struct amdgpu_ring ring_jpeg; struct amdgpu_irq_src irq; unsigned num_enc_rings; + enum amd_powergating_state cur_state; + struct dpg_pause_state pause_state; }; int amdgpu_vcn_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index d2469453dca2..79220a91abe3 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6277,12 +6277,12 @@ static int ci_dpm_sw_init(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) return ret; - ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 78ab939ae5d8..f41f5f57e9f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -2002,6 +2002,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block); + amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); if (amdgpu_dpm == -1) amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); else @@ -2014,8 +2016,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block); - amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block); amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block); break; @@ -2023,6 +2023,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block); + amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); if (amdgpu_dpm == -1) amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); else @@ -2035,8 +2037,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block); - amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block); amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block); break; @@ -2044,6 +2044,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block); + amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); 
amdgpu_device_ip_block_add(adev, &kv_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -2053,8 +2055,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block); - amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); + amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block); amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block); break; @@ -2063,6 +2064,8 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &cik_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block); amdgpu_device_ip_block_add(adev, &cik_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block); + amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); amdgpu_device_ip_block_add(adev, &kv_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -2072,8 +2075,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block); - amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block); amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block); break; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c index 44d10c2172f6..b5775c6a857b 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c @@ -276,7 +276,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev, dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); - entry->client_id = AMDGPU_IH_CLIENTID_LEGACY; + entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; @@ -318,7 +318,7 @@ static int cik_ih_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_ih_ring_init(adev, 64 * 1024, false); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) return r; @@ -332,7 +332,7 @@ static int cik_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih); amdgpu_irq_remove_domain(adev); return 0; @@ -468,8 +468,7 @@ static const struct amdgpu_ih_funcs cik_ih_funcs = { static void cik_ih_set_interrupt_funcs(struct amdgpu_device *adev) { - if (adev->irq.ih_funcs == NULL) - adev->irq.ih_funcs = &cik_ih_funcs; + adev->irq.ih_funcs = &cik_ih_funcs; } const struct amdgpu_ip_block_version cik_ih_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 154b1499b07e..b918c8886b75 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -970,19 +970,19 @@ static int cik_sdma_sw_init(void *handle) } /* SDMA trap event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq); if (r) return r; /* SDMA Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241, &adev->sdma.illegal_inst_irq); if (r) return r; /* SDMA Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247, + 
r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 247, &adev->sdma.illegal_inst_irq); if (r) return r; @@ -1370,10 +1370,8 @@ static const struct amdgpu_buffer_funcs cik_sdma_buffer_funcs = { static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev) { - if (adev->mman.buffer_funcs == NULL) { - adev->mman.buffer_funcs = &cik_sdma_buffer_funcs; - adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; - } + adev->mman.buffer_funcs = &cik_sdma_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; } static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = { @@ -1389,15 +1387,13 @@ static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) struct drm_gpu_scheduler *sched; unsigned i; - if (adev->vm_manager.vm_pte_funcs == NULL) { - adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } const struct amdgpu_ip_block_version cik_sdma_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c index 960c29e17da6..df5ac4d85a00 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c @@ -255,7 +255,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev, dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); - entry->client_id = AMDGPU_IH_CLIENTID_LEGACY; + entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; @@ -297,7 +297,7 @@ static int cz_ih_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_ih_ring_init(adev, 64 * 1024, false); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) return r; @@ -311,7 +311,7 @@ static int cz_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih); amdgpu_irq_remove_domain(adev); return 0; @@ -449,8 +449,7 @@ static const struct amdgpu_ih_funcs cz_ih_funcs = { static void cz_ih_set_interrupt_funcs(struct amdgpu_device *adev) { - if (adev->irq.ih_funcs == NULL) - adev->irq.ih_funcs = &cz_ih_funcs; + adev->irq.ih_funcs = &cz_ih_funcs; } const struct amdgpu_ip_block_version cz_ih_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 3916aa6cc4ec..4cfecdce29a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2746,19 +2746,19 @@ static int dce_v10_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); if (r) return r; } for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) { - r = amdgpu_irq_add_id(adev, 
AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); if (r) return r; } /* HPD hotplug */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) return r; @@ -3570,8 +3570,7 @@ static const struct amdgpu_display_funcs dce_v10_0_display_funcs = { static void dce_v10_0_set_display_funcs(struct amdgpu_device *adev) { - if (adev->mode_info.funcs == NULL) - adev->mode_info.funcs = &dce_v10_0_display_funcs; + adev->mode_info.funcs = &dce_v10_0_display_funcs; } static const struct amdgpu_irq_src_funcs dce_v10_0_crtc_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 4ffb612a4e53..7c868916d90f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2867,19 +2867,19 @@ static int dce_v11_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); if (r) return r; } for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); if (r) return r; } /* HPD hotplug */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); if (r) return r; @@ -3702,8 +3702,7 @@ static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) { - if (adev->mode_info.funcs == NULL) - adev->mode_info.funcs = &dce_v11_0_display_funcs; + adev->mode_info.funcs = &dce_v11_0_display_funcs; } static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 480c5348a14f..17eaaba36017 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2616,19 +2616,19 @@ static int dce_v6_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); if (r) return r; } for (i = 8; i < 20; i += 2) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); if (r) return r; } /* HPD hotplug */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq); if (r) return r; @@ -3376,8 +3376,7 @@ static const struct amdgpu_display_funcs dce_v6_0_display_funcs = { static void dce_v6_0_set_display_funcs(struct amdgpu_device *adev) { - if (adev->mode_info.funcs == NULL) - adev->mode_info.funcs = &dce_v6_0_display_funcs; + adev->mode_info.funcs = &dce_v6_0_display_funcs; } static const 
struct amdgpu_irq_src_funcs dce_v6_0_crtc_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 797196476c94..8c0576978d36 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2643,19 +2643,19 @@ static int dce_v8_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; for (i = 0; i < adev->mode_info.num_crtc; i++) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); if (r) return r; } for (i = 8; i < 20; i += 2) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); if (r) return r; } /* HPD hotplug */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq); if (r) return r; @@ -3458,8 +3458,7 @@ static const struct amdgpu_display_funcs dce_v8_0_display_funcs = { static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev) { - if (adev->mode_info.funcs == NULL) - adev->mode_info.funcs = &dce_v8_0_display_funcs; + adev->mode_info.funcs = &dce_v8_0_display_funcs; } static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 15257634a53a..fdace004544d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -372,7 +372,7 @@ static int dce_virtual_sw_init(void *handle) int r, i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq); if (r) return r; @@ -649,8 +649,7 @@ static const struct amdgpu_display_funcs dce_virtual_display_funcs = { static void dce_virtual_set_display_funcs(struct amdgpu_device *adev) { - if (adev->mode_info.funcs == NULL) - adev->mode_info.funcs = &dce_virtual_display_funcs; + adev->mode_info.funcs = &dce_virtual_display_funcs; } static int dce_virtual_pageflip(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index de184a886057..d76eb27945dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1552,7 +1552,7 @@ static void gfx_v6_0_config_init(struct amdgpu_device *adev) adev->gfx.config.double_offchip_lds_buf = 0; } -static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) +static void gfx_v6_0_constants_init(struct amdgpu_device *adev) { u32 gb_addr_config = 0; u32 mc_shared_chmap, mc_arb_ramcfg; @@ -3094,15 +3094,15 @@ static int gfx_v6_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i, r; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); if (r) return r; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq); if (r) return r; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq); + r = 
amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq); if (r) return r; @@ -3175,7 +3175,7 @@ static int gfx_v6_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - gfx_v6_0_gpu_init(adev); + gfx_v6_0_constants_init(adev); r = gfx_v6_0_rlc_resume(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index fc39ebbc9d9f..0e72bc09939a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -1886,14 +1886,14 @@ static void gfx_v7_0_config_init(struct amdgpu_device *adev) } /** - * gfx_v7_0_gpu_init - setup the 3D engine + * gfx_v7_0_constants_init - setup the 3D engine * * @adev: amdgpu_device pointer * - * Configures the 3D engine and tiling configuration - * registers so that the 3D engine is usable. + * init the gfx constants such as the 3D engine, tiling configuration + * registers, maximum number of quad pipes, render backends... */ -static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) +static void gfx_v7_0_constants_init(struct amdgpu_device *adev) { u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base; u32 tmp; @@ -4516,18 +4516,18 @@ static int gfx_v7_0_sw_init(void *handle) adev->gfx.mec.num_queue_per_pipe = 8; /* EOP Event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq); if (r) return r; /* Privileged reg */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq); if (r) return r; /* Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq); if (r) return r; @@ -4624,7 +4624,7 @@ static int gfx_v7_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - gfx_v7_0_gpu_init(adev); + gfx_v7_0_constants_init(adev); /* init rlc */ r = gfx_v7_0_rlc_resume(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 470dc80f4fe7..3d0f277a6523 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1173,64 +1173,61 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) } } - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; - info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; - info->fw = adev->gfx.pfp_fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; - info->ucode_id = AMDGPU_UCODE_ID_CP_ME; - info->fw = adev->gfx.me_fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE]; - info->ucode_id = AMDGPU_UCODE_ID_CP_CE; - info->fw = adev->gfx.ce_fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_PFP]; + info->ucode_id = AMDGPU_UCODE_ID_CP_PFP; + info->fw = adev->gfx.pfp_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + 
ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_ME]; + info->ucode_id = AMDGPU_UCODE_ID_CP_ME; + info->fw = adev->gfx.me_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_CE]; + info->ucode_id = AMDGPU_UCODE_ID_CP_CE; + info->fw = adev->gfx.ce_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_G; + info->fw = adev->gfx.rlc_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; + info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; + info->fw = adev->gfx.mec_fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + + /* we also need to account for the JT (jump table) */ + cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE); - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_G]; - info->ucode_id = AMDGPU_UCODE_ID_RLC_G; - info->fw = adev->gfx.rlc_fw; - header = (const struct common_firmware_header *)info->fw->data; + if (amdgpu_sriov_vf(adev)) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE]; + info->ucode_id = AMDGPU_UCODE_ID_STORAGE; + info->fw = adev->gfx.mec_fw; adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE); + } - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; - info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; - info->fw = adev->gfx.mec_fw; + if (adev->gfx.mec2_fw) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; + info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; + info->fw = adev->gfx.mec2_fw; header = (const struct common_firmware_header *)info->fw->data; adev->firmware.fw_size += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - - /* we need account JT in */ - cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE); - - if (amdgpu_sriov_vf(adev)) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_STORAGE]; - info->ucode_id = AMDGPU_UCODE_ID_STORAGE; - info->fw = adev->gfx.mec_fw; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(64 * PAGE_SIZE), PAGE_SIZE); - } - - if (adev->gfx.mec2_fw) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2]; - info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2; - info->fw = adev->gfx.mec2_fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - } - } out: @@ -2048,36 +2045,31 @@ static int gfx_v8_0_sw_init(void *handle) adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; - /* KIQ event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_INT_IB2, &adev->gfx.kiq.irq); - if (r) - return r; - /* EOP Event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq); + r = amdgpu_irq_add_id(adev,
AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq); if (r) return r; /* Privileged reg */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_REG_FAULT, &adev->gfx.priv_reg_irq); if (r) return r; /* Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_PRIV_INSTR_FAULT, &adev->gfx.priv_inst_irq); if (r) return r; /* Add CP EDC/ECC irq */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_ECC_ERROR, &adev->gfx.cp_ecc_error_irq); if (r) return r; /* SQ interrupts. */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SQ_INTERRUPT_MSG, &adev->gfx.sq_irq); if (r) { DRM_ERROR("amdgpu_irq_add() for SQ failed: %d\n", r); @@ -3835,7 +3827,7 @@ static void gfx_v8_0_config_init(struct amdgpu_device *adev) } } -static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) +static void gfx_v8_0_constants_init(struct amdgpu_device *adev) { u32 tmp, sh_static_mem_cfg; int i; @@ -4181,65 +4173,11 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev) udelay(50); } -static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev) -{ - const struct rlc_firmware_header_v2_0 *hdr; - const __le32 *fw_data; - unsigned i, fw_size; - - if (!adev->gfx.rlc_fw) - return -EINVAL; - - hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; - amdgpu_ucode_print_rlc_hdr(&hdr->header); - - fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - - WREG32(mmRLC_GPM_UCODE_ADDR, 0); - for (i = 0; i < fw_size; i++) - WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++)); - WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version); - - return 0; -} - static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) { - int r; - u32 tmp; - gfx_v8_0_rlc_stop(adev); - - /* disable CG */ - tmp = RREG32(mmRLC_CGCG_CGLS_CTRL); - tmp &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | - RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK); - WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); - if (adev->asic_type == CHIP_POLARIS11 || - adev->asic_type == CHIP_POLARIS10 || - adev->asic_type == CHIP_POLARIS12 || - adev->asic_type == CHIP_VEGAM) { - tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D); - tmp &= ~0x3; - WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp); - } - - /* disable PG */ - WREG32(mmRLC_PG_CNTL, 0); - gfx_v8_0_rlc_reset(adev); gfx_v8_0_init_pg(adev); - - - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { - /* legacy rlc firmware loading */ - r = gfx_v8_0_rlc_load_microcode(adev); - if (r) - return r; - } - gfx_v8_0_rlc_start(adev); return 0; @@ -4265,63 +4203,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) udelay(50); } -static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev) -{ - const struct gfx_firmware_header_v1_0 *pfp_hdr; - const struct gfx_firmware_header_v1_0 *ce_hdr; - const struct gfx_firmware_header_v1_0 *me_hdr; - const __le32 *fw_data; - unsigned i, fw_size; - - if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw) - return -EINVAL; - - pfp_hdr = (const 
struct gfx_firmware_header_v1_0 *) - adev->gfx.pfp_fw->data; - ce_hdr = (const struct gfx_firmware_header_v1_0 *) - adev->gfx.ce_fw->data; - me_hdr = (const struct gfx_firmware_header_v1_0 *) - adev->gfx.me_fw->data; - - amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); - amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); - amdgpu_ucode_print_gfx_hdr(&me_hdr->header); - - gfx_v8_0_cp_gfx_enable(adev, false); - - /* PFP */ - fw_data = (const __le32 *) - (adev->gfx.pfp_fw->data + - le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes)); - fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4; - WREG32(mmCP_PFP_UCODE_ADDR, 0); - for (i = 0; i < fw_size; i++) - WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++)); - WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version); - - /* CE */ - fw_data = (const __le32 *) - (adev->gfx.ce_fw->data + - le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes)); - fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4; - WREG32(mmCP_CE_UCODE_ADDR, 0); - for (i = 0; i < fw_size; i++) - WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++)); - WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version); - - /* ME */ - fw_data = (const __le32 *) - (adev->gfx.me_fw->data + - le32_to_cpu(me_hdr->header.ucode_array_offset_bytes)); - fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4; - WREG32(mmCP_ME_RAM_WADDR, 0); - for (i = 0; i < fw_size; i++) - WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++)); - WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version); - - return 0; -} - static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev) { u32 count = 0; @@ -4521,52 +4402,6 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable) udelay(50); } -static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev) -{ - const struct gfx_firmware_header_v1_0 *mec_hdr; - const __le32 *fw_data; - unsigned i, fw_size; - - if (!adev->gfx.mec_fw) - return -EINVAL; - - gfx_v8_0_cp_compute_enable(adev, false); - - mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; - amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); - - fw_data = (const __le32 *) - (adev->gfx.mec_fw->data + - le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes)); - fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4; - - /* MEC1 */ - WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0); - for (i = 0; i < fw_size; i++) - WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data+i)); - WREG32(mmCP_MEC_ME1_UCODE_ADDR, adev->gfx.mec_fw_version); - - /* Loading MEC2 firmware is only necessary if MEC2 should run different microcode than MEC1. 
*/ - if (adev->gfx.mec2_fw) { - const struct gfx_firmware_header_v1_0 *mec2_hdr; - - mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; - amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); - - fw_data = (const __le32 *) - (adev->gfx.mec2_fw->data + - le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes)); - fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4; - - WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0); - for (i = 0; i < fw_size; i++) - WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data+i)); - WREG32(mmCP_MEC_ME2_UCODE_ADDR, adev->gfx.mec2_fw_version); - } - - return 0; -} - /* KIQ functions */ static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring) { @@ -4892,7 +4727,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) struct vi_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { + if (!adev->in_gpu_reset && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation)); ((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -5000,17 +4835,6 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) if (!(adev->flags & AMD_IS_APU)) gfx_v8_0_enable_gui_idle_interrupt(adev, false); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { - /* legacy firmware loading */ - r = gfx_v8_0_cp_gfx_load_microcode(adev); - if (r) - return r; - - r = gfx_v8_0_cp_compute_load_microcode(adev); - if (r) - return r; - } - r = gfx_v8_0_kiq_resume(adev); if (r) return r; @@ -5039,7 +4863,7 @@ static int gfx_v8_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfx_v8_0_init_golden_registers(adev); - gfx_v8_0_gpu_init(adev); + gfx_v8_0_constants_init(adev); r = gfx_v8_0_rlc_resume(adev); if (r) @@ -5080,6 +4904,55 @@ static int gfx_v8_0_kcq_disable(struct amdgpu_device *adev) return r; } +static bool gfx_v8_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE) + || RREG32(mmGRBM_STATUS2) != 0x8) + return false; + else + return true; +} + +static bool gfx_v8_0_rlc_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (RREG32(mmGRBM_STATUS2) != 0x8) + return false; + else + return true; +} + +static int gfx_v8_0_wait_for_rlc_idle(void *handle) +{ + unsigned int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + if (gfx_v8_0_rlc_is_idle(handle)) + return 0; + + udelay(1); + } + return -ETIMEDOUT; +} + +static int gfx_v8_0_wait_for_idle(void *handle) +{ + unsigned int i; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + for (i = 0; i < adev->usec_timeout; i++) { + if (gfx_v8_0_is_idle(handle)) + return 0; + + udelay(1); + } + return -ETIMEDOUT; +} + static int gfx_v8_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -5098,51 +4971,27 @@ static int gfx_v8_0_hw_fini(void *handle) pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; } - gfx_v8_0_cp_enable(adev, false); - gfx_v8_0_rlc_stop(adev); - + adev->gfx.rlc.funcs->enter_safe_mode(adev); + if (!gfx_v8_0_wait_for_idle(adev)) + gfx_v8_0_cp_enable(adev, false); + else + pr_err("cp is busy, skip halt cp\n"); + if (!gfx_v8_0_wait_for_rlc_idle(adev)) + gfx_v8_0_rlc_stop(adev); + else + pr_err("rlc is busy, skip halt rlc\n"); + 
adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int gfx_v8_0_suspend(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->gfx.in_suspend = true; - return gfx_v8_0_hw_fini(adev); + return gfx_v8_0_hw_fini(handle); } static int gfx_v8_0_resume(void *handle) { - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - r = gfx_v8_0_hw_init(adev); - adev->gfx.in_suspend = false; - return r; -} - -static bool gfx_v8_0_is_idle(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS, GUI_ACTIVE)) - return false; - else - return true; -} - -static int gfx_v8_0_wait_for_idle(void *handle) -{ - unsigned i; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - for (i = 0; i < adev->usec_timeout; i++) { - if (gfx_v8_0_is_idle(handle)) - return 0; - - udelay(1); - } - return -ETIMEDOUT; + return gfx_v8_0_hw_init(handle); } static bool gfx_v8_0_check_soft_reset(void *handle) @@ -7013,52 +6862,6 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev, return 0; } -static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, - unsigned int type, - enum amdgpu_interrupt_state state) -{ - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); - - switch (type) { - case AMDGPU_CP_KIQ_IRQ_DRIVER0: - WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE, - state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); - if (ring->me == 1) - WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL, - ring->pipe, - GENERIC2_INT_ENABLE, - state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); - else - WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL, - ring->pipe, - GENERIC2_INT_ENABLE, - state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1); - break; - default: - BUG(); /* kiq only support GENERIC2_INT now */ - break; - } - return 0; -} - -static int gfx_v8_0_kiq_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - u8 me_id, pipe_id, queue_id; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); - - me_id = (entry->ring_id & 0x0c) >> 2; - pipe_id = (entry->ring_id & 0x03) >> 0; - queue_id = (entry->ring_id & 0x70) >> 4; - DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n", - me_id, pipe_id, queue_id); - - amdgpu_fence_process(ring); - return 0; -} - static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -7209,11 +7012,6 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = { .process = gfx_v8_0_priv_inst_irq, }; -static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = { - .set = gfx_v8_0_kiq_set_interrupt_state, - .process = gfx_v8_0_kiq_irq, -}; - static const struct amdgpu_irq_src_funcs gfx_v8_0_cp_ecc_error_irq_funcs = { .set = gfx_v8_0_set_cp_ecc_int_state, .process = gfx_v8_0_cp_ecc_error_irq, @@ -7235,9 +7033,6 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs; - adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; - adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs; - adev->gfx.cp_ecc_error_irq.num_types = 1; adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index f369d9603435..6d7baf59d6e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -97,6 +97,7 @@ 
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin"); static const struct soc15_reg_golden golden_settings_gc_9_0[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), @@ -133,7 +134,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00020000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = @@ -173,7 +177,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x00003120), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] = @@ -247,7 +254,10 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff03ff, 0x01000107), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) }; static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = @@ -908,6 +918,50 @@ static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, buffer[count++] = cpu_to_le32(0); } +static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev) +{ + struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + uint32_t pg_always_on_cu_num = 2; + uint32_t always_on_cu_num; + uint32_t i, j, k; + uint32_t mask, cu_bitmap, counter; + + if (adev->flags & AMD_IS_APU) + always_on_cu_num = 4; + else if (adev->asic_type == CHIP_VEGA12) + always_on_cu_num = 8; + else + always_on_cu_num = 12; + + mutex_lock(&adev->grbm_idx_mutex); + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { + mask = 1; + cu_bitmap = 0; + counter = 0; + gfx_v9_0_select_se_sh(adev, i, j, 0xffffffff); + + for (k = 0; k < 
adev->gfx.config.max_cu_per_sh; k ++) { + if (cu_info->bitmap[i][j] & mask) { + if (counter == pg_always_on_cu_num) + WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap); + if (counter < always_on_cu_num) + cu_bitmap |= mask; + else + break; + counter++; + } + mask <<= 1; + } + + WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, cu_bitmap); + cu_info->ao_cu_bitmap[i][j] = cu_bitmap; + } + } + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); +} + static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) { uint32_t data; @@ -941,8 +995,59 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) data |= 0x00C00000; WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data); - /* set RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF */ - WREG32_SOC15(GC, 0, mmRLC_LB_ALWAYS_ACTIVE_CU_MASK, 0xFFF); + /* + * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xF (4 CUs AON for Raven), + * programmed in gfx_v9_0_init_always_on_cu_mask() + */ + + /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved, + * but used for RLC_LB_CNTL configuration */ + data = RLC_LB_CNTL__LB_CNT_SPIM_ACTIVE_MASK; + data |= REG_SET_FIELD(data, RLC_LB_CNTL, CU_MASK_USED_OFF_HYST, 0x09); + data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000); + WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data); + mutex_unlock(&adev->grbm_idx_mutex); + + gfx_v9_0_init_always_on_cu_mask(adev); +} + +static void gfx_v9_4_init_lbpw(struct amdgpu_device *adev) +{ + uint32_t data; + + /* set mmRLC_LB_THR_CONFIG_1/2/3/4 */ + WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_1, 0x0000007F); + WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_2, 0x033388F8); + WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_3, 0x00000077); + WREG32_SOC15(GC, 0, mmRLC_LB_THR_CONFIG_4, (0x10 | 0x27 << 8 | 0x02FA << 16)); + + /* set mmRLC_LB_CNTR_INIT = 0x0000_0000 */ + WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_INIT, 0x00000000); + + /* set mmRLC_LB_CNTR_MAX = 0x0000_0500 */ + WREG32_SOC15(GC, 0, mmRLC_LB_CNTR_MAX, 0x00000800); + + mutex_lock(&adev->grbm_idx_mutex); + /* set mmRLC_LB_INIT_CU_MASK thru broadcast mode to enable all SE/SH*/ + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + WREG32_SOC15(GC, 0, mmRLC_LB_INIT_CU_MASK, 0xffffffff); + + /* set mmRLC_LB_PARAMS = 0x003F_1006 */ + data = REG_SET_FIELD(0, RLC_LB_PARAMS, FIFO_SAMPLES, 0x0003); + data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLES, 0x0010); + data |= REG_SET_FIELD(data, RLC_LB_PARAMS, PG_IDLE_SAMPLE_INTERVAL, 0x033F); + WREG32_SOC15(GC, 0, mmRLC_LB_PARAMS, data); + + /* set mmRLC_GPM_GENERAL_7[31-16] = 0x00C0 */ + data = RREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7); + data &= 0x0000FFFF; + data |= 0x00C00000; + WREG32_SOC15(GC, 0, mmRLC_GPM_GENERAL_7, data); + + /* + * RLC_LB_ALWAYS_ACTIVE_CU_MASK = 0xFFF (12 CUs AON), + * programmed in gfx_v9_0_init_always_on_cu_mask() + */ /* set RLC_LB_CNTL = 0x8000_0095, 31 bit is reserved, * but used for RLC_LB_CNTL configuration */ @@ -951,6 +1056,8 @@ static void gfx_v9_0_init_lbpw(struct amdgpu_device *adev) data |= REG_SET_FIELD(data, RLC_LB_CNTL, RESERVED, 0x80000); WREG32_SOC15(GC, 0, mmRLC_LB_CNTL, data); mutex_unlock(&adev->grbm_idx_mutex); + + gfx_v9_0_init_always_on_cu_mask(adev); } static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable) @@ -1084,8 +1191,17 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) rv_init_cp_jump_table(adev); amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + } + switch (adev->asic_type) { + case CHIP_RAVEN: gfx_v9_0_init_lbpw(adev); + break; + 
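/*
 * Aside: gfx_v9_0_init_always_on_cu_mask() above walks each SE/SH's
 * CU bitmap and keeps the first always_on_cu_num enabled CUs active
 * (and additionally writes the first pg_always_on_cu_num of them to
 * the PG always-on mask). The selection step, modelled standalone
 * and runnable (names are stand-ins):
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pick_always_on(uint32_t present, uint32_t aon)
{
        uint32_t mask = 1, out = 0, counter = 0;
        int k;

        for (k = 0; k < 32; k++) {
                if (present & mask) {
                        if (counter >= aon)
                                break;          /* enough CUs kept on */
                        out |= mask;
                        counter++;
                }
                mask <<= 1;
        }
        return out;
}

int main(void)
{
        /* 10 CUs present, keep 4 always on: lowest four set bits win */
        printf("0x%08x\n", pick_always_on(0x3ff, 4)); /* 0x0000000f */
        return 0;
}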
case CHIP_VEGA20: + gfx_v9_4_init_lbpw(adev); + break; + default: + break; } return 0; @@ -1605,11 +1721,6 @@ static int gfx_v9_0_sw_init(void *handle) adev->gfx.mec.num_pipe_per_mec = 4; adev->gfx.mec.num_queue_per_pipe = 8; - /* KIQ event */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_IB2_INTERRUPT_PKT, &adev->gfx.kiq.irq); - if (r) - return r; - /* EOP Event */ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_EOP_INTERRUPT, &adev->gfx.eop_irq); if (r) @@ -1847,7 +1958,7 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev) mutex_unlock(&adev->srbm_mutex); } -static void gfx_v9_0_gpu_init(struct amdgpu_device *adev) +static void gfx_v9_0_constants_init(struct amdgpu_device *adev) { u32 tmp; int i; @@ -2403,7 +2514,8 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev) return r; } - if (adev->asic_type == CHIP_RAVEN) { + if (adev->asic_type == CHIP_RAVEN || + adev->asic_type == CHIP_VEGA20) { if (amdgpu_lbpw != 0) gfx_v9_0_enable_lbpw(adev, true); else @@ -3091,7 +3203,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring) struct v9_mqd *mqd = ring->mqd_ptr; int mqd_idx = ring - &adev->gfx.compute_ring[0]; - if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { + if (!adev->in_gpu_reset && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation)); ((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF; ((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF; @@ -3235,7 +3347,7 @@ static int gfx_v9_0_hw_init(void *handle) gfx_v9_0_init_golden_registers(adev); - gfx_v9_0_gpu_init(adev); + gfx_v9_0_constants_init(adev); r = gfx_v9_0_csb_vram_pin(adev); if (r) @@ -3310,7 +3422,7 @@ static int gfx_v9_0_hw_fini(void *handle) /* Use deinitialize sequence from CAIL when unbinding device from driver, * otherwise KIQ is hanging when binding back */ - if (!adev->in_gpu_reset && !adev->gfx.in_suspend) { + if (!adev->in_gpu_reset && !adev->in_suspend) { mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, adev->gfx.kiq.ring.me, adev->gfx.kiq.ring.pipe, @@ -3330,20 +3442,12 @@ static int gfx_v9_0_hw_fini(void *handle) static int gfx_v9_0_suspend(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->gfx.in_suspend = true; - return gfx_v9_0_hw_fini(adev); + return gfx_v9_0_hw_fini(handle); } static int gfx_v9_0_resume(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int r; - - r = gfx_v9_0_hw_init(adev); - adev->gfx.in_suspend = false; - return r; + return gfx_v9_0_hw_init(handle); } static bool gfx_v9_0_is_idle(void *handle) @@ -4609,68 +4713,6 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } -static int gfx_v9_0_kiq_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, - unsigned int type, - enum amdgpu_interrupt_state state) -{ - uint32_t tmp, target; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); - - if (ring->me == 1) - target = SOC15_REG_OFFSET(GC, 0, mmCP_ME1_PIPE0_INT_CNTL); - else - target = SOC15_REG_OFFSET(GC, 0, mmCP_ME2_PIPE0_INT_CNTL); - target += ring->pipe; - - switch (type) { - case AMDGPU_CP_KIQ_IRQ_DRIVER0: - if (state == AMDGPU_IRQ_STATE_DISABLE) { - tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); - tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, - GENERIC2_INT_ENABLE, 0); - WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); - - tmp = RREG32(target); - tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, - GENERIC2_INT_ENABLE, 0); - WREG32(target, tmp); - } else 
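/*
 * Aside: the kcq_init_queue and hw_fini hunks above swap the
 * gfx-local in_suspend flag for the device-wide adev->in_suspend,
 * so a single test separates a cold init from the reset/resume
 * paths. Sketch of the guard (types and helpers are stand-ins):
 */
#include <string.h>

struct dev_state { int in_gpu_reset, in_suspend; };

static void kcq_init_mqd(struct dev_state *s, char *mqd,
                         const char *backup, int len)
{
        if (!s->in_gpu_reset && !s->in_suspend)
                memset(mqd, 0, len);            /* cold init: fresh MQD      */
        else
                memcpy(mqd, backup, len);       /* reset/resume: restore MQD */
}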
{ - tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL); - tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, - GENERIC2_INT_ENABLE, 1); - WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp); - - tmp = RREG32(target); - tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, - GENERIC2_INT_ENABLE, 1); - WREG32(target, tmp); - } - break; - default: - BUG(); /* kiq only support GENERIC2_INT now */ - break; - } - return 0; -} - -static int gfx_v9_0_kiq_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - u8 me_id, pipe_id, queue_id; - struct amdgpu_ring *ring = &(adev->gfx.kiq.ring); - - me_id = (entry->ring_id & 0x0c) >> 2; - pipe_id = (entry->ring_id & 0x03) >> 0; - queue_id = (entry->ring_id & 0x70) >> 4; - DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n", - me_id, pipe_id, queue_id); - - amdgpu_fence_process(ring); - return 0; -} - static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, @@ -4819,11 +4861,6 @@ static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) adev->gfx.compute_ring[i].funcs = &gfx_v9_0_ring_funcs_compute; } -static const struct amdgpu_irq_src_funcs gfx_v9_0_kiq_irq_funcs = { - .set = gfx_v9_0_kiq_set_interrupt_state, - .process = gfx_v9_0_kiq_irq, -}; - static const struct amdgpu_irq_src_funcs gfx_v9_0_eop_irq_funcs = { .set = gfx_v9_0_set_eop_interrupt_state, .process = gfx_v9_0_eop_irq, @@ -4849,9 +4886,6 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs; - - adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; - adev->gfx.kiq.irq.funcs = &gfx_v9_0_kiq_irq_funcs; } static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) @@ -4871,7 +4905,20 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev) { /* init asci gds info */ - adev->gds.mem.total_size = RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE); + switch (adev->asic_type) { + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_VEGA20: + adev->gds.mem.total_size = 0x10000; + break; + case CHIP_RAVEN: + adev->gds.mem.total_size = 0x1000; + break; + default: + adev->gds.mem.total_size = 0x10000; + break; + } + adev->gds.gws.total_size = 64; adev->gds.oa.total_size = 16; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 65f58ebcf835..ceb7847b504f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -82,7 +82,8 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) * to get rid of the VM fault and hardware hang. 
*/ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - (max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18) + 0x1); + max((adev->gmc.vram_end >> 18) + 0x1, + adev->gmc.agp_end >> 18)); else WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 4411463ca719..e1c2b4e9c7b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -859,11 +859,11 @@ static int gmc_v6_0_sw_init(void *handle) adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp); } - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault); if (r) return r; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault); if (r) return r; @@ -1180,8 +1180,7 @@ static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = { static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev) { - if (adev->gmc.gmc_funcs == NULL) - adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs; + adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs; } static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index ae776ce9a415..910c4ce19cb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -991,11 +991,11 @@ static int gmc_v7_0_sw_init(void *handle) adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp); } - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); if (r) return r; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); if (r) return r; @@ -1388,8 +1388,7 @@ static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev) { - if (adev->gmc.gmc_funcs == NULL) - adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs; + adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs; } static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 53ae49b8bde8..1d3265c97b70 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1095,11 +1095,11 @@ static int gmc_v8_0_sw_init(void *handle) adev->gmc.vram_type = gmc_v8_0_convert_vram_type(tmp); } - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); if (r) return r; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); if (r) return r; @@ -1733,8 +1733,7 @@ static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = { static void gmc_v8_0_set_gmc_funcs(struct amdgpu_device *adev) { - if 
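/*
 * Aside: the MC_VM_SYSTEM_APERTURE_HIGH_ADDR hunk above changes the
 * arithmetic, not just the formatting: the register holds a 256KB
 * (>> 18) page index, and the +0x1 pad now applies to vram_end
 * before the max() rather than to whichever of vram_end/agp_end is
 * larger. Runnable comparison with hypothetical addresses:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }

int main(void)
{
        uint64_t vram_end = 0x0FFFFFFFULL;      /* assumed: 256MB - 1 */
        uint64_t agp_end  = 0x3FFFFFFFULL;      /* assumed: 1GB - 1   */

        uint64_t old = (max64(vram_end, agp_end) >> 18) + 0x1;      /* 0x1000 */
        uint64_t new = max64((vram_end >> 18) + 0x1, agp_end >> 18); /* 0xfff */

        /* the old form over-extended the aperture by one 256KB unit
         * whenever the AGP window, not VRAM, defined the high end */
        printf("old=0x%llx new=0x%llx\n",
               (unsigned long long)old, (unsigned long long)new);
        return 0;
}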
(adev->gmc.gmc_funcs == NULL) - adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs; + adev->gmc.gmc_funcs = &gmc_v8_0_gmc_funcs; } static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index aad3c7c5fb3a..f35d7a554ad5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -593,8 +593,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) { - if (adev->gmc.gmc_funcs == NULL) - adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; + adev->gmc.gmc_funcs = &gmc_v9_0_gmc_funcs; } static int gmc_v9_0_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c index 842c4b677b4d..cf0fc61aebe6 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c @@ -255,7 +255,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev, dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); - entry->client_id = AMDGPU_IH_CLIENTID_LEGACY; + entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; @@ -297,7 +297,7 @@ static int iceland_ih_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_ih_ring_init(adev, 64 * 1024, false); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) return r; @@ -311,7 +311,7 @@ static int iceland_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih); amdgpu_irq_remove_domain(adev); return 0; @@ -447,8 +447,7 @@ static const struct amdgpu_ih_funcs iceland_ih_funcs = { static void iceland_ih_set_interrupt_funcs(struct amdgpu_device *adev) { - if (adev->irq.ih_funcs == NULL) - adev->irq.ih_funcs = &iceland_ih_funcs; + adev->irq.ih_funcs = &iceland_ih_funcs; } const struct amdgpu_ip_block_version iceland_ih_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index cb79a93c2eb7..d0e478f43443 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2995,12 +2995,12 @@ static int kv_dpm_sw_init(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) return ret; - ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 80698b5ffa4a..14649f8475f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -100,7 +100,8 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev) * to get rid of the VM fault and hardware hang. 
*/ WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, - (max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18) + 0x1); + max((adev->gmc.vram_end >> 18) + 0x1, + adev->gmc.agp_end >> 18)); else WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18); diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index 842567b53df5..64e875d528dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -580,11 +580,11 @@ int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev) { int r; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq); if (r) return r; - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq); if (r) { amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 9217af00bc8d..3f3fac2d50cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -306,11 +306,8 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type) { int ret = 0; - struct psp_ring *ring; struct amdgpu_device *adev = psp->adev; - ring = &psp->km_ring; - /* Write the ring destroy command to C2PMSG_64 */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_DESTROY_RINGS); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index c403bdf8ad70..2d4770e173dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -504,41 +504,6 @@ static int sdma_v2_4_rlc_resume(struct amdgpu_device *adev) return 0; } -/** - * sdma_v2_4_load_microcode - load the sDMA ME ucode - * - * @adev: amdgpu_device pointer - * - * Loads the sDMA0/1 ucode. - * Returns 0 for success, -EINVAL if the ucode is not available. 
- */ -static int sdma_v2_4_load_microcode(struct amdgpu_device *adev) -{ - const struct sdma_firmware_header_v1_0 *hdr; - const __le32 *fw_data; - u32 fw_size; - int i, j; - - /* halt the MEs */ - sdma_v2_4_enable(adev, false); - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (!adev->sdma.instance[i].fw) - return -EINVAL; - hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; - amdgpu_ucode_print_sdma_hdr(&hdr->header); - fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - fw_data = (const __le32 *) - (adev->sdma.instance[i].fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); - for (j = 0; j < fw_size; j++) - WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); - WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version); - } - - return 0; -} /** * sdma_v2_4_start - setup and start the async dma engines @@ -552,13 +517,6 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) { int r; - - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { - r = sdma_v2_4_load_microcode(adev); - if (r) - return r; - } - /* halt the engine before programing */ sdma_v2_4_enable(adev, false); @@ -898,19 +856,19 @@ static int sdma_v2_4_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* SDMA trap event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, &adev->sdma.trap_irq); if (r) return r; /* SDMA Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241, &adev->sdma.illegal_inst_irq); if (r) return r; /* SDMA Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE, &adev->sdma.illegal_inst_irq); if (r) return r; @@ -1296,10 +1254,8 @@ static const struct amdgpu_buffer_funcs sdma_v2_4_buffer_funcs = { static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev) { - if (adev->mman.buffer_funcs == NULL) { - adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; - adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; - } + adev->mman.buffer_funcs = &sdma_v2_4_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; } static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = { @@ -1315,15 +1271,13 @@ static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) struct drm_gpu_scheduler *sched; unsigned i; - if (adev->vm_manager.vm_pte_funcs == NULL) { - adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v2_4_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 2677d6a1bf42..6fb3edaba0ec 
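/*
 * Aside: the deleted sdma_v2_4_load_microcode() above used the
 * classic ADDR/DATA register pair, where each write to the DATA
 * port auto-increments the address; it is presumably dropped
 * because these parts now always take the SMU-managed loading path
 * (the sdma_v3_0 hunk below registers the ucode with the SMU list
 * unconditionally). Schematic model (reg_write and the offsets are
 * stand-ins):
 */
#include <stdint.h>

extern void reg_write(uint32_t reg, uint32_t val);

#define UCODE_ADDR 0x10 /* illustrative offsets */
#define UCODE_DATA 0x11

static void upload_ucode(const uint32_t *fw, uint32_t ndw, uint32_t fw_version)
{
        uint32_t j;

        reg_write(UCODE_ADDR, 0);               /* rewind the write pointer */
        for (j = 0; j < ndw; j++)
                reg_write(UCODE_DATA, fw[j]);   /* address auto-increments  */
        reg_write(UCODE_ADDR, fw_version);      /* stamp the loaded version */
}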
100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -318,14 +318,13 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) if (adev->sdma.instance[i].feature_version >= 20) adev->sdma.instance[i].burst_nop = true; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) { - info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; - info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; - info->fw = adev->sdma.instance[i].fw; - header = (const struct common_firmware_header *)info->fw->data; - adev->firmware.fw_size += - ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); - } + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; + info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i; + info->fw = adev->sdma.instance[i].fw; + header = (const struct common_firmware_header *)info->fw->data; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + } out: if (err) { @@ -778,42 +777,6 @@ static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev) } /** - * sdma_v3_0_load_microcode - load the sDMA ME ucode - * - * @adev: amdgpu_device pointer - * - * Loads the sDMA0/1 ucode. - * Returns 0 for success, -EINVAL if the ucode is not available. - */ -static int sdma_v3_0_load_microcode(struct amdgpu_device *adev) -{ - const struct sdma_firmware_header_v1_0 *hdr; - const __le32 *fw_data; - u32 fw_size; - int i, j; - - /* halt the MEs */ - sdma_v3_0_enable(adev, false); - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (!adev->sdma.instance[i].fw) - return -EINVAL; - hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data; - amdgpu_ucode_print_sdma_hdr(&hdr->header); - fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; - fw_data = (const __le32 *) - (adev->sdma.instance[i].fw->data + - le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); - for (j = 0; j < fw_size; j++) - WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++)); - WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version); - } - - return 0; -} - -/** * sdma_v3_0_start - setup and start the async dma engines * * @adev: amdgpu_device pointer @@ -825,12 +788,6 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) { int r; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { - r = sdma_v3_0_load_microcode(adev); - if (r) - return r; - } - /* disable sdma engine before programing it */ sdma_v3_0_ctx_switch_enable(adev, false); sdma_v3_0_enable(adev, false); @@ -1177,19 +1134,19 @@ static int sdma_v3_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* SDMA trap event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP, &adev->sdma.trap_irq); if (r) return r; /* SDMA Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241, &adev->sdma.illegal_inst_irq); if (r) return r; /* SDMA Privileged inst */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE, + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE, &adev->sdma.illegal_inst_irq); if (r) return r; @@ -1736,10 +1693,8 @@ static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = { static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev) { - if 
(adev->mman.buffer_funcs == NULL) { - adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; - adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; - } + adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; } static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = { @@ -1755,15 +1710,13 @@ static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) struct drm_gpu_scheduler *sched; unsigned i; - if (adev->vm_manager.vm_pte_funcs == NULL) { - adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v3_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 2ea1f0d8f5be..04fa3d972636 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -148,6 +148,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000), }; static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { @@ -177,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000), }; static const struct soc15_reg_golden golden_settings_sdma_rv1[] = @@ -818,7 +820,7 @@ sdma_v4_1_update_power_gating(struct amdgpu_device *adev, bool enable) uint32_t def, data; if (enable && (adev->pg_flags & AMD_PG_SUPPORT_SDMA)) { - /* disable idle interrupt */ + /* enable idle interrupt */ def = data = RREG32(SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CNTL)); data |= SDMA0_CNTL__CTXEMPTY_INT_ENABLE_MASK; @@ -1320,9 +1322,15 @@ static int sdma_v4_0_sw_init(void *handle) DRM_INFO("use_doorbell being set to: [%s]\n", ring->use_doorbell?"true":"false"); - ring->doorbell_index = (i == 0) ? - (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset - : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset + if (adev->asic_type == CHIP_VEGA10) + ring->doorbell_index = (i == 0) ? + (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset + : (AMDGPU_VEGA10_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset + else + ring->doorbell_index = (i == 0) ? 
+ (AMDGPU_DOORBELL64_sDMA_ENGINE0 << 1) //get DWORD offset + : (AMDGPU_DOORBELL64_sDMA_ENGINE1 << 1); // get DWORD offset + sprintf(ring->name, "sdma%d", i); r = amdgpu_ring_init(adev, ring, 1024, @@ -1358,6 +1366,9 @@ static int sdma_v4_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu) + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); + sdma_v4_0_init_golden_registers(adev); r = sdma_v4_0_start(adev); @@ -1375,6 +1386,9 @@ static int sdma_v4_0_hw_fini(void *handle) sdma_v4_0_ctx_switch_enable(adev, false); sdma_v4_0_enable(adev, false); + if (adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs->set_powergating_by_smu) + amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true); + return 0; } @@ -1801,10 +1815,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = { static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev) { - if (adev->mman.buffer_funcs == NULL) { - adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs; - adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; - } + adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; } static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = { @@ -1820,15 +1832,13 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev) struct drm_gpu_scheduler *sched; unsigned i; - if (adev->vm_manager.vm_pte_funcs == NULL) { - adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } const struct amdgpu_ip_block_version sdma_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index c364ef94cc36..f8408f88cd37 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -2057,13 +2057,13 @@ int si_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &si_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &si_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_dma_ip_block); amdgpu_device_ip_block_add(adev, &si_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); else amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); - amdgpu_device_ip_block_add(adev, &si_dma_ip_block); /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ break; @@ -2071,13 +2071,14 @@ int si_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &si_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &si_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_dma_ip_block); 
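/*
 * Aside: the doorbell hunk in sdma_v4_0_sw_init above selects the
 * Vega10-specific QWORD slots (0xF0/0xF2, per the amdgpu.h defines
 * in this patch) so an SR-IOV guest matches the host layout, and
 * the relocated slots (0xE0/0xE8) otherwise; << 1 converts the
 * QWORD index to the DWORD offset the ring uses. Sketch:
 */
static unsigned int sdma_doorbell_dw_offset(int is_vega10, int instance)
{
        unsigned int qword_idx = is_vega10 ? (instance ? 0xF2 : 0xF0)
                                           : (instance ? 0xE8 : 0xE0);
        return qword_idx << 1;  /* QWORD index -> DWORD offset */
}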
amdgpu_device_ip_block_add(adev, &si_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); else amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); - amdgpu_device_ip_block_add(adev, &si_dma_ip_block); + /* amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block); */ /* amdgpu_device_ip_block_add(adev, &vce_v1_0_ip_block); */ break; @@ -2085,11 +2086,11 @@ int si_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &si_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &si_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); + amdgpu_device_ip_block_add(adev, &si_dma_ip_block); amdgpu_device_ip_block_add(adev, &si_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v6_0_ip_block); - amdgpu_device_ip_block_add(adev, &si_dma_ip_block); break; default: BUG(); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c index fafaf259b17b..adbaea6da0d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dma.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c @@ -502,12 +502,14 @@ static int si_dma_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* DMA0 trap event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224, + &adev->sdma.trap_irq); if (r) return r; /* DMA1 trap event */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244, + &adev->sdma.trap_irq); if (r) return r; @@ -649,17 +651,10 @@ static int si_dma_process_trap_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - amdgpu_fence_process(&adev->sdma.instance[0].ring); - - return 0; -} - -static int si_dma_process_trap_irq_1(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - amdgpu_fence_process(&adev->sdma.instance[1].ring); - + if (entry->src_id == 224) + amdgpu_fence_process(&adev->sdma.instance[0].ring); + else + amdgpu_fence_process(&adev->sdma.instance[1].ring); return 0; } @@ -786,11 +781,6 @@ static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = { .process = si_dma_process_trap_irq, }; -static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs_1 = { - .set = si_dma_set_trap_irq_state, - .process = si_dma_process_trap_irq_1, -}; - static const struct amdgpu_irq_src_funcs si_dma_illegal_inst_irq_funcs = { .process = si_dma_process_illegal_inst_irq, }; @@ -799,7 +789,6 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev) { adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST; adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs; - adev->sdma.trap_irq_1.funcs = &si_dma_trap_irq_funcs_1; adev->sdma.illegal_inst_irq.funcs = &si_dma_illegal_inst_irq_funcs; } @@ -863,10 +852,8 @@ static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = { static void si_dma_set_buffer_funcs(struct amdgpu_device *adev) { - if (adev->mman.buffer_funcs == NULL) { - adev->mman.buffer_funcs = &si_dma_buffer_funcs; - adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; - } + adev->mman.buffer_funcs = &si_dma_buffer_funcs; + adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring; } static const struct 
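/*
 * Aside: the si_dma hunks above fold the two trap sources into one
 * amdgpu_irq_src; the handler recovers the engine from the IV
 * entry's src_id (224 for DMA0, 244 for DMA1) instead of keeping a
 * second funcs table. Standalone model:
 */
#include <stdio.h>

#define SRCID_DMA0_TRAP 224
#define SRCID_DMA1_TRAP 244

static void fence_process(int instance)
{
        printf("process fences on sdma%d\n", instance);
}

static int dma_trap_irq(unsigned int src_id)
{
        fence_process(src_id == SRCID_DMA0_TRAP ? 0 : 1);
        return 0;
}

int main(void)
{
        dma_trap_irq(SRCID_DMA0_TRAP);  /* -> sdma0 */
        dma_trap_irq(SRCID_DMA1_TRAP);  /* -> sdma1 */
        return 0;
}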
amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = { @@ -882,15 +869,13 @@ static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) struct drm_gpu_scheduler *sched; unsigned i; - if (adev->vm_manager.vm_pte_funcs == NULL) { - adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; - for (i = 0; i < adev->sdma.num_instances; i++) { - sched = &adev->sdma.instance[i].ring.sched; - adev->vm_manager.vm_pte_rqs[i] = - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; - } - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; + adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; + for (i = 0; i < adev->sdma.num_instances; i++) { + sched = &adev->sdma.instance[i].ring.sched; + adev->vm_manager.vm_pte_rqs[i] = + &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; } + adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; } const struct amdgpu_ip_block_version si_dma_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 1de96995e690..da58040fdbdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -7687,11 +7687,11 @@ static int si_dpm_sw_init(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); if (ret) return ret; - ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq); + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 60dad63098a2..b3d7d9f83202 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -142,7 +142,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev, dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); - entry->client_id = AMDGPU_IH_CLIENTID_LEGACY; + entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; @@ -170,7 +170,7 @@ static int si_ih_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_ih_ring_init(adev, 64 * 1024, false); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, false); if (r) return r; @@ -182,7 +182,7 @@ static int si_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih); return 0; } @@ -308,8 +308,7 @@ static const struct amdgpu_ih_funcs si_ih_funcs = { static void si_ih_set_interrupt_funcs(struct amdgpu_device *adev) { - if (adev->irq.ih_funcs == NULL) - adev->irq.ih_funcs = &si_ih_funcs; + adev->irq.ih_funcs = &si_ih_funcs; } const struct amdgpu_ip_block_version si_ih_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 138c4810a3de..bf5e6a413dee 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -529,6 +529,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); else amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); if 
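/*
 * Aside: the recurring set_vm_pte_funcs hunks (si_dma above, and
 * the sdma_v2/v3/v4 variants earlier) drop the NULL guard,
 * apparently because each ASIC now registers exactly one provider,
 * then point one kernel-priority scheduler run queue at every SDMA
 * instance. Sketch of the wiring (types are stand-ins):
 */
#define MAX_INSTANCES 2

struct run_queue { int prio; };
struct gpu_sched { struct run_queue rq_kernel; };
struct sdma_inst { struct gpu_sched sched; };

struct vm_manager {
        const void       *vm_pte_funcs;
        struct run_queue *vm_pte_rqs[MAX_INSTANCES];
        unsigned int      vm_pte_num_rqs;
};

static void set_vm_pte_funcs(struct vm_manager *vm, struct sdma_inst *inst,
                             unsigned int num, const void *funcs)
{
        unsigned int i;

        vm->vm_pte_funcs = funcs;       /* unconditional: single provider */
        for (i = 0; i < num; i++)
                vm->vm_pte_rqs[i] = &inst[i].sched.rq_kernel;
        vm->vm_pte_num_rqs = num;
}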
(!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) @@ -539,8 +541,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) #else # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." #endif - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) { amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); @@ -551,6 +551,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -560,8 +562,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) #else # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." #endif - amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block); break; default: @@ -739,7 +739,8 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_MMHUB | - AMD_PG_SUPPORT_VCN; + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG; } else { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index f5d602540673..958b10a57073 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -57,13 +57,33 @@ loop--; \ if (!loop) { \ DRM_ERROR("Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", \ - inst, #reg, expected_value, (tmp_ & (mask))); \ + inst, #reg, (unsigned)expected_value, (unsigned)(tmp_ & (mask))); \ ret = -ETIMEDOUT; \ break; \ } \ } \ } while (0) +#define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \ + ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ + WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ + UVD_DPG_LMA_CTL__MASK_EN_MASK | \ + ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ + << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ + (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ + RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); }) + +#define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \ + do { \ + WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \ + WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ + WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ + UVD_DPG_LMA_CTL__READ_WRITE_MASK | \ + ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ + << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ + (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ + } while (0) + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c index 52853d8a8fdd..3abffd06b5c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c @@ -266,7 +266,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev, dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]); dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]); - entry->client_id = 
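/*
 * Aside: the soc15_common.h hunk above adds indirect register
 * access for DPG mode: the target offset and sram_sel are packed
 * into UVD_DPG_LMA_CTL, the payload moves through UVD_DPG_LMA_DATA,
 * and UVD_DPG_LMA_MASK limits which bits take effect. Schematic
 * model (reg I/O helpers and field encodings are stand-ins):
 */
#include <stdint.h>

extern void     reg_write(uint32_t reg, uint32_t val);
extern uint32_t reg_read(uint32_t reg);

#define LMA_DATA     0x01       /* illustrative offsets */
#define LMA_MASK     0x02
#define LMA_CTL      0x03
#define CTL_RW       (1u << 0)  /* 1 = write through LMA */
#define CTL_MASK_EN  (1u << 1)
#define CTL_ADDR_SH  8
#define CTL_SRAM_SH  4

static void dpg_write(uint32_t addr, uint32_t val, uint32_t mask, uint32_t sram)
{
        reg_write(LMA_DATA, val);       /* 1. stage the payload    */
        reg_write(LMA_MASK, mask);      /* 2. stage the bit mask   */
        reg_write(LMA_CTL, CTL_RW |     /* 3. kick the transaction */
                  (addr << CTL_ADDR_SH) | (sram << CTL_SRAM_SH));
}

static uint32_t dpg_read(uint32_t addr, uint32_t mask, uint32_t sram)
{
        reg_write(LMA_MASK, mask);
        reg_write(LMA_CTL, CTL_MASK_EN |
                  (addr << CTL_ADDR_SH) | (sram << CTL_SRAM_SH));
        return reg_read(LMA_DATA);      /* result lands in DATA */
}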
AMDGPU_IH_CLIENTID_LEGACY; + entry->client_id = AMDGPU_IRQ_CLIENTID_LEGACY; entry->src_id = dw[0] & 0xff; entry->src_data[0] = dw[1] & 0xfffffff; entry->ring_id = dw[2] & 0xff; @@ -317,7 +317,7 @@ static int tonga_ih_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_ih_ring_init(adev, 64 * 1024, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 64 * 1024, true); if (r) return r; @@ -334,7 +334,7 @@ static int tonga_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih); amdgpu_irq_remove_domain(adev); return 0; @@ -513,8 +513,7 @@ static const struct amdgpu_ih_funcs tonga_ih_funcs = { static void tonga_ih_set_interrupt_funcs(struct amdgpu_device *adev) { - if (adev->irq.ih_funcs == NULL) - adev->irq.ih_funcs = &tonga_ih_funcs; + adev->irq.ih_funcs = &tonga_ih_funcs; } const struct amdgpu_ip_block_version tonga_ih_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 8a926d1df939..1fc17bf39fed 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -108,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle) int r; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 50248059412e..fde6ad5ac9ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -105,7 +105,7 @@ static int uvd_v5_0_sw_init(void *handle) int r; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 6ae82cc2e55e..7a5b40275e8e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -274,7 +274,7 @@ err: */ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence) + struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; @@ -310,11 +310,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - if (direct) - r = amdgpu_job_submit_direct(job, ring, &f); - else - r = amdgpu_job_submit(job, &ring->adev->vce.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; @@ -345,7 +341,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto error; } - r = uvd_v6_0_enc_get_destroy_msg(ring, 1, true, &fence); + r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); goto error; @@ -393,14 +389,14 @@ static int uvd_v6_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 
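/*
 * Aside: the iceland/si/tonga IH hunks above thread the ring
 * through explicitly (amdgpu_ih_ring_init(adev, &adev->irq.ih,
 * ...)) instead of letting the helper assume adev->irq.ih, which
 * clears the way for devices with more than one IH ring. Schematic
 * signature change (types are stand-ins; a second ring such as ih1
 * is hypothetical):
 */
struct ih_ring { void *base; unsigned int size; };
struct irq_ctx { struct ih_ring ih; /* hypothetically: ih1, ih2 */ };

static int ih_ring_init(struct irq_ctx *irq, struct ih_ring *ih,
                        unsigned int size, int use_bus_addr)
{
        (void)irq; (void)use_bus_addr;
        ih->size = size;        /* allocate `size` bytes for this ring */
        ih->base = 0;           /* allocation elided in this sketch    */
        return 0;
}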
VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE, &adev->uvd.inst->irq); if (r) return r; /* UVD ENC TRAP */ if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP, &adev->uvd.inst->irq); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index a289f6a20b6b..58b39afcfb86 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -280,8 +280,8 @@ err: * * Close up a stream for HW test or if userspace failed to do so */ -int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence) +static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + struct dma_fence **fence) { const unsigned ib_size_dw = 16; struct amdgpu_job *job; @@ -317,11 +317,7 @@ int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - if (direct) - r = amdgpu_job_submit_direct(job, ring, &f); - else - r = amdgpu_job_submit(job, &ring->adev->vce.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &f); + r = amdgpu_job_submit_direct(job, ring, &f); if (r) goto err; @@ -352,7 +348,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto error; } - r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence); + r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence); if (r) { DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r); goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 7eaa54ba016b..ea28828360d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -417,7 +417,7 @@ static int vce_v2_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* VCE */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index c8390f9adfd6..6dbd39730070 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -423,7 +423,7 @@ static int vce_v3_0_sw_init(void *handle) int r, i; /* VCE */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_VCE_TRAP, &adev->vce.irq); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 2664bb2c47c3..eae90922fdbe 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -37,6 +37,11 @@ #include "ivsrcid/vcn/irqsrcs_vcn_1_0.h" +#define mmUVD_RBC_XX_IB_REG_CHECK 0x05ab +#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX 1 +#define mmUVD_REG_XX_MASK 0x05ac +#define mmUVD_REG_XX_MASK_BASE_IDX 1 + static int vcn_v1_0_stop(struct amdgpu_device *adev); static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); @@ -198,7 +203,8 @@ static int vcn_v1_0_hw_init(void *handle) done: if (!r) - DRM_INFO("VCN decode and encode 
initialized successfully.\n"); + DRM_INFO("VCN decode and encode initialized successfully(under %s).\n", + (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)?"DPG Mode":"SPG Mode"); return r; } @@ -266,17 +272,18 @@ static int vcn_v1_0_resume(void *handle) } /** - * vcn_v1_0_mc_resume - memory controller programming + * vcn_v1_0_mc_resume_spg_mode - memory controller programming * * @adev: amdgpu_device pointer * * Let the VCN memory controller know it's offsets */ -static void vcn_v1_0_mc_resume(struct amdgpu_device *adev) +static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev) { uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); uint32_t offset; + /* cache window 0: fw */ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo)); @@ -296,20 +303,21 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); + /* cache window 1: stack */ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, lower_32_bits(adev->vcn.gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, upper_32_bits(adev->vcn.gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + /* cache window 2: context */ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE)); + lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE)); + upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, - AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40)); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); @@ -317,6 +325,96 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev) adev->gfx.config.gb_addr_config); WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); +} + +static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev) +{ + uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); + uint32_t offset; + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo), + 0xFFFFFFFF, 
0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi), + 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0, + 0xFFFFFFFF, 0); + offset = 0; + } else { + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.gpu_addr), 0xFFFFFFFF, 0); + offset = size; + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0xFFFFFFFF, 0); + } + + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size, 0xFFFFFFFF, 0); + + /* cache window 1: stack */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.gpu_addr + offset), 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0, + 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE, + 0xFFFFFFFF, 0); + + /* cache window 2: context */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), + 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), + 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE, + 0xFFFFFFFF, 0); + + /* VCN global tiling registers */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_UDEC_DBW_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_CURR_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_RECON1_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MIF_REF_UV_ADDR_CONFIG, + adev->gfx.config.gb_addr_config, 0xFFFFFFFF, 0); } /** @@ -519,6 +617,60 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev) WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); } +static void vcn_v1_0_clock_gating_dpg_mode(struct amdgpu_device *adev, uint8_t sram_sel) +{ + uint32_t reg_data = 0; + + /* disable JPEG CGC */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + reg_data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + reg_data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + reg_data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + reg_data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + WREG32_SOC15_DPG_MODE(UVD, 0, mmJPEG_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); + + WREG32_SOC15_DPG_MODE(UVD, 0, 
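/*
 * Aside: both mc_resume variants above program the same three cache
 * windows back to back in the VCN BO: window 0 holds the firmware
 * image (or, when PSP loads it, the TMR address with offset 0),
 * window 1 the stack, window 2 the context, with the old heap-based
 * sizing replaced by AMDGPU_VCN_STACK_SIZE/AMDGPU_VCN_CONTEXT_SIZE.
 * Runnable layout walk-through with assumed sizes:
 */
#include <stdint.h>
#include <stdio.h>

#define FW_SIZE      (256u * 1024)      /* assumed, page-aligned  */
#define STACK_SIZE   (128u * 1024)      /* stand-ins for the real */
#define CONTEXT_SIZE (512u * 1024)      /* AMDGPU_VCN_* constants */

int main(void)
{
        uint64_t gpu_addr = 0x100000;   /* hypothetical BO address */
        uint64_t off = FW_SIZE;

        printf("win0 fw      @ 0x%llx size 0x%x\n",
               (unsigned long long)gpu_addr, FW_SIZE);
        printf("win1 stack   @ 0x%llx size 0x%x\n",
               (unsigned long long)(gpu_addr + off), STACK_SIZE);
        printf("win2 context @ 0x%llx size 0x%x\n",
               (unsigned long long)(gpu_addr + off + STACK_SIZE),
               CONTEXT_SIZE);
        return 0;
}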
mmJPEG_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); + + /* enable sw clock gating control */ + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) + reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + else + reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; + reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT; + reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT; + reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK | + UVD_CGC_CTRL__UDEC_CM_MODE_MASK | + UVD_CGC_CTRL__UDEC_IT_MODE_MASK | + UVD_CGC_CTRL__UDEC_DB_MODE_MASK | + UVD_CGC_CTRL__UDEC_MP_MODE_MASK | + UVD_CGC_CTRL__SYS_MODE_MASK | + UVD_CGC_CTRL__UDEC_MODE_MASK | + UVD_CGC_CTRL__MPEG2_MODE_MASK | + UVD_CGC_CTRL__REGS_MODE_MASK | + UVD_CGC_CTRL__RBC_MODE_MASK | + UVD_CGC_CTRL__LMI_MC_MODE_MASK | + UVD_CGC_CTRL__LMI_UMC_MODE_MASK | + UVD_CGC_CTRL__IDCT_MODE_MASK | + UVD_CGC_CTRL__MPRD_MODE_MASK | + UVD_CGC_CTRL__MPC_MODE_MASK | + UVD_CGC_CTRL__LBSI_MODE_MASK | + UVD_CGC_CTRL__LRBBM_MODE_MASK | + UVD_CGC_CTRL__WCB_MODE_MASK | + UVD_CGC_CTRL__VCPU_MODE_MASK | + UVD_CGC_CTRL__SCPU_MODE_MASK); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_CTRL, reg_data, 0xFFFFFFFF, sram_sel); + + /* turn off clock gating */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_CGC_GATE, 0, 0xFFFFFFFF, sram_sel); + + /* turn on SUVD clock gating */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_GATE, 1, 0xFFFFFFFF, sram_sel); + + /* turn on sw mode in UVD_SUVD_CGC_CTRL */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SUVD_CGC_CTRL, 0, 0xFFFFFFFF, sram_sel); +} + static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) { uint32_t data = 0; @@ -614,7 +766,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev) * * Setup and start the VCN block */ -static int vcn_v1_0_start(struct amdgpu_device *adev) +static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev) { struct amdgpu_ring *ring = &adev->vcn.ring_dec; uint32_t rb_bufsz, tmp; @@ -625,41 +777,24 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) lmi_swap_cntl = 0; vcn_1_0_disable_static_power_gating(adev); + + tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp); + /* disable clock gating */ vcn_v1_0_disable_clock_gating(adev); - vcn_v1_0_mc_resume(adev); - /* disable interupt */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); - /* stall UMC and register bus before resetting VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - mdelay(1); - - /* put LMI, VCPU, RBC etc... 
into reset */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | - UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | - UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | - UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); - mdelay(5); - /* initialize VCN memory controller */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, - (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__REQ_MODE_MASK | - 0x00100000L); + tmp = RREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL); + WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, tmp | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); #ifdef __BIG_ENDIAN /* swap (8 in 32) RB and IB */ @@ -667,41 +802,61 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) #endif WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88); + tmp = RREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL); + tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK; + tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT; + WREG32_SOC15(UVD, 0, mmUVD_MPC_CNTL, tmp); + + WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT))); + + WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT))); - /* take all subblocks out of reset, except VCPU */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT))); + + vcn_v1_0_mc_resume_spg_mode(adev); + + WREG32_SOC15(UVD, 0, mmUVD_REG_XX_MASK, 0x10); + WREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, + RREG32_SOC15(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK) | 0x3); /* enable VCPU clock */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, - UVD_VCPU_CNTL__CLK_EN_MASK); + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); + + /* boot up the VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); /* enable UMC */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - /* boot up the VCPU */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0); - mdelay(10); + tmp = RREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET); + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, tmp); for (i = 0; i < 10; ++i) { uint32_t status; for (j = 0; j < 100; ++j) { status = RREG32_SOC15(UVD, 0, mmUVD_STATUS); - if (status & 2) + if (status & UVD_STATUS__IDLE) break; mdelay(10); } r = 0; - if (status & 2) + if (status & 
UVD_STATUS__IDLE) break; DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n"); @@ -721,24 +876,22 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) } /* enable master interrupt */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), - ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); + UVD_MASTINT_EN__VCPU_EN_MASK, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* enable system interrupt for JRBC, TODO: move to set interrupt*/ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SYS_INT_EN), UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, ~UVD_SYS_INT_EN__UVD_JRBC_EN_MASK); - /* clear the bit 4 of VCN_STATUS */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0, - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + /* clear the busy bit of UVD_STATUS */ + tmp = RREG32_SOC15(UVD, 0, mmUVD_STATUS) & ~UVD_STATUS__UVD_BUSY; + WREG32_SOC15(UVD, 0, mmUVD_STATUS, tmp); /* force RBC into idle state */ rb_bufsz = order_base_2(ring->ring_size); tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); @@ -759,6 +912,8 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); + WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0); + ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); @@ -782,12 +937,13 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) ring = &adev->vcn.ring_jpeg; WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_VMID, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L)); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_NO_FETCH_MASK | + UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW, lower_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_RPTR, 0); WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR, 0); - WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L); + WREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_CNTL, UVD_JRBC_RB_CNTL__RB_RPTR_WR_EN_MASK); /* initialize wptr */ ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); @@ -799,6 +955,166 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) return 0; } +static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = &adev->vcn.ring_dec; + uint32_t rb_bufsz, tmp; + uint32_t lmi_swap_cntl; + + /* disable byte swapping */ + lmi_swap_cntl = 0; + + vcn_1_0_enable_static_power_gating(adev); + + /* enable dynamic power gating mode */ + tmp = RREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS); + tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; + tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK; + WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, tmp); + + /* enable clock gating */ + vcn_v1_0_clock_gating_dpg_mode(adev, 0); + + /* enable VCPU clock */ + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + tmp |= UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK; + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_VCPU_CNTL, tmp, 0xFFFFFFFF, 0); + + /* disable interupt */ + WREG32_SOC15_DPG_MODE(UVD, 0, 
mmUVD_MASTINT_EN, + 0, UVD_MASTINT_EN__VCPU_EN_MASK, 0); + + /* initialize VCN memory controller */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL, + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + 0x00100000L, 0xFFFFFFFF, 0); + +#ifdef __BIG_ENDIAN + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; +#endif + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl, 0xFFFFFFFF, 0); + + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_CNTL, + 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0xFFFFFFFF, 0); + + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXA0, + ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0xFFFFFFFF, 0); + + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUXB0, + ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | + (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | + (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0xFFFFFFFF, 0); + + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MPC_SET_MUX, + ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | + (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | + (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0xFFFFFFFF, 0); + + vcn_v1_0_mc_resume_dpg_mode(adev); + + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_REG_XX_MASK, 0x10, 0xFFFFFFFF, 0); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK, 0x3, 0xFFFFFFFF, 0); + + /* boot up the VCPU */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SOFT_RESET, 0, 0xFFFFFFFF, 0); + + /* enable UMC */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL2, + 0x1F << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT, + 0xFFFFFFFF, 0); + + /* enable master interrupt */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_MASTINT_EN, + UVD_MASTINT_EN__VCPU_EN_MASK, UVD_MASTINT_EN__VCPU_EN_MASK, 0); + + vcn_v1_0_clock_gating_dpg_mode(adev, 1); + /* setup mmUVD_LMI_CTRL */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_LMI_CTRL, + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + 0x00100000L, 0xFFFFFFFF, 1); + + tmp = adev->gfx.config.gb_addr_config; + /* setup VCN global tiling registers */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_JPEG_UV_ADDR_CONFIG, tmp, 0xFFFFFFFF, 1); + + /* enable System Interrupt for JRBC */ + WREG32_SOC15_DPG_MODE(UVD, 0, mmUVD_SYS_INT_EN, + UVD_SYS_INT_EN__UVD_JRBC_EN_MASK, 0xFFFFFFFF, 1); + + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); + + /* set the write pointer delay */ + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, + (upper_32_bits(ring->gpu_addr) >> 2)); + + /* programm the RB_BASE for ring buffer */ 
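/*
 * [Editor's aside, an illustrative sketch that is not part of this commit.]
 * The DPG start path above programs registers through
 * WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) rather
 * than a direct WREG32_SOC15() store. Conceptually this is a masked
 * read-modify-write; a minimal stand-alone model of the masking
 * behaviour (the helper name and the plain readl()/writel() MMIO
 * access are assumptions; the real macro routes the access through
 * the UVD DPG indirect interface, with sram_sel selecting the
 * destination) could look like:
 *
 *	static inline void dpg_masked_write_sketch(void __iomem *base,
 *						   u32 reg, u32 val,
 *						   u32 mask)
 *	{
 *		u32 tmp = readl(base + reg);	// current register contents
 *
 *		tmp &= ~mask;			// clear the field being updated
 *		tmp |= (val & mask);		// merge in the new bits
 *		writel(tmp, base + reg);	// write the merged value back
 *	}
 *
 * With mask = 0xFFFFFFFF, as at most call sites above, this
 * degenerates into a full-register write of val.
 */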
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); + + WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, 0); + + ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, + ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + + /* initialize wptr */ + ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_JRBC_RB_WPTR); + + /* copy patch commands to the jpeg ring */ + vcn_v1_0_jpeg_ring_set_patch_ring(ring, + (ring->wptr + ring->max_dw * amdgpu_sched_hw_submission)); + + return 0; +} + +static int vcn_v1_0_start(struct amdgpu_device *adev) +{ + int r; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + r = vcn_v1_0_start_dpg_mode(adev); + else + r = vcn_v1_0_start_spg_mode(adev); + return r; +} + /** * vcn_v1_0_stop - stop VCN block * @@ -806,41 +1122,90 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) * * stop the VCN block */ -static int vcn_v1_0_stop(struct amdgpu_device *adev) +static int vcn_v1_0_stop_spg_mode(struct amdgpu_device *adev) { - /* force RBC into idle state */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101); + int ret_code, tmp; - /* Stall UMC and register bus before resetting VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - mdelay(1); + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7, ret_code); + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code); /* put VCPU into reset */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_LMI_STATUS, tmp, tmp, ret_code); /* disable VCPU clock */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0); + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__CLK_EN_MASK); - /* Unstall UMC and register bus */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* reset LMI UMC/LMI */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__LMI_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK); - WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0); + WREG32_SOC15(UVD, 0, mmUVD_STATUS, 0); vcn_v1_0_enable_clock_gating(adev); vcn_1_0_enable_static_power_gating(adev); return 0; } +static int vcn_v1_0_stop_dpg_mode(struct amdgpu_device *adev) +{ + int ret_code = 0; + + /* Wait for power status to be UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF */ + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + + if (!ret_code) { + int tmp = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF; + /* wait for read ptr to be equal to 
write ptr */ + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code); + + SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS, + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code); + } + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + return 0; +} + +static int vcn_v1_0_stop(struct amdgpu_device *adev) +{ + int r; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + r = vcn_v1_0_stop_dpg_mode(adev); + else + r = vcn_v1_0_stop_spg_mode(adev); + + return r; +} + static bool vcn_v1_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2); + return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == UVD_STATUS__IDLE); } static int vcn_v1_0_wait_for_idle(void *handle) @@ -848,7 +1213,8 @@ static int vcn_v1_0_wait_for_idle(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; - SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE, ret); return ret; } @@ -910,6 +1276,10 @@ static void vcn_v1_0_dec_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + WREG32_SOC15(UVD, 0, mmUVD_SCRATCH2, + lower_32_bits(ring->wptr) | 0x80000000); + WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } @@ -1633,12 +2003,20 @@ static int vcn_v1_0_set_powergating_state(void *handle, * revisit this when there is a cleaner line between * the smc and the hw blocks */ + int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if(state == adev->vcn.cur_state) + return 0; + if (state == AMD_PG_STATE_GATE) - return vcn_v1_0_stop(adev); + ret = vcn_v1_0_stop(adev); else - return vcn_v1_0_start(adev); + ret = vcn_v1_0_start(adev); + + if(!ret) + adev->vcn.cur_state = state; + return ret; } static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index acbe5a770207..a99f71797aa3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -380,7 +380,7 @@ static int vega10_ih_sw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_ih_ring_init(adev, 256 * 1024, true); + r = amdgpu_ih_ring_init(adev, &adev->irq.ih, 256 * 1024, true); if (r) return r; @@ -397,7 +397,7 @@ static int vega10_ih_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_irq_fini(adev); - amdgpu_ih_ring_fini(adev); + amdgpu_ih_ring_fini(adev, &adev->irq.ih); return 0; } @@ -494,8 +494,7 @@ static const struct amdgpu_ih_funcs vega10_ih_funcs = { static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev) { - if (adev->irq.ih_funcs == NULL) - adev->irq.ih_funcs = &vega10_ih_funcs; + adev->irq.ih_funcs = &vega10_ih_funcs; } const struct amdgpu_ip_block_version vega10_ih_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 88b57a5e9489..07880d35e9de 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1596,16 +1596,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vi_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v7_4_ip_block); amdgpu_device_ip_block_add(adev, 
&iceland_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block); break; case CHIP_FIJI: amdgpu_device_ip_block_add(adev, &vi_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v8_5_ip_block); amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -1615,8 +1617,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v10_1_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) { amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block); @@ -1626,6 +1626,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vi_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -1635,8 +1637,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v10_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) { amdgpu_device_ip_block_add(adev, &uvd_v5_0_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block); @@ -1649,6 +1649,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vi_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block); amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -1658,8 +1660,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); break; @@ -1667,6 +1667,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vi_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -1676,8 +1678,6 @@ int vi_set_ip_blocks(struct 
amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v8_0_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block); #if defined(CONFIG_DRM_AMD_ACP) @@ -1688,6 +1688,8 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vi_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v8_0_ip_block); amdgpu_device_ip_block_add(adev, &cz_ih_ip_block); + amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block); + amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); if (adev->enable_virtual_display) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -1697,8 +1699,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) #endif else amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &gfx_v8_1_ip_block); - amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block); amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block); amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); #if defined(CONFIG_DRM_AMD_ACP) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 758398bdb39b..14d5b5fa822d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -447,6 +447,24 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, return retval; } +static int kfd_ioctl_get_queue_wave_state(struct file *filep, + struct kfd_process *p, void *data) +{ + struct kfd_ioctl_get_queue_wave_state_args *args = data; + int r; + + mutex_lock(&p->mutex); + + r = pqm_get_wave_state(&p->pqm, args->queue_id, + (void __user *)args->ctl_stack_address, + &args->ctl_stack_used_size, + &args->save_area_used_size); + + mutex_unlock(&p->mutex); + + return r; +} + static int kfd_ioctl_set_memory_policy(struct file *filep, struct kfd_process *p, void *data) { @@ -1615,6 +1633,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK, kfd_ioctl_set_cu_mask, 0), + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_QUEUE_WAVE_STATE, + kfd_ioctl_get_queue_wave_state, 0) + }; #define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index d4560f1869bd..56412b0e7e1c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -647,6 +647,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, num_of_cache_types = ARRAY_SIZE(polaris11_cache_info); break; case CHIP_VEGA10: + case CHIP_VEGA20: pcache_info = vega10_cache_info; num_of_cache_types = ARRAY_SIZE(vega10_cache_info); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 9b4e6ad4a7df..a9f18ea7e354 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -53,6 +53,7 @@ static const struct kfd_device_info kaveri_device_info = { .needs_iommu_device = true, .needs_pci_atomics = false, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info carrizo_device_info = { @@ -69,6 +70,7 @@ static const struct kfd_device_info carrizo_device_info = { .needs_iommu_device = true, .needs_pci_atomics = false, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct 
kfd_device_info raven_device_info = { @@ -84,6 +86,7 @@ static const struct kfd_device_info raven_device_info = { .needs_iommu_device = true, .needs_pci_atomics = true, .num_sdma_engines = 1, + .num_sdma_queues_per_engine = 2, }; #endif @@ -101,6 +104,7 @@ static const struct kfd_device_info hawaii_device_info = { .needs_iommu_device = false, .needs_pci_atomics = false, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info tonga_device_info = { @@ -116,21 +120,7 @@ static const struct kfd_device_info tonga_device_info = { .needs_iommu_device = false, .needs_pci_atomics = true, .num_sdma_engines = 2, -}; - -static const struct kfd_device_info tonga_vf_device_info = { - .asic_family = CHIP_TONGA, - .max_pasid_bits = 16, - .max_no_of_hqd = 24, - .doorbell_size = 4, - .ih_ring_entry_size = 4 * sizeof(uint32_t), - .event_interrupt_class = &event_interrupt_class_cik, - .num_of_watch_points = 4, - .mqd_size_aligned = MQD_SIZE_ALIGNED, - .supports_cwsr = false, - .needs_iommu_device = false, - .needs_pci_atomics = false, - .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info fiji_device_info = { @@ -146,6 +136,7 @@ static const struct kfd_device_info fiji_device_info = { .needs_iommu_device = false, .needs_pci_atomics = true, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info fiji_vf_device_info = { @@ -161,6 +152,7 @@ static const struct kfd_device_info fiji_vf_device_info = { .needs_iommu_device = false, .needs_pci_atomics = false, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; @@ -177,6 +169,7 @@ static const struct kfd_device_info polaris10_device_info = { .needs_iommu_device = false, .needs_pci_atomics = true, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris10_vf_device_info = { @@ -192,6 +185,7 @@ static const struct kfd_device_info polaris10_vf_device_info = { .needs_iommu_device = false, .needs_pci_atomics = false, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris11_device_info = { @@ -207,6 +201,7 @@ static const struct kfd_device_info polaris11_device_info = { .needs_iommu_device = false, .needs_pci_atomics = true, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega10_device_info = { @@ -222,6 +217,7 @@ static const struct kfd_device_info vega10_device_info = { .needs_iommu_device = false, .needs_pci_atomics = false, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega10_vf_device_info = { @@ -237,8 +233,24 @@ static const struct kfd_device_info vega10_vf_device_info = { .needs_iommu_device = false, .needs_pci_atomics = false, .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 2, }; +static const struct kfd_device_info vega20_device_info = { + .asic_family = CHIP_VEGA20, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_sdma_queues_per_engine = 8, +}; struct kfd_deviceid { unsigned short did; @@ -293,7 +305,6 @@ static const struct kfd_deviceid supported_devices[] = { { 0x6928, &tonga_device_info }, /* Tonga */ { 0x6929, 
&tonga_device_info }, /* Tonga */ { 0x692B, &tonga_device_info }, /* Tonga */ - { 0x692F, &tonga_vf_device_info }, /* Tonga vf */ { 0x6938, &tonga_device_info }, /* Tonga */ { 0x6939, &tonga_device_info }, /* Tonga */ { 0x7300, &fiji_device_info }, /* Fiji */ @@ -328,6 +339,12 @@ static const struct kfd_deviceid supported_devices[] = { { 0x6868, &vega10_device_info }, /* Vega10 */ { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ { 0x687F, &vega10_device_info }, /* Vega10 */ + { 0x66a0, &vega20_device_info }, /* Vega20 */ + { 0x66a1, &vega20_device_info }, /* Vega20 */ + { 0x66a2, &vega20_device_info }, /* Vega20 */ + { 0x66a3, &vega20_device_info }, /* Vega20 */ + { 0x66a7, &vega20_device_info }, /* Vega20 */ + { 0x66af, &vega20_device_info } /* Vega20 */ }; static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, @@ -366,6 +383,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, return NULL; } + kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); + if (!kfd) + return NULL; + /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. * 32 and 64-bit requests are possible and must be * supported. @@ -377,12 +398,10 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, dev_info(kfd_device, "skipped device %x:%x, PCI rejects atomics\n", pdev->vendor, pdev->device); + kfree(kfd); return NULL; - } - - kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); - if (!kfd) - return NULL; + } else if (!ret) + kfd->pci_atomic_requested = true; kfd->kgd = kgd; kfd->device_info = device_info; @@ -419,6 +438,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, { unsigned int size; + kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd, + KGD_ENGINE_MEC1); + kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd, + KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index ec0d62a16e53..a3b933967171 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -109,7 +109,7 @@ static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm) unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { return dqm->dev->device_info->num_sdma_engines - * KFD_SDMA_QUEUES_PER_ENGINE; + * dqm->dev->device_info->num_sdma_queues_per_engine; } void program_sh_mem_settings(struct device_queue_manager *dqm, @@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, struct queue *q, struct qcm_process_device *qpd) { - int retval; struct mqd_manager *mqd_mgr; + int retval; mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); if (!mqd_mgr) @@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, if (!q->properties.is_active) return 0; - retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, - &q->properties, q->process->mm); + if (WARN(q->process->mm != current->mm, + "should only run in user thread")) + retval = -EFAULT; + else + retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, + &q->properties, current->mm); if (retval) goto out_uninit_mqd; @@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) retval = map_queues_cpsch(dqm); else if (q->properties.is_active && (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || - q->properties.type == KFD_QUEUE_TYPE_SDMA)) - retval = 
mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, - &q->properties, q->process->mm); + q->properties.type == KFD_QUEUE_TYPE_SDMA)) { + if (WARN(q->process->mm != current->mm, + "should only run in user thread")) + retval = -EFAULT; + else + retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, + q->pipe, q->queue, + &q->properties, current->mm); + } out_unlock: dqm_unlock(dqm); @@ -653,10 +663,11 @@ out: static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { + struct mm_struct *mm = NULL; struct queue *q; struct mqd_manager *mqd_mgr; struct kfd_process_device *pdd; - uint32_t pd_base; + uint64_t pd_base; int retval = 0; pdd = qpd_to_pdd(qpd); @@ -676,7 +687,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, /* Update PD Base in QPD */ qpd->page_table_base = pd_base; - pr_debug("Updated PD address to 0x%08x\n", pd_base); + pr_debug("Updated PD address to 0x%llx\n", pd_base); if (!list_empty(&qpd->queues_list)) { dqm->dev->kfd2kgd->set_vm_context_page_table_base( @@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, kfd_flush_tlb(pdd); } + /* Take a safe reference to the mm_struct, which may otherwise + * disappear even while the kfd_process is still referenced. + */ + mm = get_task_mm(pdd->process->lead_thread); + if (!mm) { + retval = -EFAULT; + goto out; + } + /* activate all active queues on the qpd */ list_for_each_entry(q, &qpd->queues_list, list) { if (!q->properties.is_evicted) @@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, q->properties.is_evicted = false; q->properties.is_active = true; retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, - q->queue, &q->properties, - q->process->mm); + q->queue, &q->properties, mm); if (retval) goto out; dqm->queue_count++; } qpd->evicted = 0; out: + if (mm) + mmput(mm); dqm_unlock(dqm); return retval; } @@ -717,7 +738,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, { struct queue *q; struct kfd_process_device *pdd; - uint32_t pd_base; + uint64_t pd_base; int retval = 0; pdd = qpd_to_pdd(qpd); @@ -737,7 +758,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, /* Update PD Base in QPD */ qpd->page_table_base = pd_base; - pr_debug("Updated PD address to 0x%08x\n", pd_base); + pr_debug("Updated PD address to 0x%llx\n", pd_base); /* activate all active queues on the qpd */ list_for_each_entry(q, &qpd->queues_list, list) { @@ -761,7 +782,7 @@ static int register_process(struct device_queue_manager *dqm, { struct device_process_node *n; struct kfd_process_device *pdd; - uint32_t pd_base; + uint64_t pd_base; int retval; n = kzalloc(sizeof(*n), GFP_KERNEL); @@ -779,6 +800,7 @@ static int register_process(struct device_queue_manager *dqm, /* Update PD Base in QPD */ qpd->page_table_base = pd_base; + pr_debug("Updated PD address to 0x%llx\n", pd_base); retval = dqm->asic_ops.update_qpd(dqm, qpd); @@ -1342,9 +1364,6 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, { int retval; struct mqd_manager *mqd_mgr; - bool preempt_all_queues; - - preempt_all_queues = false; retval = 0; @@ -1528,6 +1547,41 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm, return retval; } +static int get_wave_state(struct device_queue_manager *dqm, + struct queue *q, + void __user *ctl_stack, + u32 *ctl_stack_used_size, + u32 *save_area_used_size) +{ + struct mqd_manager *mqd; + int r; + + dqm_lock(dqm); + + if 
(q->properties.type != KFD_QUEUE_TYPE_COMPUTE || + q->properties.is_active || !q->device->cwsr_enabled) { + r = -EINVAL; + goto dqm_unlock; + } + + mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); + if (!mqd) { + r = -ENOMEM; + goto dqm_unlock; + } + + if (!mqd->get_wave_state) { + r = -EINVAL; + goto dqm_unlock; + } + + r = mqd->get_wave_state(mqd, q->mqd, ctl_stack, ctl_stack_used_size, + save_area_used_size); + +dqm_unlock: + dqm_unlock(dqm); + return r; +} static int process_termination_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) @@ -1649,6 +1703,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) dqm->ops.process_termination = process_termination_cpsch; dqm->ops.evict_process_queues = evict_process_queues_cpsch; dqm->ops.restore_process_queues = restore_process_queues_cpsch; + dqm->ops.get_wave_state = get_wave_state; break; case KFD_SCHED_POLICY_NO_HWS: /* initialize dqm for no cp scheduling */ @@ -1668,6 +1723,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) dqm->ops.evict_process_queues = evict_process_queues_nocpsch; dqm->ops.restore_process_queues = restore_process_queues_nocpsch; + dqm->ops.get_wave_state = get_wave_state; break; default: pr_err("Invalid scheduling policy %d\n", dqm->sched_policy); @@ -1695,6 +1751,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) break; case CHIP_VEGA10: + case CHIP_VEGA20: case CHIP_RAVEN: device_queue_manager_init_v9(&dqm->asic_ops); break; @@ -1806,7 +1863,9 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) } for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) { - for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) { + for (queue = 0; + queue < dqm->dev->device_info->num_sdma_queues_per_engine; + queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( dqm->dev->kgd, pipe, queue, &dump, &n_regs); if (r) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 00da3169a004..70e38a2e23b9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -33,7 +33,6 @@ #define KFD_UNMAP_LATENCY_MS (4000) #define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000) -#define KFD_SDMA_QUEUES_PER_ENGINE (2) struct device_process_node { struct qcm_process_device *qpd; @@ -82,6 +81,8 @@ struct device_process_node { * * @restore_process_queues: Restore all evicted queues queues of a process * + * @get_wave_state: Retrieves context save state and optionally copies the + * control stack, if kept in the MQD, to the given userspace address. 
*/ struct device_queue_manager_ops { @@ -137,6 +138,12 @@ struct device_queue_manager_ops { struct qcm_process_device *qpd); int (*restore_process_queues)(struct device_queue_manager *dqm, struct qcm_process_device *qpd); + + int (*get_wave_state)(struct device_queue_manager *dqm, + struct queue *q, + void __user *ctl_stack, + u32 *ctl_stack_used_size, + u32 *save_area_used_size); }; struct device_queue_manager_asic_ops { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 97d5423c5673..3d66cec414af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -400,6 +400,7 @@ int kfd_init_apertures(struct kfd_process *process) kfd_init_apertures_vi(pdd, id); break; case CHIP_VEGA10: + case CHIP_VEGA20: case CHIP_RAVEN: kfd_init_apertures_v9(pdd, id); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 9f84b4d9fb88..6c31f7370193 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -322,6 +322,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, break; case CHIP_VEGA10: + case CHIP_VEGA20: case CHIP_RAVEN: kernel_queue_init_v9(&kq->ops_asic_specific); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c index 684a3bf07efd..33830b1a5a54 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -71,8 +71,7 @@ static int pm_map_process_v9(struct packet_manager *pm, uint32_t *buffer, struct qcm_process_device *qpd) { struct pm4_mes_map_process *packet; - uint64_t vm_page_table_base_addr = - (uint64_t)(qpd->page_table_base) << 12; + uint64_t vm_page_table_base_addr = qpd->page_table_base; packet = (struct pm4_mes_map_process *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process)); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 3bc25ab84f34..e33019a7a883 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -39,6 +39,7 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, case CHIP_POLARIS11: return mqd_manager_init_vi_tonga(type, dev); case CHIP_VEGA10: + case CHIP_VEGA20: case CHIP_RAVEN: return mqd_manager_init_v9(type, dev); default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 4e84052d4e21..f8261313ae7b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -43,6 +43,9 @@ * * @is_occupied: Checks if the relevant HQD slot is occupied. * + * @get_wave_state: Retrieves context save state and optionally copies the + * control stack, if kept in the MQD, to the given userspace address. + * * @mqd_mutex: Mqd manager mutex. * * @dev: The kfd device structure coupled with this module. 
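[Editor's aside, an illustrative sketch that is not part of this commit.] The get_wave_state callback added to device_queue_manager_ops above is wired up by this change for both the HWS and no-HWS scheduling policies, so the in-tree caller (pqm_get_wave_state(), further below) invokes it unconditionally. A defensive caller, with a hypothetical function name, would follow the usual ops-table dispatch pattern:

static int query_wave_state_sketch(struct device_queue_manager *dqm,
				   struct queue *q, void __user *ctl_stack,
				   u32 *ctl_stack_used_size,
				   u32 *save_area_used_size)
{
	/* Guard against a backend that leaves the hook unset. */
	if (!dqm->ops.get_wave_state)
		return -EINVAL;

	/* Forward to the dqm implementation, which in turn calls the
	 * per-ASIC MQD manager's get_wave_state under the dqm lock.
	 */
	return dqm->ops.get_wave_state(dqm, q, ctl_stack,
				       ctl_stack_used_size,
				       save_area_used_size);
}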
@@ -85,6 +88,11 @@ struct mqd_manager { uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); + int (*get_wave_state)(struct mqd_manager *mm, void *mqd, + void __user *ctl_stack, + u32 *ctl_stack_used_size, + u32 *save_area_used_size); + #if defined(CONFIG_DEBUG_FS) int (*debugfs_show_mqd)(struct seq_file *m, void *data); #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 0cedb37cf513..f381c1cb27bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -266,6 +266,28 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, pipe_id, queue_id); } +static int get_wave_state(struct mqd_manager *mm, void *mqd, + void __user *ctl_stack, + u32 *ctl_stack_used_size, + u32 *save_area_used_size) +{ + struct v9_mqd *m; + + /* Control stack is located one page after MQD. */ + void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); + + m = get_mqd(mqd); + + *ctl_stack_used_size = m->cp_hqd_cntl_stack_size - + m->cp_hqd_cntl_stack_offset; + *save_area_used_size = m->cp_hqd_wg_state_offset; + + if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size)) + return -EFAULT; + + return 0; +} + static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -435,6 +457,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; + mqd->get_wave_state = get_wave_state; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index b81fda3754da..6469b3456f00 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -269,6 +269,28 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, pipe_id, queue_id); } +static int get_wave_state(struct mqd_manager *mm, void *mqd, + void __user *ctl_stack, + u32 *ctl_stack_used_size, + u32 *save_area_used_size) +{ + struct vi_mqd *m; + + m = get_mqd(mqd); + + *ctl_stack_used_size = m->cp_hqd_cntl_stack_size - + m->cp_hqd_cntl_stack_offset; + *save_area_used_size = m->cp_hqd_wg_state_offset - + m->cp_hqd_cntl_stack_size; + + /* Control stack is not copied to user mode for GFXv8 because + * it's part of the context save area that is already + * accessible to user mode + */ + + return 0; +} + static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *q) @@ -436,6 +458,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, mqd->update_mqd = update_mqd; mqd->destroy_mqd = destroy_mqd; mqd->is_occupied = is_occupied; + mqd->get_wave_state = get_wave_state; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 1092631765cb..c6080ed3b6a7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -229,6 +229,7 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) pm->pmf = &kfd_vi_pm_funcs; break; case CHIP_VEGA10: + case CHIP_VEGA20: case CHIP_RAVEN: pm->pmf = &kfd_v9_pm_funcs; break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b0064b08aa11..53ff86d45d91 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -176,6 +176,7 @@ struct kfd_device_info { bool needs_iommu_device; bool needs_pci_atomics; unsigned int num_sdma_engines; + unsigned int num_sdma_queues_per_engine; }; struct kfd_mem_obj { @@ -247,6 +248,10 @@ struct kfd_dev { /* Debug manager */ struct kfd_dbgmgr *dbgmgr; + /* Firmware versions */ + uint16_t mec_fw_version; + uint16_t sdma_fw_version; + /* Maximum process number mapped to HW scheduler */ unsigned int max_proc_per_quantum; @@ -257,6 +262,8 @@ struct kfd_dev { /* xGMI */ uint64_t hive_id; + + bool pci_atomic_requested; }; /* KGD2KFD callbacks */ @@ -500,11 +507,11 @@ struct qcm_process_device { * All the memory management data should be here too */ uint64_t gds_context_area; + uint64_t page_table_base; uint32_t sh_mem_config; uint32_t sh_mem_bases; uint32_t sh_mem_ape1_base; uint32_t sh_mem_ape1_limit; - uint32_t page_table_base; uint32_t gds_size; uint32_t num_gws; uint32_t num_oac; @@ -856,6 +863,11 @@ int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid, struct queue_properties *p); struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm, unsigned int qid); +int pqm_get_wave_state(struct process_queue_manager *pqm, + unsigned int qid, + void __user *ctl_stack, + u32 *ctl_stack_used_size, + u32 *save_area_used_size); int amdkfd_fence_wait_timeout(unsigned int *fence_addr, unsigned int fence_value, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index c8cad9c078ae..fcaaf93681ac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -408,6 +408,28 @@ struct kernel_queue *pqm_get_kernel_queue( return NULL; } +int pqm_get_wave_state(struct process_queue_manager *pqm, + unsigned int qid, + void __user *ctl_stack, + u32 *ctl_stack_used_size, + u32 *save_area_used_size) +{ + struct process_queue_node *pqn; + + pqn = get_queue_by_qid(pqm, qid); + if (!pqn) { + pr_debug("amdkfd: No queue %d exists for operation\n", + qid); + return -EFAULT; + } + + return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm, + pqn->q, + ctl_stack, + ctl_stack_used_size, + save_area_used_size); +} + #if defined(CONFIG_DEBUG_FS) int pqm_debugfs_mqds(struct seq_file *m, void *data) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 0dff66be8d7a..e3843c5929ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -482,11 +482,11 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, (unsigned long long int) 0); sysfs_show_32bit_prop(buffer, "fw_version", - dev->gpu->kfd2kgd->get_fw_version( - dev->gpu->kgd, - KGD_ENGINE_MEC1)); + dev->gpu->mec_fw_version); sysfs_show_32bit_prop(buffer, "capability", dev->node_props.capability); + sysfs_show_32bit_prop(buffer, "sdma_fw_version", + dev->gpu->sdma_fw_version); } return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute", @@ -1127,17 +1127,40 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) { - struct kfd_iolink_properties *link; + struct kfd_iolink_properties *link, *cpu_link; + struct kfd_topology_device *cpu_dev; + uint32_t cap; + uint32_t cpu_flag = 
CRAT_IOLINK_FLAGS_ENABLED; + uint32_t flag = CRAT_IOLINK_FLAGS_ENABLED; if (!dev || !dev->gpu) return; - /* GPU only creates direck links so apply flags setting to all */ - if (dev->gpu->device_info->asic_family == CHIP_HAWAII) - list_for_each_entry(link, &dev->io_link_props, list) - link->flags = CRAT_IOLINK_FLAGS_ENABLED | - CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | - CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; + pcie_capability_read_dword(dev->gpu->pdev, + PCI_EXP_DEVCAP2, &cap); + + if (!(cap & (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | + PCI_EXP_DEVCAP2_ATOMIC_COMP64))) + cpu_flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | + CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; + + if (!dev->gpu->pci_atomic_requested || + dev->gpu->device_info->asic_family == CHIP_HAWAII) + flag |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | + CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; + + /* GPU only creates direct links so apply flags setting to all */ + list_for_each_entry(link, &dev->io_link_props, list) { + link->flags = flag; + cpu_dev = kfd_topology_device_by_proximity_domain( + link->node_to); + if (cpu_dev) { + list_for_each_entry(cpu_link, + &cpu_dev->io_link_props, list) + if (cpu_link->node_to == link->node_from) + cpu_link->flags = cpu_flag; + } + } } int kfd_topology_add_device(struct kfd_dev *gpu) @@ -1255,6 +1278,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); break; case CHIP_VEGA10: + case CHIP_VEGA20: case CHIP_RAVEN: dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2be1e3033ce4..e5294d1a3049 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -338,14 +338,6 @@ static int dm_set_powergating_state(void *handle, /* Prototypes of private functions */ static int dm_early_init(void* handle); -static void hotplug_notify_work_func(struct work_struct *work) -{ - struct amdgpu_display_manager *dm = container_of(work, struct amdgpu_display_manager, mst_hotplug_work); - struct drm_device *dev = dm->ddev; - - drm_kms_helper_hotplug_event(dev); -} - /* Allocate memory for FBC compressed data */ static void amdgpu_dm_fbc_init(struct drm_connector *connector) { @@ -447,8 +439,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } - INIT_WORK(&adev->dm.mst_hotplug_work, hotplug_notify_work_func); - adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -728,6 +718,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, return NULL; } +static void emulated_link_detect(struct dc_link *link) +{ + struct dc_sink_init_data sink_init_data = { 0 }; + struct display_sink_capability sink_caps = { 0 }; + enum dc_edid_status edid_status; + struct dc_context *dc_ctx = link->ctx; + struct dc_sink *sink = NULL; + struct dc_sink *prev_sink = NULL; + + link->type = dc_connection_none; + prev_sink = link->local_sink; + + if (prev_sink != NULL) + dc_sink_retain(prev_sink); + + switch (link->connector_signal) { + case SIGNAL_TYPE_HDMI_TYPE_A: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; + break; + } + + case SIGNAL_TYPE_DVI_SINGLE_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + } + + case SIGNAL_TYPE_DVI_DUAL_LINK: { + sink_caps.transaction_type = 
DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; + break; + } + + case SIGNAL_TYPE_LVDS: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_LVDS; + break; + } + + case SIGNAL_TYPE_EDP: { + sink_caps.transaction_type = + DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.signal = SIGNAL_TYPE_EDP; + break; + } + + case SIGNAL_TYPE_DISPLAY_PORT: { + sink_caps.transaction_type = + DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.signal = SIGNAL_TYPE_VIRTUAL; + break; + } + + default: + DC_ERROR("Invalid connector type! signal:%d\n", + link->connector_signal); + return; + } + + sink_init_data.link = link; + sink_init_data.sink_signal = sink_caps.signal; + + sink = dc_sink_create(&sink_init_data); + if (!sink) { + DC_ERROR("Failed to create sink!\n"); + return; + } + + link->local_sink = sink; + + edid_status = dm_helpers_read_local_edid( + link->ctx, + link, + sink); + + if (edid_status != EDID_OK) + DC_ERROR("Failed to read EDID"); + +} + static int dm_resume(void *handle) { struct amdgpu_device *adev = handle; @@ -741,6 +812,7 @@ static int dm_resume(void *handle) struct drm_plane *plane; struct drm_plane_state *new_plane_state; struct dm_plane_state *dm_new_plane_state; + enum dc_connection_type new_connection_type = dc_connection_none; int ret; int i; @@ -771,7 +843,13 @@ static int dm_resume(void *handle) continue; mutex_lock(&aconnector->hpd_lock); - dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) + emulated_link_detect(aconnector->dc_link); + else + dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); if (aconnector->fake_enable && aconnector->dc_link->local_sink) aconnector->fake_enable = false; @@ -1020,6 +1098,7 @@ static void handle_hpd_irq(void *param) struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; + enum dc_connection_type new_connection_type = dc_connection_none; /* * In case of failure or MST no need to update connector status or notify the OS @@ -1030,7 +1109,21 @@ static void handle_hpd_irq(void *param) if (aconnector->fake_enable) aconnector->fake_enable = false; - if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { + if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) { + emulated_link_detect(aconnector->dc_link); + + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + if (aconnector->base.force == DRM_FORCE_UNSPECIFIED) + drm_kms_helper_hotplug_event(dev); + + } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { amdgpu_dm_update_connector_after_detect(aconnector); @@ -1130,6 +1223,7 @@ static void handle_hpd_rx_irq(void *param) struct drm_device *dev = connector->dev; struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; + enum dc_connection_type new_connection_type = dc_connection_none; /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio @@ -1142,7 +1236,24 @@ static void handle_hpd_rx_irq(void *param) if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && !is_mst_root_connector) { /* 
Downstream Port status changed. */ - if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { + if (!dc_link_detect_sink(dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) { + emulated_link_detect(dc_link); + + if (aconnector->fake_enable) + aconnector->fake_enable = false; + + amdgpu_dm_update_connector_after_detect(aconnector); + + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + drm_kms_helper_hotplug_event(dev); + } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { if (aconnector->fake_enable) aconnector->fake_enable = false; @@ -1214,7 +1325,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) struct dc_interrupt_params int_params = {0}; int r; int i; - unsigned client_id = AMDGPU_IH_CLIENTID_LEGACY; + unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY; if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || @@ -1539,6 +1650,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) struct amdgpu_mode_info *mode_info = &adev->mode_info; uint32_t link_cnt; int32_t total_overlay_planes, total_primary_planes; + enum dc_connection_type new_connection_type = dc_connection_none; link_cnt = dm->dc->caps.max_links; if (amdgpu_dm_mode_config_init(dm->adev)) { @@ -1605,7 +1717,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) link = dc_get_link_at_index(dm->dc, i); - if (dc_link_detect(link, DETECT_REASON_BOOT)) { + if (!dc_link_detect_sink(link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) { + emulated_link_detect(link); + amdgpu_dm_update_connector_after_detect(aconnector); + + } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); } @@ -2648,7 +2767,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (dm_state && dm_state->freesync_capable) stream->ignore_msa_timing_param = true; finish: - if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL) + if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) dc_sink_release(sink); return stream; @@ -4079,6 +4198,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, /* TODO eliminate or rename surface_update */ struct dc_surface_update surface_updates[1] = { {0} }; struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state); + struct dc_stream_status *stream_status; /* Prepare wait for target vblank early - before the fence-waits */ @@ -4134,7 +4254,19 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0]; + stream_status = dc_stream_get_status(acrtc_state->stream); + if (!stream_status) { + DRM_ERROR("No stream status for CRTC: id=%d\n", + acrtc->crtc_id); + return; + } + + surface_updates->surface = stream_status->plane_states[0]; + if (!surface_updates->surface) { + DRM_ERROR("No surface for CRTC: id=%d\n", + acrtc->crtc_id); + return; + } surface_updates->flip_addr = &addr; dc_commit_updates_for_stream(adev->dm.dc, @@ -4608,12 +4740,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } spin_unlock_irqrestore(&adev->ddev->event_lock, flags); - /* Signal HW programming 
completion */ - drm_atomic_helper_commit_hw_done(state); if (wait_for_vblank) drm_atomic_helper_wait_for_flip_done(dev, state); + /* + * FIXME: + * Delay hw_done() until flip_done() is signaled. This is to block + * another commit from freeing the CRTC state while we're still + * waiting on flip_done. + */ + drm_atomic_helper_commit_hw_done(state); + drm_atomic_helper_cleanup_planes(dev, state); /* @@ -4797,6 +4935,8 @@ void set_freesync_on_stream(struct amdgpu_display_manager *dm, mod_freesync_build_vrr_infopacket(dm->freesync_module, new_stream, &vrr, + packet_type_fs1, + NULL, &vrr_infopacket); new_crtc_state->adjust = vrr.adjust; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d4f1bdf93207..978b34a5011c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -108,8 +108,6 @@ struct amdgpu_display_manager { const struct dc_link *backlight_link; - struct work_struct mst_hotplug_work; - struct mod_freesync *freesync_module; /** diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 0ef4a40d2247..9a7ac58eb18e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -705,7 +705,8 @@ int connector_debugfs_init(struct amdgpu_dm_connector *connector) int i; struct dentry *ent, *dir = connector->base.debugfs_entry; - if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) { ent = debugfs_create_file(dp_debugfs_entries[i].name, 0644, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index a910f01838ab..a212178f2edc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -36,17 +36,13 @@ * Private declarations. *****************************************************************************/ -struct handler_common_data { +struct amdgpu_dm_irq_handler_data { struct list_head list; interrupt_handler handler; void *handler_arg; /* DM which this handler belongs to */ struct amdgpu_display_manager *dm; -}; - -struct amdgpu_dm_irq_handler_data { - struct handler_common_data hcd; /* DAL irq source which registered for this interrupt. */ enum dc_irq_source irq_source; }; @@ -61,7 +57,7 @@ struct amdgpu_dm_irq_handler_data { * Private functions. 
*****************************************************************************/ -static void init_handler_common_data(struct handler_common_data *hcd, +static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, void (*ih)(void *), void *args, struct amdgpu_display_manager *dm) @@ -85,11 +81,9 @@ static void dm_irq_work_func(struct work_struct *work) struct amdgpu_dm_irq_handler_data *handler_data; list_for_each(entry, handler_list) { - handler_data = - list_entry( - entry, - struct amdgpu_dm_irq_handler_data, - hcd.list); + handler_data = list_entry(entry, + struct amdgpu_dm_irq_handler_data, + list); DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n", handler_data->irq_source); @@ -97,7 +91,7 @@ static void dm_irq_work_func(struct work_struct *work) DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n", handler_data->irq_source); - handler_data->hcd.handler(handler_data->hcd.handler_arg); + handler_data->handler(handler_data->handler_arg); } /* Call a DAL subcomponent which registered for interrupt notification @@ -137,11 +131,11 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, list_for_each_safe(entry, tmp, hnd_list) { handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, - hcd.list); + list); if (ih == handler) { /* Found our handler. Remove it from the list. */ - list_del(&handler->hcd.list); + list_del(&handler->list); handler_removed = true; break; } @@ -230,8 +224,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, memset(handler_data, 0, sizeof(*handler_data)); - init_handler_common_data(&handler_data->hcd, ih, handler_args, - &adev->dm); + init_handler_common_data(handler_data, ih, handler_args, &adev->dm); irq_source = int_params->irq_source; @@ -250,7 +243,7 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, break; } - list_add_tail(&handler_data->hcd.list, hnd_list); + list_add_tail(&handler_data->list, hnd_list); DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); @@ -462,15 +455,13 @@ static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, entry, &adev->dm.irq_handler_list_high_tab[irq_source]) { - handler_data = - list_entry( - entry, - struct amdgpu_dm_irq_handler_data, - hcd.list); + handler_data = list_entry(entry, + struct amdgpu_dm_irq_handler_data, + list); /* Call a subcomponent which registered for immediate * interrupt notification */ - handler_data->hcd.handler(handler_data->hcd.handler_arg); + handler_data->handler(handler_data->handler_arg); } DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 6d16b4a0353d..0fab64a2a915 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -105,6 +105,8 @@ bool dm_pp_apply_display_requirements( adev->powerplay.pp_funcs->display_configuration_change( adev->powerplay.pp_handle, &adev->pm.pm_display_cfg); + + amdgpu_pm_compute_clocks(adev); } return true; diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c index 5e2ea12fbb73..d0fc54f8fb1c 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c @@ -1625,11 +1625,11 @@ void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performan else { v->dsty_after_scaler = 0.0; } - v->v_update_offset_pix =dcn_bw_ceil2(v->htotal[k] / 4.0, 
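/*
 * Note on this hunk: v_update_offset_pix, v_update_width_pix and
 * v_ready_offset_pix used to be scalar floats, so each pass of the plane
 * loop clobbered the values computed for the previous plane; only the last
 * plane's setup/ready offsets survived. Promoting them to per-plane arrays
 * indexed by [k] (see the matching dcn_calcs.h hunk later in this diff)
 * lets dcn_validate_bandwidth() pick up v_update_*_pix[input_idx] for the
 * plane it is actually programming, replacing the old
 * v_update_width[input_idx][dpp_per_plane == 2 ? 1 : 0] lookups.
 */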
1.0); + v->v_update_offset_pix[k] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0); v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk); - v->v_update_width_pix = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k]; - v->v_ready_offset_pix =dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k]; - v->t_setup = (v->v_update_offset_pix + v->v_update_width_pix + v->v_ready_offset_pix) / v->pixel_clock[k]; + v->v_update_width_pix[k] = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k]; + v->v_ready_offset_pix[k] = dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k]; + v->t_setup = (v->v_update_offset_pix[k] + v->v_update_width_pix[k] + v->v_ready_offset_pix[k]) / v->pixel_clock[k]; v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]); if (v->prefetch_mode == 0.0) { v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 80ec09eef44f..3208188b7ed4 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1096,9 +1096,9 @@ bool dcn_validate_bandwidth( if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) continue; - pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0]; - pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0]; - pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0]; + pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx]; + pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx]; + pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx]; pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx]; pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total; @@ -1137,9 +1137,9 @@ bool dcn_validate_bandwidth( TIMING_3D_FORMAT_SIDE_BY_SIDE))) { if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { /* update previously split pipe */ - hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0]; - hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 1 : 0]; - hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset[input_idx][v->dpp_per_plane[input_idx] == 2 ? 
1 : 0]; + hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx]; + hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx]; + hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx]; hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx]; hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 1c438eedf77a..7c491c91465f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -60,6 +60,7 @@ #define DC_LOGGER \ dc->ctx->logger +const static char DC_BUILD_ID[] = "production-build"; /******************************************************************************* * Private functions @@ -460,9 +461,25 @@ void dc_link_set_preferred_link_settings(struct dc *dc, struct dc_link_settings *link_setting, struct dc_link *link) { + int i; + struct pipe_ctx *pipe; + struct dc_stream_state *link_stream; struct dc_link_settings store_settings = *link_setting; - struct dc_stream_state *link_stream = - link->dc->current_state->res_ctx.pipe_ctx[0].stream; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream && pipe->stream->sink + && pipe->stream->sink->link) { + if (pipe->stream->sink->link == link) + break; + } + } + + /* Stream not found */ + if (i == MAX_PIPES) + return; + + link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream; link->preferred_link_setting = store_settings; if (link_stream) @@ -742,6 +759,8 @@ struct dc *dc_create(const struct dc_init_data *init_params) dc->config = init_params->flags; + dc->build_id = DC_BUILD_ID; + DC_LOG_DC("Display Core initialized\n"); @@ -1094,32 +1113,6 @@ static bool is_surface_in_context( return false; } -static unsigned int pixel_format_to_bpp(enum surface_pixel_format format) -{ - switch (format) { - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - return 12; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - return 16; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - return 32; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - return 64; - default: - ASSERT_CRITICAL(false); - return -1; - } -} - static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u) { union surface_update_flags *update_flags = &u->surface->update_flags; @@ -1153,16 +1146,13 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa || u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch) update_flags->bits.dcc_change = 1; - if (pixel_format_to_bpp(u->plane_info->format) != - pixel_format_to_bpp(u->surface->format)) + if (resource_pixel_format_to_bpp(u->plane_info->format) != + resource_pixel_format_to_bpp(u->surface->format)) /* different bytes per element will require full bandwidth * and DML calculation */ update_flags->bits.bpp_change = 1; - if (u->gamma && dce_use_lut(u->plane_info->format)) - update_flags->bits.gamma_change = 1; - if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info, sizeof(union 
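/*
 * Note on the gamma check deleted just above: it dereferenced
 * u->plane_info->format unconditionally, yet only ran for updates that
 * carry a plane_info, so an update delivering just a new gamma ramp never
 * raised gamma_change. Its replacement in det_surface_update() (later in
 * this hunk) falls back to u->surface->format when u->plane_info is NULL,
 * and still escalates a gamma change to UPDATE_TYPE_FULL.
 */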
dc_tiling_info)) != 0) { update_flags->bits.swizzle_change = 1; @@ -1179,7 +1169,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa if (update_flags->bits.rotation_change || update_flags->bits.stereo_format_change || update_flags->bits.pixel_format_change - || update_flags->bits.gamma_change || update_flags->bits.bpp_change || update_flags->bits.bandwidth_change || update_flags->bits.output_tf_change) @@ -1269,13 +1258,26 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->coeff_reduction_factor) update_flags->bits.coeff_reduction_change = 1; + if (u->gamma) { + enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; + + if (u->plane_info) + format = u->plane_info->format; + else if (u->surface) + format = u->surface->format; + + if (dce_use_lut(format)) + update_flags->bits.gamma_change = 1; + } + if (update_flags->bits.in_transfer_func_change) { type = UPDATE_TYPE_MED; elevate_update_type(&overall_type, type); } if (update_flags->bits.input_csc_change - || update_flags->bits.coeff_reduction_change) { + || update_flags->bits.coeff_reduction_change + || update_flags->bits.gamma_change) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } @@ -1379,7 +1381,7 @@ static void notify_display_count_to_smu( * sent as part of pplib_apply_display_requirements. * So just return. */ - if (!pp_smu->set_display_count) + if (!pp_smu || !pp_smu->set_display_count) return; display_count = 0; @@ -1834,3 +1836,16 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) } } } + +void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info) +{ + info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz; + info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz; + info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz; + info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz; + info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz; + info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz; + info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz; + info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz; + info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz; +}
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index bd58dbae7d3e..fb04a4ad141f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -198,7 +198,7 @@ static bool program_hpd_filter( return result; } -static bool detect_sink(struct dc_link *link, enum dc_connection_type *type) +bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) { uint32_t is_hpd_high = 0; struct gpio *hpd_pin; @@ -612,7 +612,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) return false; - if (false == detect_sink(link, &new_connection_type)) { + if (false == dc_link_detect_sink(link, &new_connection_type)) { BREAK_TO_DEBUGGER(); return false; } @@ -2559,23 +2559,24 @@ void core_link_enable_stream( pipe_ctx->stream_res.stream_enc, &stream->timing); - resource_build_info_frame(pipe_ctx); - core_dc->hwss.update_info_frame(pipe_ctx); + if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { + resource_build_info_frame(pipe_ctx); + core_dc->hwss.update_info_frame(pipe_ctx); - /* eDP lit up by bios already, no need to enable again. */ - if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && - pipe_ctx->stream->apply_edp_fast_boot_optimization) { - pipe_ctx->stream->apply_edp_fast_boot_optimization = false; - pipe_ctx->stream->dpms_off = false; - return; - } + /* eDP lit up by bios already, no need to enable again. */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + pipe_ctx->stream->apply_edp_fast_boot_optimization) { + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; + pipe_ctx->stream->dpms_off = false; + return; + } - if (pipe_ctx->stream->dpms_off) - return; + if (pipe_ctx->stream->dpms_off) + return; - status = enable_link(state, pipe_ctx); + status = enable_link(state, pipe_ctx); - if (status != DC_OK) { + if (status != DC_OK) { DC_LOG_WARNING("enabling link %u failed: %d\n", pipe_ctx->stream->sink->link->link_index, status); @@ -2590,23 +2591,26 @@ void core_link_enable_stream( BREAK_TO_DEBUGGER(); return; } - } + } - core_dc->hwss.enable_audio_stream(pipe_ctx); + core_dc->hwss.enable_audio_stream(pipe_ctx); - /* turn off otg test pattern if enable */ - if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) - pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, - CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, - COLOR_DEPTH_UNDEFINED); + /* turn off otg test pattern if enable */ + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + COLOR_DEPTH_UNDEFINED); - core_dc->hwss.enable_stream(pipe_ctx); + core_dc->hwss.enable_stream(pipe_ctx); - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - allocate_mst_payload(pipe_ctx); + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + allocate_mst_payload(pipe_ctx); + + core_dc->hwss.unblank_stream(pipe_ctx, + &pipe_ctx->stream->sink->link->cur_link_settings); + + } - core_dc->hwss.unblank_stream(pipe_ctx, - &pipe_ctx->stream->sink->link->cur_link_settings); } void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 2d6a4300bfa4..b6fe29b9fb65 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1975,6 +1975,9 @@ static void calculate_phy_pix_clks(struct dc_stream_state *stream) else stream->phy_pix_clk = stream->timing.pix_clk_khz; + + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + stream->phy_pix_clk *= 2; } enum dc_status resource_map_pool_resources( @@ -2096,6 +2099,14 @@ enum dc_status dc_validate_global_state( if (pipe_ctx->stream != stream) continue; + if (dc->res_pool->funcs->get_default_swizzle_mode && + pipe_ctx->plane_state && + pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) { + result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state); + if (result != DC_OK) + return result; + } + /* Switch to dp clock source only if there is * no non dp stream that shares the same timing * with the dp stream. @@ -2885,3 +2896,32 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla return res; } + +unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format) +{ + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: + return 8; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + return 12; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + return 16; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + return 32; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + return 64; + default: + ASSERT_CRITICAL(false); + return -1; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7691139363a9..199527171100 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,13 +38,12 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.66" +#define DC_VER "3.1.68" #define MAX_SURFACES 3 #define MAX_STREAMS 6 #define MAX_SINKS_PER_LINK 4 - /******************************************************************************* * Display Core Interfaces ******************************************************************************/ @@ -208,6 +207,7 @@ struct dc_clocks { int dcfclk_deep_sleep_khz; int fclk_khz; int phyclk_khz; + int dramclk_khz; }; struct dc_debug_options { @@ -315,6 +315,8 @@ struct dc { struct compressor *fbc_compressor; struct dc_debug_data debug_data; + + const char *build_id; }; enum frame_buffer_mode { @@ -599,6 +601,8 @@ struct dc_validation_set { enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *plane_state); +void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info); + enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 57f57cf0fe2a..7825e4b5e97c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -289,7 +289,8 @@ enum swizzle_mode_values { DC_SW_VAR_S_X = 29, DC_SW_VAR_D_X = 30, DC_SW_VAR_R_X = 31, - DC_SW_MAX + DC_SW_MAX = 32, + DC_SW_UNKNOWN = DC_SW_MAX }; union dc_tiling_info { diff 
--git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 438fb35d87b8..3bfdccceb524 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -216,6 +216,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable); bool dc_link_is_dp_sink_present(struct dc_link *link); +bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type); /* * DPCD access interfaces */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 4fb62780a696..6e12d640d020 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -659,4 +659,16 @@ enum i2c_mot_mode { I2C_MOT_FALSE }; +struct AsicStateEx { + unsigned int memoryClock; + unsigned int displayClock; + unsigned int engineClock; + unsigned int maxSupportedDppClock; + unsigned int dppClock; + unsigned int socClock; + unsigned int dcfClockDeepSleep; + unsigned int fClock; + unsigned int phyClock; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index 3f5b2e6f7553..aaeb7faac0c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -312,7 +312,7 @@ static void process_channel_reply( /* in case HPD is LOW, exit AUX transaction */ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) { - reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON; + reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON; return; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index bf6261a1584b..d89a097ba936 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -468,6 +468,9 @@ static void dce12_update_clocks(struct dccg *dccg, { struct dm_pp_clock_for_voltage_req clock_voltage_req = {0}; + /* TODO: Investigate why this is needed to fix display corruption. */ + new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) { clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK; clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz; @@ -661,6 +664,11 @@ static void dce_update_clocks(struct dccg *dccg, bool safe_to_lower) { struct dm_pp_power_level_change_request level_change_req; + struct dce_dccg *clk_dce = TO_DCE_CLOCKS(dccg); + + /* TODO: Investigate why this is needed to fix display corruption. 
*/ + if (!clk_dce->dfs_bypass_active) + new_clocks->dispclk_khz = new_clocks->dispclk_khz * 115 / 100; level_change_req.power_level = dce_get_required_clocks_state(dccg, new_clocks); /* get max clock state from PPLIB */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 4942590e8b9c..366bc8c2c643 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -662,21 +662,10 @@ bool dce110_link_encoder_validate_dp_output( const struct dce110_link_encoder *enc110, const struct dc_crtc_timing *crtc_timing) { - /* default RGB only */ - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) - return true; - - if (enc110->base.features.flags.bits.IS_YCBCR_CAPABLE) - return true; - - /* for DCE 8.x or later DP Y-only feature, - * we need ASIC cap + FeatureSupportDPYonly, not support 666 */ - if (crtc_timing->flags.Y_ONLY && - enc110->base.features.flags.bits.IS_YCBCR_CAPABLE && - crtc_timing->display_color_depth != COLOR_DEPTH_666) - return true; + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + return false; - return false; + return true; } void dce110_link_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index b1cc38827f09..14754a87156c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -551,8 +551,7 @@ static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 300000, .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_YCBCR_CAPABLE = true + .flags.bits.IS_TPS3_CAPABLE = true }; struct link_encoder *dce100_link_encoder_create( @@ -690,7 +689,9 @@ static void destruct(struct dce110_resource_pool *pool) kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index dc1eed5ba996..b75ede5f84f7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1377,26 +1377,13 @@ static enum dc_status apply_single_controller_ctx_to_hw( /* */ dc->hwss.enable_stream_timing(pipe_ctx, context, dc); - /* FPGA does not program backend */ - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { - pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( - pipe_ctx->stream_res.opp, - COLOR_SPACE_YCBCR601, - stream->timing.display_color_depth, - pipe_ctx->stream->signal); - - pipe_ctx->stream_res.opp->funcs->opp_program_fmt( - pipe_ctx->stream_res.opp, - &stream->bit_depth_params, - &stream->clamping); - return DC_OK; - } /* TODO: move to stream encoder */ if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) if (DC_OK != bios_parser_crtc_source_select(pipe_ctx)) { BREAK_TO_DEBUGGER(); return DC_ERROR_UNEXPECTED; } + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( pipe_ctx->stream_res.opp, COLOR_SPACE_YCBCR601, @@ -2550,7 +2537,7 @@ static void pplib_apply_display_requirements( dc->prev_display_config = *pp_display_cfg; } -void 
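/*
 * Note on dce110_link_encoder_validate_dp_output() above (and its DCN10
 * counterpart later in this diff): instead of whitelisting RGB and gating
 * everything else on IS_YCBCR_CAPABLE, the check now rejects only
 * YCbCr 4:2:0 and accepts the rest. That leaves the IS_YCBCR_CAPABLE
 * feature bit unused, so it is dropped from encoder_feature_support and
 * from every *_resource.c initializer touched by this diff.
 */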
dce110_set_bandwidth( +static void dce110_set_bandwidth( struct dc *dc, struct dc_state *context, bool decrease_allowed) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h index e4c5db75c4c6..d6db3dbd9015 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h @@ -68,11 +68,6 @@ void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg); -void dce110_set_bandwidth( - struct dc *dc, - struct dc_state *context, - bool decrease_allowed); - uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); void dp_receiver_power_ctrl(struct dc_link *link, bool on); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index b44cc7042249..de190935f0a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -570,8 +570,7 @@ static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 594000, .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_YCBCR_CAPABLE = true + .flags.bits.IS_TPS3_CAPABLE = true }; static struct link_encoder *dce110_link_encoder_create( @@ -720,7 +719,9 @@ static void destruct(struct dce110_resource_pool *pool) kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 0f8332ea1160..3ce79c208ddf 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -555,8 +555,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true, - .flags.bits.IS_YCBCR_CAPABLE = true + .flags.bits.IS_TPS4_CAPABLE = true }; struct link_encoder *dce112_link_encoder_create( @@ -694,9 +693,6 @@ static void destruct(struct dce110_resource_pool *pool) if (pool->base.opps[i] != NULL) dce110_opp_destroy(&pool->base.opps[i]); - if (pool->base.engines[i] != NULL) - dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.transforms[i] != NULL) dce112_transform_destroy(&pool->base.transforms[i]); @@ -712,6 +708,11 @@ static void destruct(struct dce110_resource_pool *pool) kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } + } + + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { + if (pool->base.engines[i] != NULL) + dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { kfree(pool->base.hw_i2cs[i]); pool->base.hw_i2cs[i] = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c index 5853522a6182..eb0f5f9a973b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c @@ -244,17 +244,6 @@ static void 
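/*
 * Two related cleanups around this point:
 *
 * 1. dce110_set_bandwidth() becomes static and its declaration leaves the
 *    header: the dce120_set_bandwidth() wrapper deleted just below was
 *    apparently its only external caller, and dce120 now simply inherits
 *    the dce110 implementation via dce110_hw_sequencer_construct(). The
 *    wrapper's stream_count <= 0 early-return goes away with it.
 *
 * 2. In the destruct() functions of the dce80/dce100/dce110/dce112/dce120
 *    and dcn10 resource pools, the AUX engines and i2c objects are now
 *    freed in a dedicated loop bounded by res_cap->num_ddc instead of
 *    piggybacking on a loop sized for other resources, so pools whose
 *    counts differ free exactly the entries they allocated.
 */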
dce120_update_dchub( dh_data->dchub_info_valid = false; } -static void dce120_set_bandwidth( - struct dc *dc, - struct dc_state *context, - bool decrease_allowed) -{ - if (context->stream_count <= 0) - return; - - dce110_set_bandwidth(dc, context, decrease_allowed); -} - void dce120_hw_sequencer_construct(struct dc *dc) { /* All registers used by dce11.2 match those in dce11 in offset and @@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc) dce110_hw_sequencer_construct(dc); dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; dc->hwss.update_dchub = dce120_update_dchub; - dc->hwss.set_bandwidth = dce120_set_bandwidth; } diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 59055801af44..79ab5f9f9115 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -533,7 +533,9 @@ static void destruct(struct dce110_resource_pool *pool) kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { @@ -609,7 +611,6 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, .flags.bits.IS_TPS4_CAPABLE = true, - .flags.bits.IS_YCBCR_CAPABLE = true }; static struct link_encoder *dce120_link_encoder_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 1dc590ccc5f9..d68f951f9869 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -650,8 +650,7 @@ static const struct encoder_feature_support link_enc_feature = { .max_hdmi_deep_color = COLOR_DEPTH_121212, .max_hdmi_pixel_clock = 297000, .flags.bits.IS_HBR2_CAPABLE = true, - .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_YCBCR_CAPABLE = true + .flags.bits.IS_TPS3_CAPABLE = true }; struct link_encoder *dce80_link_encoder_create( @@ -739,7 +738,9 @@ static void destruct(struct dce110_resource_pool *pool) kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 1ea91e153d3a..4254e7e1a509 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -87,6 +87,23 @@ void hubbub1_wm_read_state(struct hubbub *hubbub, s->dram_clk_chanage = REG_READ(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D); } +void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub) +{ + REG_UPDATE(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, 0); +} + +bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubbub) +{ + uint32_t enable = 0; + + REG_GET(DCHUBBUB_ARB_DRAM_STATE_CNTL, + DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE, &enable); + + return true ? 
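/*
 * Caution on the ternary completed on the next line: as written,
 * "true ? false : enable" always evaluates to false, so
 * hububu1_is_allow_self_refresh_enabled() reports self-refresh as disabled
 * no matter what REG_GET read back. The S0i3 workaround added to
 * dcn10_init_hw() further down keys off this helper, so as written it can
 * never trigger. The intended expression is presumably:
 *
 *     return enable ? true : false;
 */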
false : enable; +} + + bool hubbub1_verify_allow_pstate_change_high( struct hubbub *hubbub) { @@ -116,7 +133,43 @@ bool hubbub1_verify_allow_pstate_change_high( forced_pstate_allow = false; } - /* RV1: + /* RV2: + * dchubbubdebugind, at: 0xB + * description + * 0: Pipe0 Plane0 Allow Pstate Change + * 1: Pipe0 Plane1 Allow Pstate Change + * 2: Pipe0 Cursor0 Allow Pstate Change + * 3: Pipe0 Cursor1 Allow Pstate Change + * 4: Pipe1 Plane0 Allow Pstate Change + * 5: Pipe1 Plane1 Allow Pstate Change + * 6: Pipe1 Cursor0 Allow Pstate Change + * 7: Pipe1 Cursor1 Allow Pstate Change + * 8: Pipe2 Plane0 Allow Pstate Change + * 9: Pipe2 Plane1 Allow Pstate Change + * 10: Pipe2 Cursor0 Allow Pstate Change + * 11: Pipe2 Cursor1 Allow Pstate Change + * 12: Pipe3 Plane0 Allow Pstate Change + * 13: Pipe3 Plane1 Allow Pstate Change + * 14: Pipe3 Cursor0 Allow Pstate Change + * 15: Pipe3 Cursor1 Allow Pstate Change + * 16: Pipe4 Plane0 Allow Pstate Change + * 17: Pipe4 Plane1 Allow Pstate Change + * 18: Pipe4 Cursor0 Allow Pstate Change + * 19: Pipe4 Cursor1 Allow Pstate Change + * 20: Pipe5 Plane0 Allow Pstate Change + * 21: Pipe5 Plane1 Allow Pstate Change + * 22: Pipe5 Cursor0 Allow Pstate Change + * 23: Pipe5 Cursor1 Allow Pstate Change + * 24: Pipe6 Plane0 Allow Pstate Change + * 25: Pipe6 Plane1 Allow Pstate Change + * 26: Pipe6 Cursor0 Allow Pstate Change + * 27: Pipe6 Cursor1 Allow Pstate Change + * 28: WB0 Allow Pstate Change + * 29: WB1 Allow Pstate Change + * 30: Arbiter's allow_pstate_change + * 31: SOC pstate change request" + * + * RV1: * dchubbubdebugind, at: 0x7 * description "3-0: Pipe0 cursor0 QOS * 7-4: Pipe1 cursor0 QOS @@ -140,7 +193,6 @@ bool hubbub1_verify_allow_pstate_change_high( * 31: SOC pstate change request */ - REG_WRITE(DCHUBBUB_TEST_DEBUG_INDEX, hubbub->debug_test_index_pstate); for (i = 0; i < pstate_wait_timeout_us; i++) { @@ -802,5 +854,9 @@ void hubbub1_construct(struct hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0x7; +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + if (ctx->dce_version == DCN_VERSION_1_01) + hubbub->debug_test_index_pstate = 0xB; +#endif } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index d6e596eef4c5..d0f03d152913 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -203,6 +203,10 @@ void hubbub1_program_watermarks( unsigned int refclk_mhz, bool safe_to_lower); +void hubbub1_disable_allow_self_refresh(struct hubbub *hubbub); + +bool hububu1_is_allow_self_refresh_enabled(struct hubbub *hubub); + void hubbub1_toggle_watermark_change_req( struct hubbub *hubbub); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 6bd4ec39f869..193184affefb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -44,6 +44,7 @@ #include "dcn10_hubp.h" #include "dcn10_hubbub.h" #include "dcn10_cm_common.h" +#include "dc_link_dp.h" #define DC_LOGGER_INIT(logger) @@ -996,7 +997,21 @@ static void dcn10_init_hw(struct dc *dc) } else { if (!dcb->funcs->is_accelerated_mode(dcb)) { + bool allow_self_fresh_force_enable = + hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub); + bios_golden_init(dc); + + /* WA for making DF sleep when idle after resume from S0i3. 
+ * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by + * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 + * before calling command table and it changed to 1 after, + * it should be set back to 0. + */ + if (allow_self_fresh_force_enable == false && + hububu1_is_allow_self_refresh_enabled(dc->res_pool->hubbub)) + hubbub1_disable_allow_self_refresh(dc->res_pool->hubbub); + disable_vga(dc->hwseq); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c index 6f675206a136..ba6a8686062f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -606,22 +606,10 @@ bool dcn10_link_encoder_validate_dp_output( const struct dcn10_link_encoder *enc10, const struct dc_crtc_timing *crtc_timing) { - /* default RGB only */ - if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) - return true; - - if (enc10->base.features.flags.bits.IS_YCBCR_CAPABLE) - return true; - - /* for DCE 8.x or later DP Y-only feature, - * we need ASIC cap + FeatureSupportDPYonly, not support 666 - */ - if (crtc_timing->flags.Y_ONLY && - enc10->base.features.flags.bits.IS_YCBCR_CAPABLE && - crtc_timing->display_color_depth != COLOR_DEPTH_666) - return true; + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + return false; - return false; + return true; } void dcn10_link_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 411f89218e01..54626682bab2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -98,7 +98,6 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c struct dc_crtc_timing patched_crtc_timing; int vesa_sync_start; int asic_blank_end; - int interlace_factor; int vertical_line_start; patched_crtc_timing = *dc_crtc_timing; @@ -112,16 +111,13 @@ static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_c vesa_sync_start - patched_crtc_timing.h_border_left; - interlace_factor = patched_crtc_timing.flags.INTERLACE ? 
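/*
 * Note on the interlace_factor removals in this file: get_start_vline(),
 * optc1_program_timing() and optc1_validate_timing() no longer double the
 * vertical sync/blank numbers for interlaced timings, and OTG_V_TOTAL is
 * now always programmed as v_total - 1 (previously 2 * v_total when
 * INTERLACE was set). The hunks treat every timing as progressive, which
 * is consistent with DCN10 not driving interlaced scanout through this
 * path.
 */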
2 : 1; - vesa_sync_start = patched_crtc_timing.v_addressable + patched_crtc_timing.v_border_bottom + patched_crtc_timing.v_front_porch; asic_blank_end = (patched_crtc_timing.v_total - vesa_sync_start - - patched_crtc_timing.v_border_top) - * interlace_factor; + patched_crtc_timing.v_border_top); vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; if (vertical_line_start < 0) { @@ -154,7 +150,7 @@ void optc1_program_vline_interrupt( req_delta_lines--; if (req_delta_lines > vsync_line) - start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1; + start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) + 2; else start_line = vsync_line - req_delta_lines; @@ -186,7 +182,6 @@ void optc1_program_timing( uint32_t v_sync_end; uint32_t v_init, v_fp2; uint32_t h_sync_polarity, v_sync_polarity; - uint32_t interlace_factor; uint32_t start_point = 0; uint32_t field_num = 0; uint32_t h_div_2; @@ -237,16 +232,8 @@ void optc1_program_timing( REG_UPDATE(OTG_H_SYNC_A_CNTL, OTG_H_SYNC_A_POL, h_sync_polarity); - /* Load vertical timing */ + v_total = patched_crtc_timing.v_total - 1; - /* CRTC_V_TOTAL = v_total - 1 */ - if (patched_crtc_timing.flags.INTERLACE) { - interlace_factor = 2; - v_total = 2 * patched_crtc_timing.v_total; - } else { - interlace_factor = 1; - v_total = patched_crtc_timing.v_total - 1; - } REG_SET(OTG_V_TOTAL, 0, OTG_V_TOTAL, v_total); @@ -259,7 +246,7 @@ void optc1_program_timing( OTG_V_TOTAL_MIN, v_total); /* v_sync_start = 0, v_sync_end = v_sync_width */ - v_sync_end = patched_crtc_timing.v_sync_width * interlace_factor; + v_sync_end = patched_crtc_timing.v_sync_width; REG_UPDATE_2(OTG_V_SYNC_A, OTG_V_SYNC_A_START, 0, @@ -271,15 +258,13 @@ void optc1_program_timing( asic_blank_end = (patched_crtc_timing.v_total - vesa_sync_start - - patched_crtc_timing.v_border_top) - * interlace_factor; + patched_crtc_timing.v_border_top); /* v_blank_start = v_blank_end + v_active */ asic_blank_start = asic_blank_end + (patched_crtc_timing.v_border_top + patched_crtc_timing.v_addressable + - patched_crtc_timing.v_border_bottom) - * interlace_factor; + patched_crtc_timing.v_border_bottom); REG_UPDATE_2(OTG_V_BLANK_START_END, OTG_V_BLANK_START, asic_blank_start, @@ -301,7 +286,7 @@ void optc1_program_timing( 0 : 1; REG_UPDATE(OTG_V_SYNC_A_CNTL, - OTG_V_SYNC_A_POL, v_sync_polarity); + OTG_V_SYNC_A_POL, v_sync_polarity); v_init = asic_blank_start; if (optc->dlg_otg_param.signal == SIGNAL_TYPE_DISPLAY_PORT || @@ -532,7 +517,6 @@ bool optc1_validate_timing( struct timing_generator *optc, const struct dc_crtc_timing *timing) { - uint32_t interlace_factor; uint32_t v_blank; uint32_t h_blank; uint32_t min_v_blank; @@ -540,10 +524,8 @@ bool optc1_validate_timing( ASSERT(timing != NULL); - interlace_factor = timing->flags.INTERLACE ? 
2 : 1; v_blank = (timing->v_total - timing->v_addressable - - timing->v_border_top - timing->v_border_bottom) * - interlace_factor; + timing->v_border_top - timing->v_border_bottom); h_blank = (timing->h_total - timing->h_addressable - timing->h_border_right - diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index cb1b134b8fcb..a71453a15ae3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -507,6 +507,18 @@ static const struct resource_caps res_cap = { .num_ddc = 4, }; +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) +static const struct resource_caps rv2_res_cap = { + .num_timing_generator = 3, + .num_opp = 3, + .num_video_plane = 3, + .num_audio = 3, + .num_stream_encoder = 3, + .num_pll = 3, + .num_ddc = 3, +}; +#endif + static const struct dc_debug_options debug_defaults_drv = { .sanity_checks = true, .disable_dmcu = true, @@ -711,8 +723,7 @@ static const struct encoder_feature_support link_enc_feature = { .flags.bits.IS_HBR2_CAPABLE = true, .flags.bits.IS_HBR3_CAPABLE = true, .flags.bits.IS_TPS3_CAPABLE = true, - .flags.bits.IS_TPS4_CAPABLE = true, - .flags.bits.IS_YCBCR_CAPABLE = true + .flags.bits.IS_TPS4_CAPABLE = true }; struct link_encoder *dcn10_link_encoder_create( @@ -897,7 +908,9 @@ static void destruct(struct dcn10_resource_pool *pool) kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i])); pool->base.timing_generators[i] = NULL; } + } + for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]); if (pool->base.hw_i2cs[i] != NULL) { @@ -1119,6 +1132,24 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st return DC_OK; } +static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state) +{ + enum dc_status result = DC_OK; + + enum surface_pixel_format surf_pix_format = plane_state->format; + unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format); + + enum swizzle_mode_values swizzle = DC_SW_LINEAR; + + if (bpp == 64) + swizzle = DC_SW_64KB_D; + else + swizzle = DC_SW_64KB_S; + + plane_state->tiling_info.gfx9.swizzle = swizzle; + return result; +} + static const struct dc_cap_funcs cap_funcs = { .get_dcc_compression_cap = dcn10_get_dcc_compression_cap }; @@ -1129,7 +1160,8 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .validate_bandwidth = dcn_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, - .add_stream_to_ctx = dcn10_add_stream_to_ctx + .add_stream_to_ctx = dcn10_add_stream_to_ctx, + .get_default_swizzle_mode = dcn10_get_default_swizzle_mode }; static uint32_t read_pipe_fuses(struct dc_context *ctx) @@ -1152,7 +1184,12 @@ static bool construct( ctx->dc_bios->regs = &bios_regs; - pool->base.res_cap = &res_cap; +#if defined(CONFIG_DRM_AMD_DC_DCN1_01) + if (ctx->dce_version == DCN_VERSION_1_01) + pool->base.res_cap = &rv2_res_cap; + else +#endif + pool->base.res_cap = &res_cap; pool->base.funcs = &dcn10_res_pool_funcs; /* diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c index 8eee8ace1259..59c3ed43d609 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/aux_engine_dce110.c @@ -346,7 +346,7 @@ static void 
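/*
 * Note on dcn10_get_default_swizzle_mode() above: together with the new
 * DC_SW_UNKNOWN sentinel in dc_hw_types.h and the hook called from
 * dc_validate_global_state(), planes that reach validation without an
 * explicit GFX9 swizzle now receive a default instead of programming an
 * undefined value. The selection rule visible in the hunk boils down to:
 *
 *     swizzle = (bpp == 64) ? DC_SW_64KB_D : DC_SW_64KB_S;
 *
 * i.e. 64bpp surfaces take the 64KB display ("D") swizzle and everything
 * else the standard ("S") one.
 */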
process_channel_reply( /* in case HPD is LOW, exit AUX transaction */ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) { - reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON; + reply->status = AUX_TRANSACTION_REPLY_HPD_DISCON; return; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 831a1bdf622c..c1976c175b57 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -120,6 +120,9 @@ struct resource_funcs { struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream); + enum dc_status (*get_default_swizzle_mode)( + struct dc_plane_state *plane_state); + }; struct audio_support{ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index ddbb673caa08..e688eb9b975c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -504,10 +504,10 @@ struct dcn_bw_internal_vars { float prefetch_mode; float dstx_after_scaler; float dsty_after_scaler; - float v_update_offset_pix; + float v_update_offset_pix[number_of_planes_minus_one + 1]; float total_repeater_delay_time; - float v_update_width_pix; - float v_ready_offset_pix; + float v_update_width_pix[number_of_planes_minus_one + 1]; + float v_ready_offset_pix[number_of_planes_minus_one + 1]; float t_setup; float t_wait; float bandwidth_available_for_immediate_flip; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 58818920ed41..e28e9770e0a3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -58,7 +58,6 @@ struct encoder_feature_support { uint32_t IS_HBR3_CAPABLE:1; uint32_t IS_TPS3_CAPABLE:1; uint32_t IS_TPS4_CAPABLE:1; - uint32_t IS_YCBCR_CAPABLE:1; uint32_t HDMI_6GB_EN:1; } bits; uint32_t raw; diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 76d00c6dbca9..33b99e3ab10d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -172,4 +172,7 @@ void update_audio_usage( const struct resource_pool *pool, struct audio *audio, bool acquired); + +unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); + #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h index 03476b142d8e..f56d2891475f 100644 --- a/drivers/gpu/drm/amd/display/include/signal_types.h +++ b/drivers/gpu/drm/amd/display/include/signal_types.h @@ -102,4 +102,9 @@ static inline bool dc_is_audio_capable_signal(enum signal_type signal) dc_is_hdmi_signal(signal)); } +static inline bool dc_is_virtual_signal(enum signal_type signal) +{ + return (signal == SIGNAL_TYPE_VIRTUAL); +} + #endif diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 15427f4fc990..cdcefd087487 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1069,10 +1069,14 @@ static void build_evenly_distributed_points( struct dividers dividers) { struct gamma_pixel *p = points; - struct gamma_pixel *p_last = p + numberof_points - 1; + struct gamma_pixel *p_last; uint32_t i = 0; + // This function should 
not gets called with 0 as a parameter + ASSERT(numberof_points > 0); + p_last = p + numberof_points - 1; + do { struct fixed31_32 value = dc_fixpt_from_fraction(i, numberof_points - 1); @@ -1083,7 +1087,7 @@ static void build_evenly_distributed_points( ++p; ++i; - } while (i != numberof_points); + } while (i < numberof_points); p->r = dc_fixpt_div(p_last->r, dividers.divider1); p->g = dc_fixpt_div(p_last->g, dividers.divider1); diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index e1688902a1b0..4018c7180d00 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -480,22 +480,11 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync, return false; } -void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, - const struct dc_stream_state *stream, - const struct mod_vrr_params *vrr, - struct dc_info_packet *infopacket) +static void build_vrr_infopacket_header_v1(enum signal_type signal, + struct dc_info_packet *infopacket, + unsigned int *payload_size) { - /* SPD info packet for FreeSync */ - unsigned char checksum = 0; - unsigned int idx, payload_size = 0; - - /* Check if Freesync is supported. Return if false. If true, - * set the corresponding bit in the info packet - */ - if (!vrr->supported || !vrr->send_vsif) - return; - - if (dc_is_hdmi_signal(stream->signal)) { + if (dc_is_hdmi_signal(signal)) { /* HEADER */ @@ -510,9 +499,9 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x08] */ infopacket->hb2 = 0x08; - payload_size = 0x08; + *payload_size = 0x08; - } else if (dc_is_dp_signal(stream->signal)) { + } else if (dc_is_dp_signal(signal)) { /* HEADER */ @@ -536,9 +525,62 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, */ infopacket->hb3 = 0x04; - payload_size = 0x1B; + *payload_size = 0x1B; } +} + +static void build_vrr_infopacket_header_v2(enum signal_type signal, + struct dc_info_packet *infopacket, + unsigned int *payload_size) +{ + if (dc_is_hdmi_signal(signal)) { + + /* HEADER */ + + /* HB0 = Packet Type = 0x83 (Source Product + * Descriptor InfoFrame) + */ + infopacket->hb0 = DC_HDMI_INFOFRAME_TYPE_SPD; + + /* HB1 = Version = 0x02 */ + infopacket->hb1 = 0x02; + + /* HB2 = [Bits 7:5 = 0] [Bits 4:0 = Length = 0x09] */ + infopacket->hb2 = 0x09; + + *payload_size = 0x0A; + } else if (dc_is_dp_signal(signal)) { + + /* HEADER */ + + /* HB0 = Secondary-data Packet ID = 0 - Only non-zero + * when used to associate audio related info packets + */ + infopacket->hb0 = 0x00; + + /* HB1 = Packet Type = 0x83 (Source Product + * Descriptor InfoFrame) + */ + infopacket->hb1 = DC_HDMI_INFOFRAME_TYPE_SPD; + + /* HB2 = [Bits 7:0 = Least significant eight bits - + * For INFOFRAME, the value must be 1Bh] + */ + infopacket->hb2 = 0x1B; + + /* HB3 = [Bits 7:2 = INFOFRAME SDP Version Number = 0x2] + * [Bits 1:0 = Most significant two bits = 0x00] + */ + infopacket->hb3 = 0x08; + + *payload_size = 0x1B; + } +} + +static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr, + struct dc_info_packet *infopacket) +{ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */ infopacket->sb[1] = 0x1A; @@ -576,15 +618,39 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, */ infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000); - /* PB9 - PB27 = Reserved */ + //FreeSync HDR + 
infopacket->sb[9] = 0; + infopacket->sb[10] = 0; +} + +static void build_vrr_infopacket_fs2_data(enum color_transfer_func app_tf, + struct dc_info_packet *infopacket) +{ + if (app_tf != transfer_func_unknown) { + infopacket->valid = true; + + infopacket->sb[6] |= 0x08; // PB6 = [Bit 3 = Native Color Active] + + if (app_tf == transfer_func_gamma_22) { + infopacket->sb[9] |= 0x04; // PB9 = [Bit 2 = Gamma 2.2 EOTF Active] + } + } +} + +static void build_vrr_infopacket_checksum(unsigned int *payload_size, + struct dc_info_packet *infopacket) +{ /* Calculate checksum */ + unsigned int idx = 0; + unsigned char checksum = 0; + checksum += infopacket->hb0; checksum += infopacket->hb1; checksum += infopacket->hb2; checksum += infopacket->hb3; - for (idx = 1; idx <= payload_size; idx++) + for (idx = 1; idx <= *payload_size; idx++) checksum += infopacket->sb[idx]; /* PB0 = Checksum (one byte complement) */ @@ -593,6 +659,64 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, infopacket->valid = true; } +static void build_vrr_infopacket_v1(enum signal_type signal, + const struct mod_vrr_params *vrr, + struct dc_info_packet *infopacket) +{ + /* SPD info packet for FreeSync */ + unsigned int payload_size = 0; + + build_vrr_infopacket_header_v1(signal, infopacket, &payload_size); + build_vrr_infopacket_data(vrr, infopacket); + build_vrr_infopacket_checksum(&payload_size, infopacket); + + infopacket->valid = true; +} + +static void build_vrr_infopacket_v2(enum signal_type signal, + const struct mod_vrr_params *vrr, + const enum color_transfer_func *app_tf, + struct dc_info_packet *infopacket) +{ + unsigned int payload_size = 0; + + build_vrr_infopacket_header_v2(signal, infopacket, &payload_size); + build_vrr_infopacket_data(vrr, infopacket); + + if (app_tf != NULL) + build_vrr_infopacket_fs2_data(*app_tf, infopacket); + + build_vrr_infopacket_checksum(&payload_size, infopacket); + + infopacket->valid = true; +} + +void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, + const struct dc_stream_state *stream, + const struct mod_vrr_params *vrr, + enum vrr_packet_type packet_type, + const enum color_transfer_func *app_tf, + struct dc_info_packet *infopacket) +{ + /* SPD info packet for FreeSync */ + + /* Check if Freesync is supported. Return if false. 
If true, + * set the corresponding bit in the info packet + */ + if (!vrr->supported || !vrr->send_vsif) + return; + + switch (packet_type) { + case packet_type_fs2: + build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket); + break; + case packet_type_vrr: + case packet_type_fs1: + default: + build_vrr_infopacket_v1(stream->signal, vrr, infopacket); + } +} + void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, struct mod_freesync_config *in_config, diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index a0f32cde721c..949a8b62aa98 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -54,7 +54,7 @@ #ifndef MOD_FREESYNC_H_ #define MOD_FREESYNC_H_ -#include "dm_services.h" +#include "mod_shared.h" // Access structures struct mod_freesync { @@ -144,6 +144,8 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, const struct dc_stream_state *stream, const struct mod_vrr_params *vrr, + enum vrr_packet_type packet_type, + const enum color_transfer_func *app_tf, struct dc_info_packet *infopacket); void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h new file mode 100644 index 000000000000..238c431ae483 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h @@ -0,0 +1,49 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
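With the widened prototype above, a display-manager caller now selects the packet flavor explicitly. A hypothetical call (the local variables are illustrative, not from this patch) requesting a FreeSync 2 packet with a gamma 2.2 transfer function:

        /* Hypothetical caller: build an FS2 infopacket for an HDR stream. */
        enum color_transfer_func app_tf = transfer_func_gamma_22;
        struct dc_info_packet packet = {0};

        mod_freesync_build_vrr_infopacket(mod_freesync, stream, &vrr,
                        packet_type_fs2, &app_tf, &packet);

        /* Passing NULL for app_tf skips build_vrr_infopacket_fs2_data(). */
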
+ * + * Authors: AMD + * + */ + + +#ifndef MOD_SHARED_H_ +#define MOD_SHARED_H_ + +enum color_transfer_func { + transfer_func_unknown, + transfer_func_srgb, + transfer_func_bt709, + transfer_func_pq2084, + transfer_func_pq2084_interim, + transfer_func_linear_0_1, + transfer_func_linear_0_125, + transfer_func_dolbyvision, + transfer_func_gamma_22, + transfer_func_gamma_26 +}; + +enum vrr_packet_type { + packet_type_vrr, + packet_type_fs1, + packet_type_fs2 +}; + +#endif /* MOD_SHARED_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 52378fc69079..ff8bfb9b43b0 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -48,9 +48,12 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream, unsigned int i; unsigned int pixelEncoding = 0; unsigned int colorimetryFormat = 0; + bool stereo3dSupport = false; - if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) + if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) { vscPacketRevision = 1; + stereo3dSupport = true; + } /*VSC packet set to 2 when DP revision >= 1.2*/ if (stream->psr_version != 0) @@ -94,12 +97,59 @@ static void mod_build_vsc_infopacket(const struct dc_stream_state *stream, info_packet->hb2 = 0x01; // 01h = Revision number. VSC SDP supporting 3D stereo only info_packet->hb3 = 0x01; // 01h = VSC SDP supporting 3D stereo only (HB2 = 01h). - if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_INBAND_FA) - info_packet->sb[0] = 0x1; - info_packet->valid = true; } + if (stereo3dSupport) { + /* ==============================================================================================================| + * A. 
STEREO 3D + * ==============================================================================================================| + * VSC Payload (1 byte) From DP1.2 spec + * + * Bits 3:0 (Stereo Interface Method Code) | Bits 7:4 (Stereo Interface Method Specific Parameter) + * ----------------------------------------------------------------------------------------------------- + * 0 = Non Stereo Video | Must be set to 0x0 + * ----------------------------------------------------------------------------------------------------- + * 1 = Frame/Field Sequential | 0x0: L + R view indication based on MISC1 bit 2:1 + * | 0x1: Right when Stereo Signal = 1 + * | 0x2: Left when Stereo Signal = 1 + * | (others reserved) + * ----------------------------------------------------------------------------------------------------- + * 2 = Stacked Frame | 0x0: Left view is on top and right view on bottom + * | (others reserved) + * ----------------------------------------------------------------------------------------------------- + * 3 = Pixel Interleaved | 0x0: horiz interleaved, right view pixels on even lines + * | 0x1: horiz interleaved, right view pixels on odd lines + * | 0x2: checker board, start with left view pixel + * | 0x3: vertical interleaved, start with left view pixels + * | 0x4: vertical interleaved, start with right view pixels + * | (others reserved) + * ----------------------------------------------------------------------------------------------------- + * 4 = Side-by-side | 0x0: left half represents left eye view + * | 0x1: left half represents right eye view + */ + switch (stream->timing.timing_3d_format) { + case TIMING_3D_FORMAT_HW_FRAME_PACKING: + case TIMING_3D_FORMAT_SW_FRAME_PACKING: + case TIMING_3D_FORMAT_TOP_AND_BOTTOM: + case TIMING_3D_FORMAT_TB_SW_PACKED: + info_packet->sb[0] = 0x02; // Stacked Frame, Left view is on top and right view on bottom. + break; + case TIMING_3D_FORMAT_DP_HDMI_INBAND_FA: + case TIMING_3D_FORMAT_INBAND_FA: + info_packet->sb[0] = 0x01; // Frame/Field Sequential, L + R view indication based on MISC1 bit 2:1 + break; + case TIMING_3D_FORMAT_SIDE_BY_SIDE: + case TIMING_3D_FORMAT_SBS_SW_PACKED: + info_packet->sb[0] = 0x04; // Side-by-side + break; + default: + info_packet->sb[0] = 0x00; // No Stereo Video, Shall be cleared to 0x0. + break; + } + + } + /* 05h = VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/Colorimetry Format indication. 
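Since the table above packs two fields into the single VSC payload byte, decoding goes the other way; a small sketch (the helper name is mine, not from the patch):

        /* Sketch: split VSC PB0 back into the two DP 1.2 stereo fields. */
        static void vsc_stereo_decode(unsigned char sb0,
                        unsigned char *method,          /* bits 3:0 */
                        unsigned char *parameter)       /* bits 7:4 */
        {
                *method = sb0 & 0x0f;
                *parameter = (sb0 >> 4) & 0x0f;
        }
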
* Added in DP1.3, a DP Source device is allowed to indicate the pixel encoding/colorimetry * format to the DP Sink device with VSC SDP only when the DP Sink device supports it diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index 3d4c1b1ab8c4..03121ca64fe4 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -186,12 +186,8 @@ void mod_stats_destroy(struct mod_stats *mod_stats) if (mod_stats != NULL) { struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats); - if (core_stats->time != NULL) - kfree(core_stats->time); - - if (core_stats->events != NULL) - kfree(core_stats->events); - + kfree(core_stats->time); + kfree(core_stats->events); kfree(core_stats); } } diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 86b167ec9863..2083c308007c 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -109,6 +109,7 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12) #define AMD_PG_SUPPORT_MMHUB (1 << 13) #define AMD_PG_SUPPORT_VCN (1 << 14) +#define AMD_PG_SUPPORT_VCN_DPG (1 << 15) enum PP_FEATURE_MASK { PP_SCLK_DPM_MASK = 0x1, diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h index 4ce090db7ef7..529b37db274c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h @@ -2449,6 +2449,8 @@ #define mmCP_ECC_FIRSTOCCURRENCE_RING2_BASE_IDX 0 #define mmGB_EDC_MODE 0x107e #define mmGB_EDC_MODE_BASE_IDX 0 +#define mmCP_DEBUG 0x107f +#define mmCP_DEBUG_BASE_IDX 0 #define mmCP_CPF_DEBUG 0x1080 #define mmCP_PQ_WPTR_POLL_CNTL 0x1083 #define mmCP_PQ_WPTR_POLL_CNTL_BASE_IDX 0 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h index efd2704d0f8f..0d6891095f62 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_offset.h @@ -175,4 +175,7 @@ #define mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX 0 #define mmSMUSVI0_PLANE0_CURRENTVID 0x0013 +#define mmSMUSVI0_TEL_PLANE0_BASE_IDX 0 +#define mmSMUSVI0_TEL_PLANE0 0x0004 + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h index 2487ab9621e9..b1d9d8be1119 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_9_0_sh_mask.h @@ -258,4 +258,7 @@ #define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT 0x18 #define SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK 0xFF000000L +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT 0x10 +#define SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK 0x01FF0000L + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h index 510ec3c70626..a9eb57a53e59 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h @@ -26,6 +26,18 @@ #define mmCG_MULT_THERMAL_STATUS 0x005f #define mmCG_MULT_THERMAL_STATUS_BASE_IDX 0 +#define mmCG_FDO_CTRL0 0x0067 +#define mmCG_FDO_CTRL0_BASE_IDX 0 + +#define mmCG_FDO_CTRL1 0x0068 +#define 
mmCG_FDO_CTRL1_BASE_IDX 0 + +#define mmCG_FDO_CTRL2 0x0069 +#define mmCG_FDO_CTRL2_BASE_IDX 0 + +#define mmCG_TACH_CTRL 0x006a +#define mmCG_TACH_CTRL_BASE_IDX 0 + #define mmTHM_THERMAL_INT_ENA 0x000a #define mmTHM_THERMAL_INT_ENA_BASE_IDX 0 #define mmTHM_THERMAL_INT_CTRL 0x000b diff --git a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h index f69533fa6abf..d130d92aee19 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h @@ -28,6 +28,16 @@ #define CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT 0x9 #define CG_MULT_THERMAL_STATUS__ASIC_MAX_TEMP_MASK 0x000001FFL #define CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK 0x0003FE00L +#define CG_FDO_CTRL2__TMIN__SHIFT 0x0 +#define CG_FDO_CTRL2__TMIN_MASK 0x000000FFL +#define CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT 0xb +#define CG_FDO_CTRL2__FDO_PWM_MODE_MASK 0x00003800L +#define CG_FDO_CTRL1__FMAX_DUTY100__SHIFT 0x0 +#define CG_FDO_CTRL1__FMAX_DUTY100_MASK 0x000000FFL +#define CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT 0x0 +#define CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK 0x000000FFL +#define CG_TACH_CTRL__TARGET_PERIOD__SHIFT 0x3 +#define CG_TACH_CTRL__TARGET_PERIOD_MASK 0xFFFFFFF8L //THM_THERMAL_INT_ENA #define THM_THERMAL_INT_ENA__THERM_INTH_SET__SHIFT 0x0 diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h index 216a401028de..442ca7c471a5 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h @@ -33,6 +33,14 @@ #define mmUVD_POWER_STATUS_BASE_IDX 1 #define mmCC_UVD_HARVESTING 0x00c7 #define mmCC_UVD_HARVESTING_BASE_IDX 1 +#define mmUVD_DPG_LMA_CTL 0x00d1 +#define mmUVD_DPG_LMA_CTL_BASE_IDX 1 +#define mmUVD_DPG_LMA_DATA 0x00d2 +#define mmUVD_DPG_LMA_DATA_BASE_IDX 1 +#define mmUVD_DPG_LMA_MASK 0x00d3 +#define mmUVD_DPG_LMA_MASK_BASE_IDX 1 +#define mmUVD_DPG_PAUSE 0x00d4 +#define mmUVD_DPG_PAUSE_BASE_IDX 1 #define mmUVD_SCRATCH1 0x00d5 #define mmUVD_SCRATCH1_BASE_IDX 1 #define mmUVD_SCRATCH2 0x00d6 @@ -74,6 +82,18 @@ #define mmUVD_LCM_CGC_CNTRL 0x0123 #define mmUVD_LCM_CGC_CNTRL_BASE_IDX 1 +#define mmUVD_MIF_CURR_UV_ADDR_CONFIG 0x0184 +#define mmUVD_MIF_CURR_UV_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_REF_UV_ADDR_CONFIG 0x0185 +#define mmUVD_MIF_REF_UV_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG 0x0186 +#define mmUVD_MIF_RECON1_UV_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_CURR_ADDR_CONFIG 0x0192 +#define mmUVD_MIF_CURR_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_REF_ADDR_CONFIG 0x0193 +#define mmUVD_MIF_REF_ADDR_CONFIG_BASE_IDX 1 +#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x01c5 +#define mmUVD_MIF_RECON1_ADDR_CONFIG_BASE_IDX 1 // addressBlock: uvd_uvdnpdec // base address: 0x20000 @@ -319,6 +339,8 @@ #define mmUVD_LMI_VM_CTRL_BASE_IDX 1 #define mmUVD_LMI_SWAP_CNTL 0x056d #define mmUVD_LMI_SWAP_CNTL_BASE_IDX 1 +#define mmUVD_MPC_CNTL 0x0577 +#define mmUVD_MPC_CNTL_BASE_IDX 1 #define mmUVD_MPC_SET_MUXA0 0x0579 #define mmUVD_MPC_SET_MUXA0_BASE_IDX 1 #define mmUVD_MPC_SET_MUXA1 0x057a diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h index 124383dac284..63457f9df4c5 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h @@ -87,6 +87,26 @@ //CC_UVD_HARVESTING #define 
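These generated headers follow the usual paired _MASK/__SHIFT convention. A generic read-modify-write sketch (the helper is illustrative; in-tree code uses REG_SET_FIELD and the PHM_*_FIELD wrappers seen later in this diff):

        /* Sketch: update one field of a register value via a MASK/SHIFT pair. */
        static inline uint32_t set_reg_field(uint32_t reg, uint32_t mask,
                        uint32_t shift, uint32_t val)
        {
                return (reg & ~mask) | ((val << shift) & mask);
        }

        /* e.g. set_reg_field(tach, CG_TACH_CTRL__TARGET_PERIOD_MASK,
         *              CG_TACH_CTRL__TARGET_PERIOD__SHIFT, tach_period);
         */
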
CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1 #define CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L +//UVD_DPG_LMA_CTL +#define UVD_DPG_LMA_CTL__READ_WRITE__SHIFT 0x0 +#define UVD_DPG_LMA_CTL__MASK_EN__SHIFT 0x1 +#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT__SHIFT 0x2 +#define UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT 0x4 +#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT 0x10 +#define UVD_DPG_LMA_CTL__READ_WRITE_MASK 0x00000001L +#define UVD_DPG_LMA_CTL__MASK_EN_MASK 0x00000002L +#define UVD_DPG_LMA_CTL__ADDR_AUTO_INCREMENT_MASK 0x00000004L +#define UVD_DPG_LMA_CTL__SRAM_SEL_MASK 0x00000010L +#define UVD_DPG_LMA_CTL__READ_WRITE_ADDR_MASK 0xFFFF0000L +//UVD_DPG_PAUSE +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ__SHIFT 0x0 +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK__SHIFT 0x1 +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ__SHIFT 0x2 +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK__SHIFT 0x3 +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK 0x00000001L +#define UVD_DPG_PAUSE__JPEG_PAUSE_DPG_ACK_MASK 0x00000002L +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK 0x00000004L +#define UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK 0x00000008L //UVD_SCRATCH1 #define UVD_SCRATCH1__SCRATCH1_DATA__SHIFT 0x0 #define UVD_SCRATCH1__SCRATCH1_DATA_MASK 0xFFFFFFFFL @@ -965,6 +985,7 @@ #define UVD_LMI_CTRL2__STALL_ARB_UMC__SHIFT 0x8 #define UVD_LMI_CTRL2__MC_READ_ID_SEL__SHIFT 0x9 #define UVD_LMI_CTRL2__MC_WRITE_ID_SEL__SHIFT 0xb +#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT 0x11 #define UVD_LMI_CTRL2__SPH_DIS_MASK 0x00000001L #define UVD_LMI_CTRL2__STALL_ARB_MASK 0x00000002L #define UVD_LMI_CTRL2__ASSERT_UMC_URGENT_MASK 0x00000004L @@ -973,6 +994,7 @@ #define UVD_LMI_CTRL2__STALL_ARB_UMC_MASK 0x00000100L #define UVD_LMI_CTRL2__MC_READ_ID_SEL_MASK 0x00000600L #define UVD_LMI_CTRL2__MC_WRITE_ID_SEL_MASK 0x00001800L +#define UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM_MASK 0x01FE0000L //UVD_MASTINT_EN #define UVD_MASTINT_EN__OVERRUN_RST__SHIFT 0x0 #define UVD_MASTINT_EN__VCPU_EN__SHIFT 0x1 @@ -983,6 +1005,7 @@ #define UVD_MASTINT_EN__SYS_EN_MASK 0x00000004L #define UVD_MASTINT_EN__INT_OVERRUN_MASK 0x007FFFF0L //UVD_SYS_INT_EN +#define UVD_SYS_INT_EN__UVD_JRBC_EN__SHIFT 0x4 #define UVD_SYS_INT_EN__UVD_JRBC_EN_MASK 0x00000010L //JPEG_CGC_CTRL #define JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT 0x0 @@ -1024,6 +1047,19 @@ #define UVD_LMI_CTRL__DB_IT_DATA_COHERENCY_EN_MASK 0x01000000L #define UVD_LMI_CTRL__IT_IT_DATA_COHERENCY_EN_MASK 0x02000000L #define UVD_LMI_CTRL__RFU_MASK 0xF8000000L +//UVD_LMI_STATUS +#define UVD_LMI_STATUS__READ_CLEAN__SHIFT 0x0 +#define UVD_LMI_STATUS__WRITE_CLEAN__SHIFT 0x1 +#define UVD_LMI_STATUS__WRITE_CLEAN_RAW__SHIFT 0x2 +#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN__SHIFT 0x3 +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW__SHIFT 0x6 +#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW__SHIFT 0x9 +#define UVD_LMI_STATUS__READ_CLEAN_MASK 0x00000001L +#define UVD_LMI_STATUS__WRITE_CLEAN_MASK 0x00000002L +#define UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK 0x00000004L +#define UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK 0x00000008L +#define UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK 0x00000040L +#define UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK 0x00000200L //UVD_LMI_SWAP_CNTL #define UVD_LMI_SWAP_CNTL__RB_MC_SWAP__SHIFT 0x0 #define UVD_LMI_SWAP_CNTL__IB_MC_SWAP__SHIFT 0x2 @@ -1057,6 +1093,9 @@ #define UVD_LMI_SWAP_CNTL__RB_WR_MC_SWAP_MASK 0x0C000000L #define UVD_LMI_SWAP_CNTL__RE_MC_SWAP_MASK 0x30000000L #define UVD_LMI_SWAP_CNTL__MP_MC_SWAP_MASK 0xC0000000L +//UVD_MPC_CNTL +#define UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT 0x3 +#define 
UVD_MPC_CNTL__REPLACEMENT_MODE_MASK 0x00000038L //UVD_MPC_SET_MUXA0 #define UVD_MPC_SET_MUXA0__VARA_0__SHIFT 0x0 #define UVD_MPC_SET_MUXA0__VARA_1__SHIFT 0x6 @@ -1138,7 +1177,11 @@ #define UVD_VCPU_CACHE_SIZE2__CACHE_SIZE2_MASK 0x001FFFFFL //UVD_VCPU_CNTL #define UVD_VCPU_CNTL__CLK_EN__SHIFT 0x9 +#define UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP__SHIFT 0x11 +#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT 0x14 #define UVD_VCPU_CNTL__CLK_EN_MASK 0x00000200L +#define UVD_VCPU_CNTL__MIF_WR_LOW_THRESHOLD_BP_MASK 0x00020000L +#define UVD_VCPU_CNTL__PRB_TIMEOUT_VAL_MASK 0x0FF00000L //UVD_SOFT_RESET #define UVD_SOFT_RESET__RBC_SOFT_RESET__SHIFT 0x0 #define UVD_SOFT_RESET__LBSI_SOFT_RESET__SHIFT 0x1 diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 8ae7adb7329b..d2e7c0fa96c2 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1532,6 +1532,94 @@ struct atom_smc_dpm_info_v4_3 uint32_t boardreserved[10]; }; +struct smudpm_i2ccontrollerconfig_t { + uint32_t enabled; + uint32_t slaveaddress; + uint32_t controllerport; + uint32_t controllername; + uint32_t thermalthrottler; + uint32_t i2cprotocol; + uint32_t i2cspeed; +}; + +struct atom_smc_dpm_info_v4_4 +{ + struct atom_common_table_header table_header; + uint32_t i2c_padding[3]; + + uint16_t maxvoltagestepgfx; + uint16_t maxvoltagestepsoc; + + uint8_t vddgfxvrmapping; + uint8_t vddsocvrmapping; + uint8_t vddmem0vrmapping; + uint8_t vddmem1vrmapping; + + uint8_t gfxulvphasesheddingmask; + uint8_t soculvphasesheddingmask; + uint8_t externalsensorpresent; + uint8_t padding8_v; + + uint16_t gfxmaxcurrent; + uint8_t gfxoffset; + uint8_t padding_telemetrygfx; + + uint16_t socmaxcurrent; + uint8_t socoffset; + uint8_t padding_telemetrysoc; + + uint16_t mem0maxcurrent; + uint8_t mem0offset; + uint8_t padding_telemetrymem0; + + uint16_t mem1maxcurrent; + uint8_t mem1offset; + uint8_t padding_telemetrymem1; + + + uint8_t acdcgpio; + uint8_t acdcpolarity; + uint8_t vr0hotgpio; + uint8_t vr0hotpolarity; + + uint8_t vr1hotgpio; + uint8_t vr1hotpolarity; + uint8_t padding1; + uint8_t padding2; + + + uint8_t ledpin0; + uint8_t ledpin1; + uint8_t ledpin2; + uint8_t padding8_4; + + + uint8_t pllgfxclkspreadenabled; + uint8_t pllgfxclkspreadpercent; + uint16_t pllgfxclkspreadfreq; + + + uint8_t uclkspreadenabled; + uint8_t uclkspreadpercent; + uint16_t uclkspreadfreq; + + + uint8_t fclkspreadenabled; + uint8_t fclkspreadpercent; + uint16_t fclkspreadfreq; + + + uint8_t fllgfxclkspreadenabled; + uint8_t fllgfxclkspreadpercent; + uint16_t fllgfxclkspreadfreq; + + + struct smudpm_i2ccontrollerconfig_t i2ccontrollers[7]; + + + uint32_t boardreserved[10]; +}; + /* *************************************************************************** Data Table asic_profiling_info structure diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index f43ed96cfa6c..64ecffd52126 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -146,10 +146,10 @@ struct kgd2kfd_shared_resources { * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val * * KFD currently uses 1024 (= 0x3ff) doorbells per process. If - * doorbells 0x0f0-0x0f7 and 0x2f-0x2f7 are reserved, that means - * mask would be set to 0x1f8 and val set to 0x0f0. + * doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are reserved, that means + * mask would be set to 0x1e0 and val set to 0x0e0. 
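 *
 * (Illustration, not part of this patch: with the example values above a
 * doorbell index D is reserved exactly when
 *
 *	(D & reserved_doorbell_mask) == reserved_doorbell_val
 *
 * which matches both 0x0e0-0x0ff and 0x2e0-0x2ff, because bit 9 falls
 * outside the 0x1e0 mask.)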
*/ - unsigned int sdma_doorbell[2][2]; + unsigned int sdma_doorbell[2][8]; unsigned int reserved_doorbell_mask; unsigned int reserved_doorbell_val; @@ -409,9 +409,9 @@ struct kfd2kgd_calls { struct dma_fence **ef); void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm); void (*release_process_vm)(struct kgd_dev *kgd, void *vm); - uint32_t (*get_process_page_dir)(void *vm); + uint64_t (*get_process_page_dir)(void *vm); void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, - uint32_t vmid, uint32_t page_table_base); + uint32_t vmid, uint64_t page_table_base); int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va, uint64_t size, void *vm, struct kgd_mem **mem, uint64_t *offset, diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 448dee481a38..980e696989b1 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -113,6 +113,9 @@ enum amd_pp_sensors { AMDGPU_PP_SENSOR_GPU_POWER, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, + AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, + AMDGPU_PP_SENSOR_MIN_FAN_RPM, + AMDGPU_PP_SENSOR_MAX_FAN_RPM, }; enum amd_pp_task { @@ -227,6 +230,7 @@ struct amd_pm_funcs { enum amd_dpm_forced_level (*get_performance_level)(void *handle); enum amd_pm_state_type (*get_current_power_state)(void *handle); int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm); + int (*set_fan_speed_rpm)(void *handle, uint32_t rpm); int (*get_pp_num_states)(void *handle, struct pp_states_info *data); int (*get_pp_table)(void *handle, char **table); int (*set_pp_table)(void *handle, const char *buf, size_t size); @@ -271,6 +275,7 @@ struct amd_pm_funcs { int (*get_display_mode_validation_clocks)(void *handle, struct amd_pp_simple_clock_info *clocks); int (*notify_smu_enable_pwe)(void *handle); + int (*enable_mgpu_fan_boost)(void *handle); }; #endif diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index da4ebff5b74d..e8964cae6b93 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -109,11 +109,8 @@ static int pp_sw_fini(void *handle) hwmgr_sw_fini(hwmgr); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) { - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - amdgpu_ucode_fini_bo(adev); - } + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; return 0; } @@ -124,9 +121,6 @@ static int pp_hw_init(void *handle) struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) - amdgpu_ucode_init_bo(adev); - ret = hwmgr_hw_init(hwmgr); if (ret) @@ -273,8 +267,23 @@ const struct amdgpu_ip_block_version pp_smu_ip_block = .funcs = &pp_ip_funcs, }; +/* This interface only be supported On Vi, + * because only smu7/8 can help to load gfx/sdma fw, + * smu need to be enabled before load other ip's fw. 
+ * so call start smu to load smu7 fw and other ip's fw + */ static int pp_dpm_load_fw(void *handle) { + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu) + return -EINVAL; + + if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { + pr_err("fw load failed\n"); + return -EINVAL; + } + return 0; } @@ -576,6 +585,24 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) return ret; } +static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm) +{ + struct pp_hwmgr *hwmgr = handle; + int ret = 0; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) { + pr_info("%s was not implemented.\n", __func__); + return 0; + } + mutex_lock(&hwmgr->smu_lock); + ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm); + mutex_unlock(&hwmgr->smu_lock); + return ret; +} + static int pp_dpm_get_pp_num_states(void *handle, struct pp_states_info *data) { @@ -813,6 +840,12 @@ static int pp_dpm_read_sensor(void *handle, int idx, case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK: *((uint32_t *)value) = hwmgr->pstate_mclk; return 0; + case AMDGPU_PP_SENSOR_MIN_FAN_RPM: + *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM; + return 0; + case AMDGPU_PP_SENSOR_MAX_FAN_RPM: + *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM; + return 0; default: mutex_lock(&hwmgr->smu_lock); ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size); @@ -861,9 +894,14 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) pr_info("%s was not implemented.\n", __func__); return ret; } + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { + pr_info("power profile setting is for manual dpm mode only.\n"); + return ret; + } + mutex_lock(&hwmgr->smu_lock); - if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) - ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size); + ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size); mutex_unlock(&hwmgr->smu_lock); return ret; } @@ -1196,6 +1234,21 @@ static void pp_dpm_powergate_acp(void *handle, bool gate) hwmgr->hwmgr_func->powergate_acp(hwmgr, gate); } +static void pp_dpm_powergate_sdma(void *handle, bool gate) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return; + + if (hwmgr->hwmgr_func->powergate_sdma == NULL) { + pr_info("%s was not implemented.\n", __func__); + return; + } + + hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate); +} + static int pp_set_powergating_by_smu(void *handle, uint32_t block_type, bool gate) { @@ -1218,6 +1271,9 @@ static int pp_set_powergating_by_smu(void *handle, case AMD_IP_BLOCK_TYPE_ACP: pp_dpm_powergate_acp(handle, gate); break; + case AMD_IP_BLOCK_TYPE_SDMA: + pp_dpm_powergate_sdma(handle, gate); + break; default: break; } @@ -1243,6 +1299,24 @@ static int pp_notify_smu_enable_pwe(void *handle) return 0; } +static int pp_enable_mgpu_fan_boost(void *handle) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; + + if (hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) { + return 0; + } + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1255,6 +1329,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .set_fan_speed_percent = pp_dpm_set_fan_speed_percent, .get_fan_speed_percent = 
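The new AMD_IP_BLOCK_TYPE_SDMA case above lets IP code gate SDMA through the same powerplay entry point as UVD/VCE/GFX. A hedged sketch of a call site (assuming the amdgpu_dpm wrapper that fronts set_powergating_by_smu; verify the exact helper name in the tree):

        /* Hypothetical call site: power-gate SDMA when it goes idle. */
        amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, true);
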
pp_dpm_get_fan_speed_percent, .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm, + .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm, .get_pp_num_states = pp_dpm_get_pp_num_states, .get_pp_table = pp_dpm_get_pp_table, .set_pp_table = pp_dpm_set_pp_table, @@ -1287,4 +1362,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .display_clock_voltage_request = pp_display_clock_voltage_request, .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks, .notify_smu_enable_pwe = pp_notify_smu_enable_pwe, + .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 7500a3e61dba..47ac92369739 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -89,7 +89,6 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) hwmgr_init_default_caps(hwmgr); hwmgr_set_user_specify_caps(hwmgr); hwmgr->fan_ctrl_is_in_default_mode = true; - hwmgr->reload_fw = 1; hwmgr_init_workload_prority(hwmgr); switch (hwmgr->chip_family) { @@ -209,17 +208,6 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr) { int ret = 0; - if (!hwmgr || !hwmgr->smumgr_funcs) - return -EINVAL; - - if (hwmgr->smumgr_funcs->start_smu) { - ret = hwmgr->smumgr_funcs->start_smu(hwmgr); - if (ret) { - pr_err("smc start failed\n"); - return -EINVAL; - } - } - if (!hwmgr->pm_en) return 0; @@ -320,13 +308,6 @@ int hwmgr_resume(struct pp_hwmgr *hwmgr) if (!hwmgr) return -EINVAL; - if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) { - if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { - pr_err("smc start failed\n"); - return -EINVAL; - } - } - if (!hwmgr->pm_en) return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index d27c1c9df286..4588bddf8b33 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -488,7 +488,8 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, return 0; } -int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency) +int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, + uint8_t id, uint32_t *frequency) { struct amdgpu_device *adev = hwmgr->adev; struct atom_get_smu_clock_info_parameters_v3_1 parameters; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index 22e21668c93a..fe9e8ceef50e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -236,7 +236,7 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_smc_dpm_parameters *param); int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, - BIOS_CLKID id, uint32_t *frequency); + uint8_t id, uint32_t *frequency); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 9808bd48b386..dd18cb710391 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -552,6 +552,8 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, { struct smu10_hwmgr *data = hwmgr->backend; struct amdgpu_device *adev = hwmgr->adev; + uint32_t min_sclk = hwmgr->display_config->min_core_set_clock; + uint32_t min_mclk = 
hwmgr->display_config->min_mem_set_clock/100; if (hwmgr->smu_version < 0x1E3700) { pr_info("smu firmware version too old, can not set dpm level\n"); @@ -563,6 +565,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, (adev->rev_id >= 8)) return 0; + if (min_sclk < data->gfx_min_freq_limit) + min_sclk = data->gfx_min_freq_limit; + + min_sclk /= 100; /* transfer 10KHz to MHz */ + if (min_mclk < data->clock_table.FClocks[0].Freq) + min_mclk = data->clock_table.FClocks[0].Freq; + switch (level) { case AMD_DPM_FORCED_LEVEL_HIGH: case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: @@ -595,18 +604,18 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - data->gfx_min_freq_limit/100); + min_sclk); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - data->gfx_min_freq_limit/100); + min_sclk); break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, - SMU10_UMD_PSTATE_MIN_FCLK); + min_mclk); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - SMU10_UMD_PSTATE_MIN_FCLK); + min_mclk); break; case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: smum_send_msg_to_smc_with_parameter(hwmgr, @@ -638,12 +647,12 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_AUTO: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - data->gfx_min_freq_limit/100); + min_sclk); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, hwmgr->display_config->num_display > 3 ? SMU10_UMD_PSTATE_PEAK_FCLK : - SMU10_UMD_PSTATE_MIN_FCLK); + min_mclk); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, @@ -674,10 +683,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, - SMU10_UMD_PSTATE_MIN_FCLK); + min_mclk); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - SMU10_UMD_PSTATE_MIN_FCLK); + min_mclk); break; case AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -1144,6 +1153,14 @@ static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr) return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); } +static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate) +{ + if (gate) + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma); + else + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma); +} + static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) { if (bgate) { @@ -1199,6 +1216,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .smus_notify_pwe = smu10_smus_notify_pwe, .display_clock_voltage_request = smu10_display_clock_voltage_request, .powergate_gfx = smu10_gfx_off_control, + .powergate_sdma = smu10_powergate_sdma, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 04b7da0e39a6..6c99cbf51c08 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4106,17 +4106,17 @@ static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr) source->funcs = &smu7_irq_funcs; amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), - AMDGPU_IH_CLIENTID_LEGACY, + AMDGPU_IRQ_CLIENTID_LEGACY, 
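The smu10_dpm_force_dpm_level() clamping above keeps the display-driven minimums from dropping below the hardware floor; min_core_set_clock arrives in 10 kHz units while the SMU messages take MHz, hence the divide by 100. The sclk path condensed into a sketch (helper name illustrative):

        /* Sketch: clamp to the hw floor (both in 10 kHz), then convert to MHz. */
        static uint32_t min_gfxclk_mhz(uint32_t display_min, uint32_t hw_floor)
        {
                uint32_t v = display_min < hw_floor ? hw_floor : display_min;

                return v / 100;
        }
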
VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH, source); amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), - AMDGPU_IH_CLIENTID_LEGACY, + AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW, source); /* Register CTF(GPIO_19) interrupt */ amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev), - AMDGPU_IH_CLIENTID_LEGACY, + AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GPIO_19, source); @@ -5035,6 +5035,18 @@ static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw return 0; } +static int smu7_power_off_asic(struct pp_hwmgr *hwmgr) +{ + int result; + + result = smu7_disable_dpm_tasks(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "[disable_dpm_tasks] Failed to disable DPM!", + ); + + return result; +} + static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .backend_init = &smu7_hwmgr_backend_init, .backend_fini = &smu7_hwmgr_backend_fini, @@ -5092,6 +5104,7 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_power_profile_mode = smu7_get_power_profile_mode, .set_power_profile_mode = smu7_set_power_profile_mode, .get_performance_level = smu7_get_performance_level, + .power_off_asic = smu7_power_off_asic, }; uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c index 44527755e747..5bdc0df5a9f4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c @@ -260,6 +260,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) if (hwmgr->thermal_controller.fanInfo.bNoFan || (hwmgr->thermal_controller.fanInfo. ucTachometerPulsesPerRevolution == 0) || + speed == 0 || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) return 0; @@ -272,7 +273,7 @@ int smu7_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, - CG_TACH_STATUS, TACH_PERIOD, tach_period); + CG_TACH_CTRL, TARGET_PERIOD, tach_period); return smu7_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index b8637049198d..fef111ddb736 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -880,7 +880,7 @@ static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) smu8_update_low_mem_pstate(hwmgr, input); return 0; -}; +} static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr) @@ -934,14 +934,6 @@ static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr) hw_data->cc6_settings.cpu_pstate_disable = false; } -static int smu8_power_off_asic(struct pp_hwmgr *hwmgr) -{ - smu8_power_up_display_clock_sys_pll(hwmgr); - smu8_clear_nb_dpm_flag(hwmgr); - smu8_reset_cc6_data(hwmgr); - return 0; -}; - static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr) { cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, @@ -1011,6 +1003,17 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr) data->acp_boot_level = 0xff; } +static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + smu8_program_voting_clients(hwmgr); + if (smu8_start_dpm(hwmgr)) + return -EINVAL; + smu8_program_bootup_state(hwmgr); + smu8_reset_acp_boot_level(hwmgr); + + return 0; +} + static int 
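For a feel for the TARGET_PERIOD value computed in smu7_fan_ctrl_set_fan_speed_rpm() above: amdgpu_asic_get_xclk() reports the reference clock in 10 kHz units, so the extra factor of 10000 yields hertz (assuming that unit convention holds). A worked example, using a 64-bit intermediate so the sketch cannot overflow:

        uint32_t crystal_clock_freq = 10000;    /* 100 MHz in 10 kHz units */
        uint32_t speed = 3000;                  /* target RPM */
        uint64_t hz = 60ULL * crystal_clock_freq * 10000;       /* 6e9 */
        uint32_t tach_period = (uint32_t)(hz / (8 * speed));    /* 250000 */
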
smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { smu8_disable_nb_dpm(hwmgr); @@ -1020,18 +1023,16 @@ static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr) return -EINVAL; return 0; -}; +} -static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr) +static int smu8_power_off_asic(struct pp_hwmgr *hwmgr) { - smu8_program_voting_clients(hwmgr); - if (smu8_start_dpm(hwmgr)) - return -EINVAL; - smu8_program_bootup_state(hwmgr); - smu8_reset_acp_boot_level(hwmgr); - + smu8_disable_dpm_tasks(hwmgr); + smu8_power_up_display_clock_sys_pll(hwmgr); + smu8_clear_nb_dpm_flag(hwmgr); + smu8_reset_cc6_data(hwmgr); return 0; -}; +} static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *prequest_ps, @@ -1227,14 +1228,17 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr) { - if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) + if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { + smu8_nbdpm_pstate_enable_disable(hwmgr, true, true); return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF); + } return 0; } static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr) { if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) { + smu8_nbdpm_pstate_enable_disable(hwmgr, false, true); return smum_send_msg_to_smc_with_parameter( hwmgr, PPSMC_MSG_UVDPowerON, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 2aab1b475945..4714b5b59825 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -39,6 +39,50 @@ uint16_t convert_to_vddc(uint8_t vid) return (uint16_t) ((6200 - (vid * 25)) / VOLTAGE_SCALE); } +int phm_copy_clock_limits_array( + struct pp_hwmgr *hwmgr, + uint32_t **pptable_info_array, + const uint32_t *pptable_array, + uint32_t power_saving_clock_count) +{ + uint32_t array_size, i; + uint32_t *table; + + array_size = sizeof(uint32_t) * power_saving_clock_count; + table = kzalloc(array_size, GFP_KERNEL); + if (NULL == table) + return -ENOMEM; + + for (i = 0; i < power_saving_clock_count; i++) + table[i] = le32_to_cpu(pptable_array[i]); + + *pptable_info_array = table; + + return 0; +} + +int phm_copy_overdrive_settings_limits_array( + struct pp_hwmgr *hwmgr, + uint32_t **pptable_info_array, + const uint32_t *pptable_array, + uint32_t od_setting_count) +{ + uint32_t array_size, i; + uint32_t *table; + + array_size = sizeof(uint32_t) * od_setting_count; + table = kzalloc(array_size, GFP_KERNEL); + if (NULL == table) + return -ENOMEM; + + for (i = 0; i < od_setting_count; i++) + table[i] = le32_to_cpu(pptable_array[i]); + + *pptable_info_array = table; + + return 0; +} + uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size) { u32 mask = 0; @@ -545,7 +589,7 @@ int phm_irq_process(struct amdgpu_device *adev, uint32_t client_id = entry->client_id; uint32_t src_id = entry->src_id; - if (client_id == AMDGPU_IH_CLIENTID_LEGACY) { + if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) { if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n", PCI_BUS_NUM(adev->pdev->devfn), diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h index 5454289d5226..ad33983a8064 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h @@ -47,6 +47,18 @@ struct watermarks { uint32_t padding[7]; }; +int 
phm_copy_clock_limits_array( + struct pp_hwmgr *hwmgr, + uint32_t **pptable_info_array, + const uint32_t *pptable_array, + uint32_t power_saving_clock_count); + +int phm_copy_overdrive_settings_limits_array( + struct pp_hwmgr *hwmgr, + uint32_t **pptable_info_array, + const uint32_t *pptable_array, + uint32_t od_setting_count); + extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index ca9be583fb62..419a1d77d661 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -39,6 +39,7 @@ #include "soc15_common.h" #include "pppcielanes.h" #include "vega10_hwmgr.h" +#include "vega10_smumgr.h" #include "vega10_processpptables.h" #include "vega10_pptable.h" #include "vega10_thermal.h" @@ -3713,6 +3714,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT; *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid); return 0; + case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: + ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value); + if (!ret) + *size = 8; + break; default: ret = -EINVAL; break; @@ -4940,16 +4946,6 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .get_performance_level = vega10_get_performance_level, }; -int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, - bool enable, uint32_t feature_mask) -{ - int msg = enable ? PPSMC_MSG_EnableSmuFeatures : - PPSMC_MSG_DisableSmuFeatures; - - return smum_send_msg_to_smc_with_parameter(hwmgr, - msg, feature_mask); -} - int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) { hwmgr->hwmgr_func = &vega10_hwmgr_funcs; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index 339820da9e6a..89870556de1b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -441,7 +441,5 @@ int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate); int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); -int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, - bool enable, uint32_t feature_mask); #endif /* _VEGA10_HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 22364875a943..2d88abf97e7b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -23,6 +23,7 @@ #include "hwmgr.h" #include "vega10_hwmgr.h" +#include "vega10_smumgr.h" #include "vega10_powertune.h" #include "vega10_ppsmc.h" #include "vega10_inc.h" diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index 16b1a9cf6cf0..b8747a5c9204 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -451,23 +451,23 @@ static int get_tdp_table( le16_to_cpu(power_tune_table_v2->usLoadLineResistance); } else { power_tune_table_v3 = (ATOM_Vega10_PowerTune_Table_V3 *)table; - tdp_table->usMaximumPowerDeliveryLimit = power_tune_table_v3->usSocketPowerLimit; - tdp_table->usTDC = 
power_tune_table_v3->usTdcLimit; - tdp_table->usEDCLimit = power_tune_table_v3->usEdcLimit; - tdp_table->usSoftwareShutdownTemp = power_tune_table_v3->usSoftwareShutdownTemp; - tdp_table->usTemperatureLimitTedge = power_tune_table_v3->usTemperatureLimitTedge; - tdp_table->usTemperatureLimitHotspot = power_tune_table_v3->usTemperatureLimitHotSpot; - tdp_table->usTemperatureLimitLiquid1 = power_tune_table_v3->usTemperatureLimitLiquid1; - tdp_table->usTemperatureLimitLiquid2 = power_tune_table_v3->usTemperatureLimitLiquid2; - tdp_table->usTemperatureLimitHBM = power_tune_table_v3->usTemperatureLimitHBM; - tdp_table->usTemperatureLimitVrVddc = power_tune_table_v3->usTemperatureLimitVrSoc; - tdp_table->usTemperatureLimitVrMvdd = power_tune_table_v3->usTemperatureLimitVrMem; - tdp_table->usTemperatureLimitPlx = power_tune_table_v3->usTemperatureLimitPlx; + tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v3->usSocketPowerLimit); + tdp_table->usTDC = le16_to_cpu(power_tune_table_v3->usTdcLimit); + tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v3->usEdcLimit); + tdp_table->usSoftwareShutdownTemp = le16_to_cpu(power_tune_table_v3->usSoftwareShutdownTemp); + tdp_table->usTemperatureLimitTedge = le16_to_cpu(power_tune_table_v3->usTemperatureLimitTedge); + tdp_table->usTemperatureLimitHotspot = le16_to_cpu(power_tune_table_v3->usTemperatureLimitHotSpot); + tdp_table->usTemperatureLimitLiquid1 = le16_to_cpu(power_tune_table_v3->usTemperatureLimitLiquid1); + tdp_table->usTemperatureLimitLiquid2 = le16_to_cpu(power_tune_table_v3->usTemperatureLimitLiquid2); + tdp_table->usTemperatureLimitHBM = le16_to_cpu(power_tune_table_v3->usTemperatureLimitHBM); + tdp_table->usTemperatureLimitVrVddc = le16_to_cpu(power_tune_table_v3->usTemperatureLimitVrSoc); + tdp_table->usTemperatureLimitVrMvdd = le16_to_cpu(power_tune_table_v3->usTemperatureLimitVrMem); + tdp_table->usTemperatureLimitPlx = le16_to_cpu(power_tune_table_v3->usTemperatureLimitPlx); tdp_table->ucLiquid1_I2C_address = power_tune_table_v3->ucLiquid1_I2C_address; tdp_table->ucLiquid2_I2C_address = power_tune_table_v3->ucLiquid2_I2C_address; - tdp_table->usBoostStartTemperature = power_tune_table_v3->usBoostStartTemperature; - tdp_table->usBoostStopTemperature = power_tune_table_v3->usBoostStopTemperature; - tdp_table->ulBoostClock = power_tune_table_v3->ulBoostClock; + tdp_table->usBoostStartTemperature = le16_to_cpu(power_tune_table_v3->usBoostStartTemperature); + tdp_table->usBoostStopTemperature = le16_to_cpu(power_tune_table_v3->usBoostStopTemperature); + tdp_table->ulBoostClock = le32_to_cpu(power_tune_table_v3->ulBoostClock); get_scl_sda_value(power_tune_table_v3->ucLiquid_I2C_Line, &scl, &sda); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index aa044c1955fe..3f807d6c95ce 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -23,6 +23,7 @@ #include "vega10_thermal.h" #include "vega10_hwmgr.h" +#include "vega10_smumgr.h" #include "vega10_ppsmc.h" #include "vega10_inc.h" #include "soc15_common.h" @@ -311,6 +312,7 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) int result = 0; if (hwmgr->thermal_controller.fanInfo.bNoFan || + speed == 0 || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) return -1; @@ -321,9 +323,9 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, 
uint32_t speed) if (!result) { crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); - WREG32_SOC15(THM, 0, mmCG_TACH_STATUS, - REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS), - CG_TACH_STATUS, TACH_PERIOD, + WREG32_SOC15(THM, 0, mmCG_TACH_CTRL, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL), + CG_TACH_CTRL, TARGET_PERIOD, tach_period)); } return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 0789d64246ca..9600e2f226e9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -745,8 +745,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr) memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t)); - result = vega12_copy_table_to_smc(hwmgr, - (uint8_t *)pp_table, TABLE_PPTABLE); + result = smum_smc_table_manager(hwmgr, + (uint8_t *)pp_table, TABLE_PPTABLE, false); PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); @@ -1317,7 +1317,11 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx, break; case AMDGPU_PP_SENSOR_GPU_POWER: ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value); - + break; + case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: + ret = vega12_get_enabled_smc_features(hwmgr, (uint64_t *)value); + if (!ret) + *size = 8; break; default: ret = -EINVAL; @@ -2103,8 +2107,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if ((data->water_marks_bitmap & WaterMarksExist) && !(data->water_marks_bitmap & WaterMarksLoaded)) { - result = vega12_copy_table_to_smc(hwmgr, - (uint8_t *)wm_table, TABLE_WATERMARKS); + result = smum_smc_table_manager(hwmgr, + (uint8_t *)wm_table, TABLE_WATERMARKS, false); PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL); data->water_marks_bitmap |= WaterMarksLoaded; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index cb3a5b1737c8..9817f7a5ed29 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c @@ -99,50 +99,6 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) return 0; } -static int copy_clock_limits_array( - struct pp_hwmgr *hwmgr, - uint32_t **pptable_info_array, - const uint32_t *pptable_array) -{ - uint32_t array_size, i; - uint32_t *table; - - array_size = sizeof(uint32_t) * ATOM_VEGA12_PPCLOCK_COUNT; - - table = kzalloc(array_size, GFP_KERNEL); - if (NULL == table) - return -ENOMEM; - - for (i = 0; i < ATOM_VEGA12_PPCLOCK_COUNT; i++) - table[i] = pptable_array[i]; - - *pptable_info_array = table; - - return 0; -} - -static int copy_overdrive_settings_limits_array( - struct pp_hwmgr *hwmgr, - uint32_t **pptable_info_array, - const uint32_t *pptable_array) -{ - uint32_t array_size, i; - uint32_t *table; - - array_size = sizeof(uint32_t) * ATOM_VEGA12_ODSETTING_COUNT; - - table = kzalloc(array_size, GFP_KERNEL); - if (NULL == table) - return -ENOMEM; - - for (i = 0; i < ATOM_VEGA12_ODSETTING_COUNT; i++) - table[i] = pptable_array[i]; - - *pptable_info_array = table; - - return 0; -} - static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable) { struct pp_atomfwctrl_smc_dpm_parameters smc_dpm_table; @@ -250,14 +206,22 @@ 
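A pattern running through the vega12/vega20 hunks above is replacing the per-ASIC copy_table helpers with smum_smc_table_manager(); judging from the converted call sites, the final bool selects direction. Both directions as a sketch (semantics inferred from the TABLE_PPTABLE upload and TABLE_OVERDRIVE export calls in this diff):

        /* Upload the driver's PPTable copy to the SMU (rw == false). */
        ret = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table,
                        TABLE_PPTABLE, false);

        /* Read the overdrive table back from the SMU (rw == true). */
        ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table,
                        TABLE_OVERDRIVE, true);
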
static int init_powerplay_table_information( phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); - if (powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX] > VEGA12_ENGINECLOCK_HARDMAX) + if (le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]) > VEGA12_ENGINECLOCK_HARDMAX) hwmgr->platform_descriptor.overdriveLimit.engineClock = VEGA12_ENGINECLOCK_HARDMAX; else - hwmgr->platform_descriptor.overdriveLimit.engineClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]; - hwmgr->platform_descriptor.overdriveLimit.memoryClock = powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX]; - - copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->ODSettingsMax); - copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->ODSettingsMin); + hwmgr->platform_descriptor.overdriveLimit.engineClock = + le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_GFXCLKFMAX]); + hwmgr->platform_descriptor.overdriveLimit.memoryClock = + le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_UCLKFMAX]); + + phm_copy_overdrive_settings_limits_array(hwmgr, + &pptable_information->od_settings_max, + powerplay_table->ODSettingsMax, + ATOM_VEGA12_ODSETTING_COUNT); + phm_copy_overdrive_settings_limits_array(hwmgr, + &pptable_information->od_settings_min, + powerplay_table->ODSettingsMin, + ATOM_VEGA12_ODSETTING_COUNT); /* hwmgr->platformDescriptor.minOverdriveVDDC = 0; hwmgr->platformDescriptor.maxOverdriveVDDC = 0; @@ -267,15 +231,15 @@ static int init_powerplay_table_information( && hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ACOverdriveSupport); - pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1; - pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2; - pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit; - pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit; - pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit; + pptable_information->us_small_power_limit1 = le16_to_cpu(powerplay_table->usSmallPowerLimit1); + pptable_information->us_small_power_limit2 = le16_to_cpu(powerplay_table->usSmallPowerLimit2); + pptable_information->us_boost_power_limit = le16_to_cpu(powerplay_table->usBoostPowerLimit); + pptable_information->us_od_turbo_power_limit = le16_to_cpu(powerplay_table->usODTurboPowerLimit); + pptable_information->us_od_powersave_power_limit = le16_to_cpu(powerplay_table->usODPowerSavePowerLimit); - pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; + pptable_information->us_software_shutdown_temp = le16_to_cpu(powerplay_table->usSoftwareShutdownTemp); - hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE]; + hwmgr->platform_descriptor.TDPODLimit = le32_to_cpu(powerplay_table->ODSettingsMax[ATOM_VEGA12_ODSETTING_POWERPERCENTAGE]); disable_power_control = 0; if (!disable_power_control) { @@ -285,8 +249,8 @@ static int init_powerplay_table_information( PHM_PlatformCaps_PowerControl); } - copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax); - copy_clock_limits_array(hwmgr, 
&pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin); + phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockMax, ATOM_VEGA12_PPCLOCK_COUNT); + phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockMin, ATOM_VEGA12_PPCLOCK_COUNT); pptable_information->smc_pptable = (PPTable_t *)kmalloc(sizeof(PPTable_t), GFP_KERNEL); if (pptable_information->smc_pptable == NULL) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index d45cbfe8e184..b4dbbb7c334c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -46,6 +46,9 @@ #include "ppinterrupt.h" #include "pp_overdriver.h" #include "pp_thermal.h" +#include "soc15_common.h" +#include "smuio/smuio_9_0_offset.h" +#include "smuio/smuio_9_0_sh_mask.h" static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) { @@ -461,7 +464,7 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, "[GetNumOfDpmLevel] failed to get dpm levels!", return ret); - vega20_read_arg_from_smc(hwmgr, num_of_levels); + *num_of_levels = smum_get_argument(hwmgr); PP_ASSERT_WITH_CODE(*num_of_levels > 0, "[GetNumOfDpmLevel] number of clk levels is invalid!", return -EINVAL); @@ -481,7 +484,7 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr, "[GetDpmFreqByIndex] failed to get dpm freq by index!", return ret); - vega20_read_arg_from_smc(hwmgr, clk); + *clk = smum_get_argument(hwmgr); PP_ASSERT_WITH_CODE(*clk, "[GetDpmFreqByIndex] clk value is invalid!", return -EINVAL); @@ -743,8 +746,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr) memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t)); - result = vega20_copy_table_to_smc(hwmgr, - (uint8_t *)pp_table, TABLE_PPTABLE); + result = smum_smc_table_manager(hwmgr, + (uint8_t *)pp_table, TABLE_PPTABLE, false); PP_ASSERT_WITH_CODE(!result, "[InitSMCTable] Failed to upload PPtable!", return result); @@ -1044,7 +1047,7 @@ static int vega20_od8_get_gfx_clock_base_voltage( "[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!", return ret); - vega20_read_arg_from_smc(hwmgr, voltage); + *voltage = smum_get_argument(hwmgr); *voltage = *voltage / VOLTAGE_SCALE; return 0; @@ -1067,7 +1070,7 @@ static int vega20_od8_initialize_default_settings( vega20_od8_set_feature_id(hwmgr); /* Set default values */ - ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE); + ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true); PP_ASSERT_WITH_CODE(!ret, "Failed to export over drive table!", return ret); @@ -1195,7 +1198,7 @@ static int vega20_od8_initialize_default_settings( } } - ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE); + ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false); PP_ASSERT_WITH_CODE(!ret, "Failed to import over drive table!", return ret); @@ -1214,7 +1217,7 @@ static int vega20_od8_set_settings( struct vega20_od8_single_setting *od8_settings = data->od8_settings.od8_settings_array; - ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE); + ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true); PP_ASSERT_WITH_CODE(!ret, "Failed to export over drive table!", return ret); @@ -1271,7 +1274,7 @@ static int 
vega20_od8_set_settings( break; } - ret = vega20_copy_table_to_smc(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE); + ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false); PP_ASSERT_WITH_CODE(!ret, "Failed to import over drive table!", return ret); @@ -1401,7 +1404,7 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, (clock_select << 16))) == 0, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!", return ret); - vega20_read_arg_from_smc(hwmgr, clock); + *clock = smum_get_argument(hwmgr); /* if DC limit is zero, return AC limit */ if (*clock == 0) { @@ -1410,7 +1413,7 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr, (clock_select << 16))) == 0, "[GetMaxSustainableClock] failed to get max AC clock from SMC!", return ret); - vega20_read_arg_from_smc(hwmgr, clock); + *clock = smum_get_argument(hwmgr); } return 0; @@ -1474,6 +1477,19 @@ static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr) return 0; } +static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr) +{ + int result; + + result = smum_send_msg_to_smc(hwmgr, + PPSMC_MSG_SetMGpuFanBoostLimitRpm); + PP_ASSERT_WITH_CODE(!result, + "[EnableMgpuFan] Failed to enable mgpu fan boost!", + return result); + + return 0; +} + static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = @@ -1544,6 +1560,14 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) "[EnableDPMTasks] Failed to populate umdpstate clocks!", return result); + result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit, + POWER_SOURCE_AC << 16); + PP_ASSERT_WITH_CODE(!result, + "[GetPptLimit] get default PPT limit failed!", + return result); + hwmgr->power_limit = + hwmgr->default_power_limit = smum_get_argument(hwmgr); + return 0; } @@ -1770,14 +1794,14 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0, "[GetClockRanges] Failed to get max clock from SMC!", return ret); - vega20_read_arg_from_smc(hwmgr, clock); + *clock = smum_get_argument(hwmgr); } else { PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clock_select << 16))) == 0, "[GetClockRanges] Failed to get min clock from SMC!", return ret); - vega20_read_arg_from_smc(hwmgr, clock); + *clock = smum_get_argument(hwmgr); } return 0; @@ -1841,7 +1865,7 @@ static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr, int ret = 0; SmuMetrics_t metrics_table; - ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS); + ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true); PP_ASSERT_WITH_CODE(!ret, "Failed to export SMU METRICS table!", return ret); @@ -1862,7 +1886,7 @@ static int vega20_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16))) == 0, "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!", return ret); - vega20_read_arg_from_smc(hwmgr, &gfx_clk); + gfx_clk = smum_get_argument(hwmgr); *gfx_freq = gfx_clk * 100; @@ -1880,7 +1904,7 @@ static int vega20_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16))) == 0, "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!", return ret); - vega20_read_arg_from_smc(hwmgr, &mem_clk); + mem_clk = smum_get_argument(hwmgr); *mclk_freq = mem_clk * 100; @@ -1893,7 +1917,7 @@ static int 
vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr, int ret = 0; SmuMetrics_t metrics_table; - ret = vega20_copy_table_from_smc(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS); + ret = smum_smc_table_manager(hwmgr, (uint8_t *)&metrics_table, TABLE_SMU_METRICS, true); PP_ASSERT_WITH_CODE(!ret, "Failed to export SMU METRICS table!", return ret); @@ -1907,6 +1931,8 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + struct amdgpu_device *adev = hwmgr->adev; + uint32_t val_vid; int ret = 0; switch (idx) { @@ -1941,6 +1967,18 @@ static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 16; ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value); break; + case AMDGPU_PP_SENSOR_VDDGFX: + val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) & + SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >> + SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT; + *((uint32_t *)value) = + (uint32_t)convert_to_vddc((uint8_t)val_vid); + break; + case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK: + ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value); + if (!ret) + *size = 8; + break; default: ret = -EINVAL; break; @@ -2264,6 +2302,25 @@ static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr) return AMD_FAN_CTRL_AUTO; } +static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + switch (mode) { + case AMD_FAN_CTRL_NONE: + vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100); + break; + case AMD_FAN_CTRL_MANUAL: + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) + vega20_fan_ctrl_stop_smc_fan_control(hwmgr); + break; + case AMD_FAN_CTRL_AUTO: + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) + vega20_fan_ctrl_start_smc_fan_control(hwmgr); + break; + default: + break; + } +} + static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *info) { @@ -2612,18 +2669,18 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, data->gfxclk_overdrive = false; data->memclk_overdrive = false; - ret = vega20_copy_table_from_smc(hwmgr, - (uint8_t *)od_table, - TABLE_OVERDRIVE); + ret = smum_smc_table_manager(hwmgr, + (uint8_t *)od_table, + TABLE_OVERDRIVE, true); PP_ASSERT_WITH_CODE(!ret, "Failed to export overdrive table!", return ret); break; case PP_OD_COMMIT_DPM_TABLE: - ret = vega20_copy_table_to_smc(hwmgr, - (uint8_t *)od_table, - TABLE_OVERDRIVE); + ret = smum_smc_table_manager(hwmgr, + (uint8_t *)od_table, + TABLE_OVERDRIVE, false); PP_ASSERT_WITH_CODE(!ret, "Failed to import overdrive table!", return ret); @@ -2847,8 +2904,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if ((data->water_marks_bitmap & WaterMarksExist) && !(data->water_marks_bitmap & WaterMarksLoaded)) { - result = vega20_copy_table_to_smc(hwmgr, - (uint8_t *)wm_table, TABLE_WATERMARKS); + result = smum_smc_table_manager(hwmgr, + (uint8_t *)wm_table, TABLE_WATERMARKS, false); PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return result); @@ -3118,6 +3175,34 @@ static int vega20_power_off_asic(struct pp_hwmgr *hwmgr) return result; } +static int conv_power_profile_to_pplib_workload(int power_profile) +{ + int pplib_workload = 0; + + switch (power_profile) { + case PP_SMC_POWER_PROFILE_FULLSCREEN3D: + pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; + break; + case PP_SMC_POWER_PROFILE_POWERSAVING: + pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT; + break; + case PP_SMC_POWER_PROFILE_VIDEO: + 
pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; + break; + case PP_SMC_POWER_PROFILE_VR: + pplib_workload = WORKLOAD_PPLIB_VR_BIT; + break; + case PP_SMC_POWER_PROFILE_COMPUTE: + pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; + break; + case PP_SMC_POWER_PROFILE_CUSTOM: + pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT; + break; + } + + return pplib_workload; +} + static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) { DpmActivityMonitorCoeffInt_t activity_monitor; @@ -3153,14 +3238,14 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ - workload_type = i + 1; + workload_type = conv_power_profile_to_pplib_workload(i); result = vega20_get_activity_monitor_coeff(hwmgr, (uint8_t *)(&activity_monitor), workload_type); PP_ASSERT_WITH_CODE(!result, "[GetPowerProfile] Failed to get activity monitor!", return result); - size += sprintf(buf + size, "%2d(%14s%s)\n", + size += sprintf(buf + size, "%2d %14s%s:\n", i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " "); size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", @@ -3226,10 +3311,15 @@ static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size) { DpmActivityMonitorCoeffInt_t activity_monitor; - int result = 0; + int workload_type, result = 0; hwmgr->power_profile_mode = input[size]; + if (hwmgr->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { + pr_err("Invalid power profile mode %d\n", hwmgr->power_profile_mode); + return -EINVAL; + } + if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { if (size < 10) return -EINVAL; @@ -3296,8 +3386,11 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui return result); } + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = + conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask, - 1 << hwmgr->power_profile_mode); + 1 << workload_type); return 0; } @@ -3427,15 +3520,25 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .disable_smc_firmware_ctf = vega20_thermal_disable_alert, /* fan control related */ + .get_fan_speed_percent = + vega20_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = + vega20_fan_ctrl_set_fan_speed_percent, .get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info, .get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = + vega20_fan_ctrl_set_fan_speed_rpm, .get_fan_control_mode = vega20_get_fan_control_mode, + .set_fan_control_mode = + vega20_set_fan_control_mode, /* smu memory related */ .notify_cac_buffer_info = vega20_notify_cac_buffer_info, + .enable_mgpu_fan_boost = + vega20_enable_mgpu_fan_boost, }; int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c index 5f1f7a32ac24..e5f7f8230065 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c @@ -100,9 +100,8 @@ static void dump_pptable(PPTable_t *pptable) pr_info("PpmTemperatureThreshold = %d\n", pptable->PpmTemperatureThreshold); pr_info("MemoryOnPackage = 0x%02x\n", pptable->MemoryOnPackage); - pr_info("padding8_limits[0] = 0x%02x\n", 
pptable->padding8_limits[0]); - pr_info("padding8_limits[1] = 0x%02x\n", pptable->padding8_limits[1]); - pr_info("padding8_limits[2] = 0x%02x\n", pptable->padding8_limits[2]); + pr_info("padding8_limits = 0x%02x\n", pptable->padding8_limits); + pr_info("Tvr_SocLimit = %d\n", pptable->Tvr_SocLimit); pr_info("UlvVoltageOffsetSoc = %d\n", pptable->UlvVoltageOffsetSoc); pr_info("UlvVoltageOffsetGfx = %d\n", pptable->UlvVoltageOffsetGfx); @@ -417,8 +416,8 @@ static void dump_pptable(PPTable_t *pptable) pr_info("FanGainEdge = %d\n", pptable->FanGainEdge); pr_info("FanGainHotspot = %d\n", pptable->FanGainHotspot); pr_info("FanGainLiquid = %d\n", pptable->FanGainLiquid); - pr_info("FanGainVrVddc = %d\n", pptable->FanGainVrVddc); - pr_info("FanGainVrMvdd = %d\n", pptable->FanGainVrMvdd); + pr_info("FanGainVrGfx = %d\n", pptable->FanGainVrGfx); + pr_info("FanGainVrSoc = %d\n", pptable->FanGainVrSoc); pr_info("FanGainPlx = %d\n", pptable->FanGainPlx); pr_info("FanGainHbm = %d\n", pptable->FanGainHbm); pr_info("FanPwmMin = %d\n", pptable->FanPwmMin); @@ -533,23 +532,20 @@ static void dump_pptable(PPTable_t *pptable) pr_info("MinVoltageUlvGfx = %d\n", pptable->MinVoltageUlvGfx); pr_info("MinVoltageUlvSoc = %d\n", pptable->MinVoltageUlvSoc); - for (i = 0; i < 14; i++) - pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]); + pr_info("MGpuFanBoostLimitRpm = %d\n", pptable->MGpuFanBoostLimitRpm); + pr_info("padding16_Fan = %d\n", pptable->padding16_Fan); + + pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); + pr_info("FanGainVrMem0 = %d\n", pptable->FanGainVrMem0); - pr_info("Liquid1_I2C_address = 0x%x\n", pptable->Liquid1_I2C_address); - pr_info("Liquid2_I2C_address = 0x%x\n", pptable->Liquid2_I2C_address); - pr_info("Vr_I2C_address = 0x%x\n", pptable->Vr_I2C_address); - pr_info("Plx_I2C_address = 0x%x\n", pptable->Plx_I2C_address); + pr_info("DcBtcGb[AVFS_VOLTAGE_GFX] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_GFX]); + pr_info("DcBtcGb[AVFS_VOLTAGE_SOC] = 0x%x\n", pptable->DcBtcGb[AVFS_VOLTAGE_SOC]); - pr_info("Liquid_I2C_LineSCL = 0x%x\n", pptable->Liquid_I2C_LineSCL); - pr_info("Liquid_I2C_LineSDA = 0x%x\n", pptable->Liquid_I2C_LineSDA); - pr_info("Vr_I2C_LineSCL = 0x%x\n", pptable->Vr_I2C_LineSCL); - pr_info("Vr_I2C_LineSDA = 0x%x\n", pptable->Vr_I2C_LineSDA); + for (i = 0; i < 11; i++) + pr_info("Reserved[%d] = 0x%x\n", i, pptable->Reserved[i]); - pr_info("Plx_I2C_LineSCL = 0x%x\n", pptable->Plx_I2C_LineSCL); - pr_info("Plx_I2C_LineSDA = 0x%x\n", pptable->Plx_I2C_LineSDA); - pr_info("VrSensorPresent = 0x%x\n", pptable->VrSensorPresent); - pr_info("LiquidSensorPresent = 0x%x\n", pptable->LiquidSensorPresent); + for (i = 0; i < 3; i++) + pr_info("Padding32[%d] = 0x%x\n", i, pptable->Padding32[i]); pr_info("MaxVoltageStepGfx = 0x%x\n", pptable->MaxVoltageStepGfx); pr_info("MaxVoltageStepSoc = 0x%x\n", pptable->MaxVoltageStepSoc); @@ -611,6 +607,24 @@ static void dump_pptable(PPTable_t *pptable) pr_info("FllGfxclkSpreadPercent = %d\n", pptable->FllGfxclkSpreadPercent); pr_info("FllGfxclkSpreadFreq = %d\n", pptable->FllGfxclkSpreadFreq); + for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { + pr_info("I2cControllers[%d]:\n", i); + pr_info(" .Enabled = %d\n", + pptable->I2cControllers[i].Enabled); + pr_info(" .SlaveAddress = 0x%x\n", + pptable->I2cControllers[i].SlaveAddress); + pr_info(" .ControllerPort = %d\n", + pptable->I2cControllers[i].ControllerPort); + pr_info(" .ControllerName = %d\n", + pptable->I2cControllers[i].ControllerName); + pr_info(" .ThermalThrottler = %d\n", + 
pptable->I2cControllers[i].ThermalThrottler); + pr_info(" .I2cProtocol = %d\n", + pptable->I2cControllers[i].I2cProtocol); + pr_info(" .I2cSpeed = %d\n", + pptable->I2cControllers[i].I2cSpeed); + } + for (i = 0; i < 10; i++) pr_info("BoardReserved[%d] = 0x%x\n", i, pptable->BoardReserved[i]); @@ -661,50 +675,6 @@ static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) return 0; } -static int copy_clock_limits_array( - struct pp_hwmgr *hwmgr, - uint32_t **pptable_info_array, - const uint32_t *pptable_array, - uint32_t power_saving_clock_count) -{ - uint32_t array_size, i; - uint32_t *table; - - array_size = sizeof(uint32_t) * power_saving_clock_count; - table = kzalloc(array_size, GFP_KERNEL); - if (NULL == table) - return -ENOMEM; - - for (i = 0; i < power_saving_clock_count; i++) - table[i] = pptable_array[i]; - - *pptable_info_array = table; - - return 0; -} - -static int copy_overdrive_settings_limits_array( - struct pp_hwmgr *hwmgr, - uint32_t **pptable_info_array, - const uint32_t *pptable_array, - uint32_t od_setting_count) -{ - uint32_t array_size, i; - uint32_t *table; - - array_size = sizeof(uint32_t) * od_setting_count; - table = kzalloc(array_size, GFP_KERNEL); - if (NULL == table) - return -ENOMEM; - - for (i = 0; i < od_setting_count; i++) - table[i] = pptable_array[i]; - - *pptable_info_array = table; - - return 0; -} - static int copy_overdrive_feature_capabilities_array( struct pp_hwmgr *hwmgr, uint8_t **pptable_info_array, @@ -721,7 +691,7 @@ static int copy_overdrive_feature_capabilities_array( return -ENOMEM; for (i = 0; i < od_feature_count; i++) { - table[i] = pptable_array[i]; + table[i] = le32_to_cpu(pptable_array[i]); if (table[i]) od_supported = true; } @@ -737,29 +707,19 @@ static int copy_overdrive_feature_capabilities_array( static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable) { - struct atom_smc_dpm_info_v4_3 *smc_dpm_table; + struct atom_smc_dpm_info_v4_4 *smc_dpm_table; int index = GetIndexIntoMasterDataTable(smc_dpm_info); + int i; PP_ASSERT_WITH_CODE( smc_dpm_table = smu_atom_get_data_table(hwmgr->adev, index, NULL, NULL, NULL), "[appendVbiosPPTable] Failed to retrieve Smc Dpm Table from VBIOS!", return -1); - ppsmc_pptable->Liquid1_I2C_address = smc_dpm_table->liquid1_i2c_address; - ppsmc_pptable->Liquid2_I2C_address = smc_dpm_table->liquid2_i2c_address; - ppsmc_pptable->Vr_I2C_address = smc_dpm_table->vr_i2c_address; - ppsmc_pptable->Plx_I2C_address = smc_dpm_table->plx_i2c_address; - - ppsmc_pptable->Liquid_I2C_LineSCL = smc_dpm_table->liquid_i2c_linescl; - ppsmc_pptable->Liquid_I2C_LineSDA = smc_dpm_table->liquid_i2c_linesda; - ppsmc_pptable->Vr_I2C_LineSCL = smc_dpm_table->vr_i2c_linescl; - ppsmc_pptable->Vr_I2C_LineSDA = smc_dpm_table->vr_i2c_linesda; - - ppsmc_pptable->Plx_I2C_LineSCL = smc_dpm_table->plx_i2c_linescl; - ppsmc_pptable->Plx_I2C_LineSDA = smc_dpm_table->plx_i2c_linesda; - ppsmc_pptable->VrSensorPresent = smc_dpm_table->vrsensorpresent; - ppsmc_pptable->LiquidSensorPresent = smc_dpm_table->liquidsensorpresent; - + memset(ppsmc_pptable->Padding32, + 0, + sizeof(struct atom_smc_dpm_info_v4_4) - + sizeof(struct atom_common_table_header)); ppsmc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx; ppsmc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc; @@ -818,6 +778,24 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent; ppsmc_pptable->FllGfxclkSpreadFreq = 
smc_dpm_table->fllgfxclkspreadfreq; + if ((smc_dpm_table->table_header.format_revision == 4) && + (smc_dpm_table->table_header.content_revision == 4)) { + for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) { + ppsmc_pptable->I2cControllers[i].Enabled = + smc_dpm_table->i2ccontrollers[i].enabled; + ppsmc_pptable->I2cControllers[i].SlaveAddress = + smc_dpm_table->i2ccontrollers[i].slaveaddress; + ppsmc_pptable->I2cControllers[i].ControllerPort = + smc_dpm_table->i2ccontrollers[i].controllerport; + ppsmc_pptable->I2cControllers[i].ThermalThrottler = + smc_dpm_table->i2ccontrollers[i].thermalthrottler; + ppsmc_pptable->I2cControllers[i].I2cProtocol = + smc_dpm_table->i2ccontrollers[i].i2cprotocol; + ppsmc_pptable->I2cControllers[i].I2cSpeed = + smc_dpm_table->i2ccontrollers[i].i2cspeed; + } + } + return 0; } @@ -834,6 +812,8 @@ static int init_powerplay_table_information( hwmgr->thermal_controller.ucType = powerplay_table->ucThermalControllerType; pptable_information->uc_thermal_controller_type = powerplay_table->ucThermalControllerType; + hwmgr->thermal_controller.fanInfo.ulMinRPM = 0; + hwmgr->thermal_controller.fanInfo.ulMaxRPM = powerplay_table->smcPPTable.FanMaximumRpm; set_hw_cap(hwmgr, ATOM_VEGA20_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, @@ -842,34 +822,40 @@ static int init_powerplay_table_information( phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_MicrocodeFanControl); if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) { - od_feature_count = (powerplay_table->OverDrive8Table.ODFeatureCount > ATOM_VEGA20_ODFEATURE_COUNT) ? - ATOM_VEGA20_ODFEATURE_COUNT : powerplay_table->OverDrive8Table.ODFeatureCount; - od_setting_count = (powerplay_table->OverDrive8Table.ODSettingCount > ATOM_VEGA20_ODSETTING_COUNT) ? - ATOM_VEGA20_ODSETTING_COUNT : powerplay_table->OverDrive8Table.ODSettingCount; + od_feature_count = + (le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount) > + ATOM_VEGA20_ODFEATURE_COUNT) ? + ATOM_VEGA20_ODFEATURE_COUNT : + le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount); + od_setting_count = + (le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount) > + ATOM_VEGA20_ODSETTING_COUNT) ? 
+ ATOM_VEGA20_ODSETTING_COUNT : + le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount); copy_overdrive_feature_capabilities_array(hwmgr, &pptable_information->od_feature_capabilities, powerplay_table->OverDrive8Table.ODFeatureCapabilities, od_feature_count); - copy_overdrive_settings_limits_array(hwmgr, + phm_copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_max, powerplay_table->OverDrive8Table.ODSettingsMax, od_setting_count); - copy_overdrive_settings_limits_array(hwmgr, + phm_copy_overdrive_settings_limits_array(hwmgr, &pptable_information->od_settings_min, powerplay_table->OverDrive8Table.ODSettingsMin, od_setting_count); } - pptable_information->us_small_power_limit1 = powerplay_table->usSmallPowerLimit1; - pptable_information->us_small_power_limit2 = powerplay_table->usSmallPowerLimit2; - pptable_information->us_boost_power_limit = powerplay_table->usBoostPowerLimit; - pptable_information->us_od_turbo_power_limit = powerplay_table->usODTurboPowerLimit; - pptable_information->us_od_powersave_power_limit = powerplay_table->usODPowerSavePowerLimit; + pptable_information->us_small_power_limit1 = le16_to_cpu(powerplay_table->usSmallPowerLimit1); + pptable_information->us_small_power_limit2 = le16_to_cpu(powerplay_table->usSmallPowerLimit2); + pptable_information->us_boost_power_limit = le16_to_cpu(powerplay_table->usBoostPowerLimit); + pptable_information->us_od_turbo_power_limit = le16_to_cpu(powerplay_table->usODTurboPowerLimit); + pptable_information->us_od_powersave_power_limit = le16_to_cpu(powerplay_table->usODPowerSavePowerLimit); - pptable_information->us_software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp; + pptable_information->us_software_shutdown_temp = le16_to_cpu(powerplay_table->usSoftwareShutdownTemp); - hwmgr->platform_descriptor.TDPODLimit = (uint16_t)powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]; + hwmgr->platform_descriptor.TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); disable_power_control = 0; if (!disable_power_control && hwmgr->platform_descriptor.TDPODLimit) @@ -877,13 +863,16 @@ static int init_powerplay_table_information( phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PowerControl); if (powerplay_table->PowerSavingClockTable.ucTableRevision == 1) { - power_saving_clock_count = (powerplay_table->PowerSavingClockTable.PowerSavingClockCount >= ATOM_VEGA20_PPCLOCK_COUNT) ? - ATOM_VEGA20_PPCLOCK_COUNT : powerplay_table->PowerSavingClockTable.PowerSavingClockCount; - copy_clock_limits_array(hwmgr, + power_saving_clock_count = + (le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount) >= + ATOM_VEGA20_PPCLOCK_COUNT) ? 
+ ATOM_VEGA20_PPCLOCK_COUNT : + le32_to_cpu(powerplay_table->PowerSavingClockTable.PowerSavingClockCount); + phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_max, powerplay_table->PowerSavingClockTable.PowerSavingClockMax, power_saving_clock_count); - copy_clock_limits_array(hwmgr, + phm_copy_clock_limits_array(hwmgr, &pptable_information->power_saving_clock_min, powerplay_table->PowerSavingClockTable.PowerSavingClockMin, power_saving_clock_count); @@ -893,7 +882,15 @@ static int init_powerplay_table_information( if (pptable_information->smc_pptable == NULL) return -ENOMEM; - memcpy(pptable_information->smc_pptable, &(powerplay_table->smcPPTable), sizeof(PPTable_t)); + if (powerplay_table->smcPPTable.Version <= 2) + memcpy(pptable_information->smc_pptable, + &(powerplay_table->smcPPTable), + sizeof(PPTable_t) - + sizeof(I2cControllerConfig_t) * I2C_CONTROLLER_NAME_COUNT); + else + memcpy(pptable_information->smc_pptable, + &(powerplay_table->smcPPTable), + sizeof(PPTable_t)); result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c index 2984ddd5428c..ede54e87e287 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c @@ -29,6 +29,78 @@ #include "soc15_common.h" #include "pp_debug.h" +static int vega20_disable_fan_control_feature(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = hwmgr->backend; + int ret = 0; + + if (data->smu_features[GNLD_FAN_CONTROL].supported) { + ret = vega20_enable_smc_features( + hwmgr, false, + data->smu_features[GNLD_FAN_CONTROL]. + smu_feature_bitmap); + PP_ASSERT_WITH_CODE(!ret, + "Disable FAN CONTROL feature Failed!", + return ret); + data->smu_features[GNLD_FAN_CONTROL].enabled = false; + } + + return ret; +} + +int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = hwmgr->backend; + + if (data->smu_features[GNLD_FAN_CONTROL].supported) + return vega20_disable_fan_control_feature(hwmgr); + + return 0; +} + +static int vega20_enable_fan_control_feature(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = hwmgr->backend; + int ret = 0; + + if (data->smu_features[GNLD_FAN_CONTROL].supported) { + ret = vega20_enable_smc_features( + hwmgr, true, + data->smu_features[GNLD_FAN_CONTROL]. 
+ smu_feature_bitmap); + PP_ASSERT_WITH_CODE(!ret, + "Enable FAN CONTROL feature Failed!", + return ret); + data->smu_features[GNLD_FAN_CONTROL].enabled = true; + } + + return ret; +} + +int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + struct vega20_hwmgr *data = hwmgr->backend; + + if (data->smu_features[GNLD_FAN_CONTROL].supported) + return vega20_enable_fan_control_feature(hwmgr); + + return 0; +} + +static int vega20_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + struct amdgpu_device *adev = hwmgr->adev; + + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), + CG_FDO_CTRL2, TMIN, 0)); + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), + CG_FDO_CTRL2, FDO_PWM_MODE, mode)); + + return 0; +} + static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) { int ret = 0; @@ -37,20 +109,67 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) PPSMC_MSG_GetCurrentRpm)) == 0, "Attempt to get current RPM from SMC Failed!", return ret); - PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr, - current_rpm)) == 0, - "Attempt to read current RPM from SMC Failed!", - return ret); + *current_rpm = smum_get_argument(hwmgr); return 0; } +int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t *speed) +{ + struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + uint32_t current_rpm, percent = 0; + int ret = 0; + + ret = vega20_get_current_rpm(hwmgr, ¤t_rpm); + if (ret) + return ret; + + percent = current_rpm * 100 / pp_table->FanMaximumRpm; + + *speed = percent > 100 ? 100 : percent; + + return 0; +} + +int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t speed) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + + if (speed > 100) + speed = 100; + + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) + vega20_fan_ctrl_stop_smc_fan_control(hwmgr); + + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), + CG_FDO_CTRL1, FMAX_DUTY100); + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0), + CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); + + return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info) { memset(fan_speed_info, 0, sizeof(*fan_speed_info)); - fan_speed_info->supports_percent_read = false; - fan_speed_info->supports_percent_write = false; + fan_speed_info->supports_percent_read = true; + fan_speed_info->supports_percent_write = true; fan_speed_info->supports_rpm_read = true; fan_speed_info->supports_rpm_write = true; @@ -64,6 +183,31 @@ int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) return vega20_get_current_rpm(hwmgr, speed); } +int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + struct amdgpu_device *adev = hwmgr->adev; + uint32_t tach_period, crystal_clock_freq; + int result = 0; + + if (!speed) + return -EINVAL; + + if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) { + result = vega20_fan_ctrl_stop_smc_fan_control(hwmgr); + if (result) + return result; + } + + crystal_clock_freq = 
amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); + WREG32_SOC15(THM, 0, mmCG_TACH_CTRL, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL), + CG_TACH_CTRL, TARGET_PERIOD, + tach_period)); + + return vega20_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM); +} + /** * Reads the remote temperature from the SIslands thermal controller. * diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h index 2a6d49fec4e0..2d1769bbd24e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.h @@ -50,15 +50,22 @@ struct vega20_temperature { #define FDO_PWM_MODE_STATIC_RPM 5 extern int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr); -extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); extern int vega20_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); -extern int vega20_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); extern int vega20_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed); +extern int vega20_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, + uint32_t speed); +extern int vega20_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t *speed); +extern int vega20_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t speed); +extern int vega20_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); +extern int vega20_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr); extern int vega20_thermal_disable_alert(struct pp_hwmgr *hwmgr); extern int vega20_start_thermal_controller(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); +extern int vega20_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index a6d92128b19c..e5a60aa44b5d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -328,6 +328,8 @@ struct pp_hwmgr_func { int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n); int (*powergate_mmhub)(struct pp_hwmgr *hwmgr); int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr); + int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate); + int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr); }; struct pp_table_func { @@ -732,7 +734,6 @@ struct pp_hwmgr { void *smu_backend; const struct pp_smumgr_func *smumgr_funcs; bool is_kicker; - bool reload_fw; enum PP_DAL_POWERLEVEL dal_power_level; struct phm_dynamic_state_info dyn_state; diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h index 71191deb4e76..2998a49960ed 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU11_DRIVER_IF_VERSION 0x11 +#define SMU11_DRIVER_IF_VERSION 0x12 #define PPTABLE_V20_SMU_VERSION 2 @@ -165,7 +165,7 @@ #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) #define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) #define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) - +#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT ) #define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 @@ -186,6 
+186,9 @@ #define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000 #define DPM_OVERRIDE_ENABLE_GFXOFF_FCLK_SWITCH 0x00020000 +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + #define VR_MAPPING_VR_SELECT_MASK 0x01 #define VR_MAPPING_VR_SELECT_SHIFT 0x00 @@ -208,15 +211,17 @@ #define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2 #define THROTTLER_STATUS_TEMP_HBM_BIT 3 #define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4 -#define THROTTLER_STATUS_TEMP_VR_MEM_BIT 5 -#define THROTTLER_STATUS_TEMP_LIQUID_BIT 6 -#define THROTTLER_STATUS_TEMP_PLX_BIT 7 -#define THROTTLER_STATUS_TEMP_SKIN_BIT 8 -#define THROTTLER_STATUS_TDC_GFX_BIT 9 -#define THROTTLER_STATUS_TDC_SOC_BIT 10 -#define THROTTLER_STATUS_PPT_BIT 11 -#define THROTTLER_STATUS_FIT_BIT 12 -#define THROTTLER_STATUS_PPM_BIT 13 +#define THROTTLER_STATUS_TEMP_VR_SOC_BIT 5 +#define THROTTLER_STATUS_TEMP_VR_MEM0_BIT 6 +#define THROTTLER_STATUS_TEMP_VR_MEM1_BIT 7 +#define THROTTLER_STATUS_TEMP_LIQUID_BIT 8 +#define THROTTLER_STATUS_TEMP_PLX_BIT 9 +#define THROTTLER_STATUS_TEMP_SKIN_BIT 10 +#define THROTTLER_STATUS_TDC_GFX_BIT 11 +#define THROTTLER_STATUS_TDC_SOC_BIT 12 +#define THROTTLER_STATUS_PPT_BIT 13 +#define THROTTLER_STATUS_FIT_BIT 14 +#define THROTTLER_STATUS_PPM_BIT 15 #define TABLE_TRANSFER_OK 0x0 @@ -236,6 +241,58 @@ #define XGMI_STATE_D0 1 #define XGMI_STATE_D3 0 +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, + I2C_CONTROLLER_PORT_1 = 1, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VDDCI, + I2C_CONTROLLER_NAME_VR_HBM, + I2C_CONTROLLER_NAME_LIQUID_0, + I2C_CONTROLLER_NAME_LIQUID_1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VDDCI, + I2C_CONTROLLER_THROTTLER_VR_HBM, + I2C_CONTROLLER_THROTTLER_LIQUID_0, + I2C_CONTROLLER_THROTTLER_LIQUID_1, + I2C_CONTROLLER_THROTTLER_PLX, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, + I2C_CONTROLLER_PROTOCOL_VR_IR35217, + I2C_CONTROLLER_PROTOCOL_TMP_TMP102A, + I2C_CONTROLLER_PROTOCOL_SPARE_0, + I2C_CONTROLLER_PROTOCOL_SPARE_1, + I2C_CONTROLLER_PROTOCOL_SPARE_2, +} I2cControllerProtocol_e; + +typedef enum { + I2C_CONTROLLER_SPEED_SLOW = 0, + I2C_CONTROLLER_SPEED_FAST = 1, +} I2cControllerSpeed_e; + +typedef struct { + uint32_t Enabled; + uint32_t SlaveAddress; + uint32_t ControllerPort; + uint32_t ControllerName; + + uint32_t ThermalThrottler; + uint32_t I2cProtocol; + uint32_t I2cSpeed; +} I2cControllerConfig_t; + typedef struct { uint32_t a; uint32_t b; @@ -269,6 +326,12 @@ typedef enum { } PPCLK_e; typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + +typedef enum { VOLTAGE_MODE_AVFS = 0, VOLTAGE_MODE_AVFS_SS, VOLTAGE_MODE_SS, @@ -328,8 +391,8 @@ typedef struct { uint16_t PpmTemperatureThreshold; uint8_t MemoryOnPackage; - uint8_t padding8_limits[3]; - + uint8_t padding8_limits; + uint16_t Tvr_SocLimit; uint16_t UlvVoltageOffsetSoc; uint16_t UlvVoltageOffsetGfx; @@ -400,8 +463,8 @@ typedef struct { uint16_t FanGainEdge; uint16_t FanGainHotspot; uint16_t FanGainLiquid; - uint16_t FanGainVrVddc; - uint16_t FanGainVrMvdd; + uint16_t FanGainVrGfx; + uint16_t FanGainVrSoc; uint16_t FanGainPlx; uint16_t FanGainHbm; uint16_t FanPwmMin; @@ -438,7 +501,7 @@ typedef struct { uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; uint8_t Padding8_GfxBtc[2]; - 
uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; + int16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; @@ -461,24 +524,14 @@ typedef struct { uint16_t MGpuFanBoostLimitRpm; uint16_t padding16_Fan; - uint32_t Reserved[13]; - + uint16_t FanGainVrMem0; + uint16_t FanGainVrMem1; + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; - uint8_t Liquid1_I2C_address; - uint8_t Liquid2_I2C_address; - uint8_t Vr_I2C_address; - uint8_t Plx_I2C_address; + uint32_t Reserved[11]; - uint8_t Liquid_I2C_LineSCL; - uint8_t Liquid_I2C_LineSDA; - uint8_t Vr_I2C_LineSCL; - uint8_t Vr_I2C_LineSDA; - - uint8_t Plx_I2C_LineSCL; - uint8_t Plx_I2C_LineSDA; - uint8_t VrSensorPresent; - uint8_t LiquidSensorPresent; + uint32_t Padding32[3]; uint16_t MaxVoltageStepGfx; uint16_t MaxVoltageStepSoc; @@ -545,6 +598,8 @@ typedef struct { uint8_t FllGfxclkSpreadPercent; uint16_t FllGfxclkSpreadFreq; + I2cControllerConfig_t I2cControllers[I2C_CONTROLLER_NAME_COUNT]; + uint32_t BoardReserved[10]; @@ -601,7 +656,9 @@ typedef struct { uint16_t TemperatureHotspot ; uint16_t TemperatureHBM ; uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrMem ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; uint16_t TemperatureLiquid ; uint16_t TemperaturePlx ; uint32_t ThrottlerStatus ; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 18643e06bc6f..669bd0c2a16c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2269,11 +2269,13 @@ static uint32_t ci_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU7_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case LowSclkInterruptThreshold: return offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT); } + break; } pr_debug("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index ec14798e87b6..bc8375cbf297 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -302,16 +302,6 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr) hwmgr->avfs_supported = false; } - /* To initialize all clock gating before RLC loaded and running.*/ - amdgpu_device_ip_set_clockgating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); - amdgpu_device_ip_set_clockgating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE); - amdgpu_device_ip_set_clockgating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE); - amdgpu_device_ip_set_clockgating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE); - /* Setup SoftRegsStart here for register lookup in case * DummyBackEnd is used and ProcessFirmwareHeader is not executed */ @@ -2331,6 +2321,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU73_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2340,6 +2331,7 @@ static uint32_t fiji_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU73_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c 
b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index 73aa368a454e..375ccf6ff5f2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -232,26 +232,25 @@ static int iceland_request_smu_load_specific_fw(struct pp_hwmgr *hwmgr, static int iceland_start_smu(struct pp_hwmgr *hwmgr) { + struct iceland_smumgr *priv = hwmgr->smu_backend; int result; - result = iceland_smu_upload_firmware_image(hwmgr); - if (result) - return result; - result = iceland_smu_start_smc(hwmgr); - if (result) - return result; - if (!smu7_is_smc_ram_running(hwmgr)) { - pr_info("smu not running, upload firmware again \n"); result = iceland_smu_upload_firmware_image(hwmgr); if (result) return result; - result = iceland_smu_start_smc(hwmgr); - if (result) - return result; + iceland_smu_start_smc(hwmgr); } + /* Setup SoftRegsStart here to visit the register UcodeLoadStatus + * to check fw loading state + */ + smu7_read_smc_sram_dword(hwmgr, + SMU71_FIRMWARE_HEADER_LOCATION + + offsetof(SMU71_Firmware_Header, SoftRegisters), + &(priv->smu7_data.soft_regs_start), 0x40000); + result = smu7_request_smu_load_fw(hwmgr); return result; @@ -2237,11 +2236,13 @@ static uint32_t iceland_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU71_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case LowSclkInterruptThreshold: return offsetof(SMU71_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", type, member); return 0; @@ -2662,7 +2663,7 @@ const struct pp_smumgr_func iceland_smu_funcs = { .smu_fini = &smu7_smu_fini, .start_smu = &iceland_start_smu, .check_fw_load_finish = &smu7_check_fw_load_finish, - .request_smu_load_fw = &smu7_reload_firmware, + .request_smu_load_fw = &smu7_request_smu_load_fw, .request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw, .send_msg_to_smc = &smu7_send_msg_to_smc, .send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index 6f961dec2088..d0eb8ab50148 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -186,40 +186,12 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr) return 0; } -/* sdma is disabled by default in vbios, need to re-enable in driver */ -static void smu10_smc_enable_sdma(struct pp_hwmgr *hwmgr) -{ - smu10_send_msg_to_smc(hwmgr, - PPSMC_MSG_PowerUpSdma); -} - -static void smu10_smc_disable_sdma(struct pp_hwmgr *hwmgr) -{ - smu10_send_msg_to_smc(hwmgr, - PPSMC_MSG_PowerDownSdma); -} - -/* vcn is disabled by default in vbios, need to re-enable in driver */ -static void smu10_smc_enable_vcn(struct pp_hwmgr *hwmgr) -{ - smu10_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PowerUpVcn, 0); -} - -static void smu10_smc_disable_vcn(struct pp_hwmgr *hwmgr) -{ - smu10_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PowerDownVcn, 0); -} - static int smu10_smu_fini(struct pp_hwmgr *hwmgr) { struct smu10_smumgr *priv = (struct smu10_smumgr *)(hwmgr->smu_backend); if (priv) { - smu10_smc_disable_sdma(hwmgr); - smu10_smc_disable_vcn(hwmgr); amdgpu_bo_free_kernel(&priv->smu_tables.entry[SMU10_WMTABLE].handle, &priv->smu_tables.entry[SMU10_WMTABLE].mc_addr, &priv->smu_tables.entry[SMU10_WMTABLE].table); @@ -243,8 +215,7 @@ static int 
smu10_start_smu(struct pp_hwmgr *hwmgr) if (smu10_verify_smc_interface(hwmgr)) return -EINVAL; - smu10_smc_enable_sdma(hwmgr); - smu10_smc_enable_vcn(hwmgr); + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 186dafc7f166..3f51d545e8ff 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -302,44 +302,6 @@ int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_ return 0; } -/* Convert the firmware type to SMU type mask. For MEC, we need to check all MEC related type */ - -static uint32_t smu7_get_mask_for_firmware_type(uint32_t fw_type) -{ - uint32_t result = 0; - - switch (fw_type) { - case UCODE_ID_SDMA0: - result = UCODE_ID_SDMA0_MASK; - break; - case UCODE_ID_SDMA1: - result = UCODE_ID_SDMA1_MASK; - break; - case UCODE_ID_CP_CE: - result = UCODE_ID_CP_CE_MASK; - break; - case UCODE_ID_CP_PFP: - result = UCODE_ID_CP_PFP_MASK; - break; - case UCODE_ID_CP_ME: - result = UCODE_ID_CP_ME_MASK; - break; - case UCODE_ID_CP_MEC: - case UCODE_ID_CP_MEC_JT1: - case UCODE_ID_CP_MEC_JT2: - result = UCODE_ID_CP_MEC_MASK; - break; - case UCODE_ID_RLC_G: - result = UCODE_ID_RLC_G_MASK; - break; - default: - pr_info("UCode type is out of range! \n"); - result = 0; - } - - return result; -} - static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr, uint32_t fw_type, struct SMU_Entry *entry) @@ -381,10 +343,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) uint32_t fw_to_load; int r = 0; - if (!hwmgr->reload_fw) { - pr_info("skip reloading...\n"); - return 0; - } + amdgpu_ucode_init_bo(hwmgr->adev); if (smu_data->soft_regs_start) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, @@ -467,10 +426,13 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr)); smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr)); - if (smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load)) - pr_err("Fail to Request SMU Load uCode"); + smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load); - return r; + r = smu7_check_fw_load_finish(hwmgr, fw_to_load); + if (!r) + return 0; + + pr_err("SMU load firmware failed\n"); failed: kfree(smu_data->toc); @@ -482,13 +444,12 @@ failed: int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type) { struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend); - uint32_t fw_mask = smu7_get_mask_for_firmware_type(fw_type); uint32_t ret; ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11, smu_data->soft_regs_start + smum_get_offsetof(hwmgr, SMU_SoftRegisters, UcodeLoadStatus), - fw_mask, fw_mask); + fw_type, fw_type); return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index f7e3bc22bb93..f836d30fdd44 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -658,11 +658,10 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr) { struct smu8_smumgr *smu8_smu = hwmgr->smu_backend; uint32_t smc_address; + uint32_t fw_to_check = 0; + int ret; - if (!hwmgr->reload_fw) { - pr_info("skip reloading...\n"); - return 0; - } + amdgpu_ucode_init_bo(hwmgr->adev); 
smu8_smu_populate_firmware_entries(hwmgr); @@ -689,28 +688,9 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr) smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, smu8_smu->toc_entry_power_profiling_index); - return smu8_send_msg_to_smc_with_parameter(hwmgr, + smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob, smu8_smu->toc_entry_initialize_index); -} - -static int smu8_start_smu(struct pp_hwmgr *hwmgr) -{ - int ret = 0; - uint32_t fw_to_check = 0; - struct amdgpu_device *adev = hwmgr->adev; - - uint32_t index = SMN_MP1_SRAM_START_ADDR + - SMU8_FIRMWARE_HEADER_LOCATION + - offsetof(struct SMU8_Firmware_Header, Version); - - - if (hwmgr == NULL || hwmgr->device == NULL) - return -EINVAL; - - cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index); - hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA); - adev->pm.fw_version = hwmgr->smu_version >> 8; fw_to_check = UCODE_ID_RLC_G_MASK | UCODE_ID_SDMA0_MASK | @@ -724,17 +704,38 @@ static int smu8_start_smu(struct pp_hwmgr *hwmgr) if (hwmgr->chip_id == CHIP_STONEY) fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK); - ret = smu8_request_smu_load_fw(hwmgr); - if (ret) + ret = smu8_check_fw_load_finish(hwmgr, fw_to_check); + if (ret) { pr_err("SMU firmware load failed\n"); - - smu8_check_fw_load_finish(hwmgr, fw_to_check); + return ret; + } ret = smu8_load_mec_firmware(hwmgr); - if (ret) + if (ret) { pr_err("Mec Firmware load failed\n"); + return ret; + } - return ret; + return 0; +} + +static int smu8_start_smu(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + uint32_t index = SMN_MP1_SRAM_START_ADDR + + SMU8_FIRMWARE_HEADER_LOCATION + + offsetof(struct SMU8_Firmware_Header, Version); + + + if (hwmgr == NULL || hwmgr->device == NULL) + return -EINVAL; + + cgs_write_register(hwmgr->device, mmMP0PUB_IND_INDEX, index); + hwmgr->smu_version = cgs_read_register(hwmgr->device, mmMP0PUB_IND_DATA); + adev->pm.fw_version = hwmgr->smu_version >> 8; + + return smu8_request_smu_load_fw(hwmgr); } static int smu8_smu_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index ae8378ed32ee..3ed6c5f1e5cf 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -192,6 +192,7 @@ static int tonga_start_in_non_protection_mode(struct pp_hwmgr *hwmgr) static int tonga_start_smu(struct pp_hwmgr *hwmgr) { + struct tonga_smumgr *priv = hwmgr->smu_backend; int result; /* Only start SMC if SMC RAM is not running */ @@ -209,6 +210,14 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr) } } + /* Setup SoftRegsStart here to visit the register UcodeLoadStatus + * to check fw loading state + */ + smu7_read_smc_sram_dword(hwmgr, + SMU72_FIRMWARE_HEADER_LOCATION + + offsetof(SMU72_Firmware_Header, SoftRegisters), + &(priv->smu7_data.soft_regs_start), 0x40000); + result = smu7_request_smu_load_fw(hwmgr); return result; @@ -2619,6 +2628,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) case DRAM_LOG_BUFF_SIZE: return offsetof(SMU72_SoftRegisters, DRAM_LOG_BUFF_SIZE); } + break; case SMU_Discrete_DpmTable: switch (member) { case UvdBootLevel: @@ -2628,6 +2638,7 @@ static uint32_t tonga_get_offsetof(uint32_t type, uint32_t member) case LowSclkInterruptThreshold: return offsetof(SMU72_Discrete_DpmTable, LowSclkInterruptThreshold); } + break; } pr_warn("can't get the offset of type %x member %x\n", 
 			type, member);
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 5d19115f410c..c81acc3192ad 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -88,8 +88,18 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
 	return 0;
 }
 
-static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
-		uint32_t *features_enabled)
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+		bool enable, uint32_t feature_mask)
+{
+	int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
+			PPSMC_MSG_DisableSmuFeatures;
+
+	return smum_send_msg_to_smc_with_parameter(hwmgr,
+			msg, feature_mask);
+}
+
+int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+		uint64_t *features_enabled)
 {
 	if (features_enabled == NULL)
 		return -EINVAL;
@@ -102,9 +112,9 @@ static int vega10_get_smc_features(struct pp_hwmgr *hwmgr,
 
 static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
 {
-	uint32_t features_enabled = 0;
+	uint64_t features_enabled = 0;
 
-	vega10_get_smc_features(hwmgr, &features_enabled);
+	vega10_get_enabled_smc_features(hwmgr, &features_enabled);
 
 	if (features_enabled & SMC_DPM_FEATURES)
 		return true;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
index 424e868bc768..bad760f22624 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h
@@ -42,6 +42,10 @@ struct vega10_smumgr {
 	struct smu_table_array smu_tables;
 };
 
+int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
+		bool enable, uint32_t feature_mask);
+int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
+		uint64_t *features_enabled);
 
 #endif
 
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 7f0e2109f40d..ddb801517667 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -37,8 +37,8 @@
  * @param   hwmgr        the address of the HW manager
  * @param   table_id    the driver's table ID to copy from
  */
-int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id)
+static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+		uint8_t *table, int16_t table_id)
 {
 	struct vega12_smumgr *priv =
 			(struct vega12_smumgr *)(hwmgr->smu_backend);
@@ -75,8 +75,8 @@ int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
  * @param   hwmgr        the address of the HW manager
  * @param   table_id    the table to copy from
  */
-int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id)
+static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+		uint8_t *table, int16_t table_id)
 {
 	struct vega12_smumgr *priv =
 			(struct vega12_smumgr *)(hwmgr->smu_backend);
@@ -351,6 +351,19 @@ static int vega12_start_smu(struct pp_hwmgr *hwmgr)
 	return 0;
 }
 
+static int vega12_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
+				    uint16_t table_id, bool rw)
+{
+	int ret;
+
+	if (rw)
+		ret = vega12_copy_table_from_smc(hwmgr, table, table_id);
+	else
+		ret = vega12_copy_table_to_smc(hwmgr, table, table_id);
+
+	return ret;
+}
+
 const struct pp_smumgr_func vega12_smu_funcs = {
 	.smu_init = &vega12_smu_init,
 	.smu_fini = &vega12_smu_fini,
@@ -362,4 +375,5 @@ const struct pp_smumgr_func vega12_smu_funcs = {
 	.upload_pptable_settings = NULL,
 	.is_dpm_running = vega12_is_dpm_running,
 	.get_argument = smu9_get_argument,
+	.smc_table_manager = vega12_smc_table_manager,
 };
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
index b285cbc04019..aeec965ce81f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.h
@@ -48,10 +48,6 @@ struct vega12_smumgr {
 #define SMU_FEATURES_HIGH_MASK  0xFFFFFFFF00000000
 #define SMU_FEATURES_HIGH_SHIFT 32
 
-int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id);
-int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id);
 int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
 		bool enable, uint64_t feature_mask);
 int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index fe7f71079e0e..b7ff7d4d6f44 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -148,19 +148,11 @@ static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 	return (ret == PPSMC_Result_OK) ? 0 : -EIO;
 }
 
-/*
- * Retrieve an argument from SMC.
- * @param    hwmgr  the address of the powerplay hardware manager.
- * @param    arg    pointer to store the argument from SMC.
- * @return   Always return 0.
- */
-int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
+static uint32_t vega20_get_argument(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
 
-	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
-
-	return 0;
+	return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
 }
 
 /*
@@ -168,8 +160,8 @@ int vega20_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg)
 * @param   hwmgr        the address of the HW manager
 * @param   table_id    the driver's table ID to copy from
 */
-int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id)
+static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
+		uint8_t *table, int16_t table_id)
 {
 	struct vega20_smumgr *priv =
 			(struct vega20_smumgr *)(hwmgr->smu_backend);
@@ -208,8 +200,8 @@ int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
 * @param   hwmgr        the address of the HW manager
 * @param   table_id    the table to copy from
 */
-int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
-		uint8_t *table, int16_t table_id)
+static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
+		uint8_t *table, int16_t table_id)
 {
 	struct vega20_smumgr *priv =
 			(struct vega20_smumgr *)(hwmgr->smu_backend);
@@ -345,18 +337,12 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
 			"[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!",
 			return ret);
-	PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
-			&smc_features_low)) == 0,
-			"[GetEnabledSMCFeatures] Attemp to read SMU features Low argument failed!",
-			return ret);
+	smc_features_low = vega20_get_argument(hwmgr);
 
 	PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
 			PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
 			"[GetEnabledSMCFeatures] Attemp to get SMU features High failed!",
 			return ret);
-	PP_ASSERT_WITH_CODE((ret = vega20_read_arg_from_smc(hwmgr,
-			&smc_features_high)) == 0,
-			"[GetEnabledSMCFeatures] Attemp to read SMU features High argument failed!",
-			return ret);
+	smc_features_high = vega20_get_argument(hwmgr);
 
 	*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
 			(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
 
@@ -574,6 +560,19 @@ static bool vega20_is_dpm_running(struct pp_hwmgr *hwmgr)
 	return false;
 }
 
+static int vega20_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
+				    uint16_t table_id, bool rw)
+{
+	int ret;
+
+	if (rw)
+		ret = vega20_copy_table_from_smc(hwmgr, table, table_id);
+	else
+		ret = vega20_copy_table_to_smc(hwmgr, table, table_id);
+
+	return ret;
+}
+
 const struct pp_smumgr_func vega20_smu_funcs = {
 	.smu_init = &vega20_smu_init,
 	.smu_fini = &vega20_smu_fini,
@@ -584,4 +583,6 @@ const struct pp_smumgr_func vega20_smu_funcs = {
 	.download_pptable_settings = NULL,
 	.upload_pptable_settings = NULL,
 	.is_dpm_running = vega20_is_dpm_running,
+	.get_argument = vega20_get_argument,
+	.smc_table_manager = vega20_smc_table_manager,
 };
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 3d415fabbd93..9f71512b2510 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -2185,6 +2185,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
 		case DRAM_LOG_BUFF_SIZE:
 			return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE);
 		}
+		break;
 	case SMU_Discrete_DpmTable:
 		switch (member) {
 		case UvdBootLevel:
@@ -2194,6 +2195,7 @@ static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member)
 		case LowSclkInterruptThreshold:
 			return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold);
 		}
+		break;
 	}
 	pr_warn("can't get the offset of type %x member %x\n", type, member);
 	return 0;
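Both vega12 and vega20 now expose their table copies through a single .smc_table_manager callback in pp_smumgr_func, with the rw flag picking the direction: true copies a table from the SMC into the driver buffer, false copies the driver buffer to the SMC. Below is a minimal sketch of how a generic smum-layer wrapper could dispatch through this hook; the wrapper name smum_smc_table_manager and its -EINVAL fallback are assumptions for illustration, not part of this diff.

/* Sketch: generic dispatch through the new .smc_table_manager hook.
 * rw == true copies table_id from the SMC into 'table'; rw == false
 * copies 'table' to the SMC, mirroring vega12/vega20_smc_table_manager.
 */
int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table,
			   uint16_t table_id, bool rw)
{
	if (hwmgr->smumgr_funcs->smc_table_manager)
		return hwmgr->smumgr_funcs->smc_table_manager(hwmgr, table,
							      table_id, rw);

	return -EINVAL;	/* no per-ASIC handler wired up */
}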
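The vega10 side gains vega10_enable_smc_features, which folds the enable and disable paths into one call: it selects PPSMC_MSG_EnableSmuFeatures or PPSMC_MSG_DisableSmuFeatures based on the enable flag and sends the 32-bit feature mask as the message parameter. A hypothetical caller might toggle one feature as sketched here; the helper and its 'bit' parameter are illustrative, not from this diff.

/* Hypothetical usage: clear one SMU feature bit, then set it again.
 * 'bit' stands in for a real vega10 feature-bit define.
 */
static int vega10_toggle_feature(struct pp_hwmgr *hwmgr, uint32_t bit)
{
	uint32_t mask = 1U << bit;	/* one bit of the 32-bit feature mask */
	int ret;

	ret = vega10_enable_smc_features(hwmgr, false, mask);
	if (ret)
		return ret;

	return vega10_enable_smc_features(hwmgr, true, mask);
}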