From e52482dec8366a98ac380b3bdc1a4abb8a390914 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 23 Mar 2018 15:32:28 -0400 Subject: drm/amdgpu: Add MMU notifier type for KFD userptr This commit adds the notion of MMU notifier types GFX and HSA. GFX continues to work like MMU notifiers did before. HSA adds support for KFD userptr BOs. The implementation of KFD userptr eviction is a stub for now. Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 7 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 94 ++++++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h | 11 ++- 5 files changed, 97 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index c2c2bea731e0..83e0c5c331d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -104,6 +104,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev); +int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm); int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1d6e1479da38..2463ff6ac9ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1418,6 +1418,13 @@ bo_reserve_failed: return ret; } +int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, + struct mm_struct *mm) +{ + /* TODO */ + return 0; +} + /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given * KFD process identified by process_info * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dc34b50e6b29..8e66f3702b7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -536,7 +536,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, if (p->bo_list) { amdgpu_bo_list_get_list(p->bo_list, &p->validated); if (p->bo_list->first_userptr != p->bo_list->num_entries) - p->mn = amdgpu_mn_get(p->adev); + p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX); } INIT_LIST_HEAD(&duplicates); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index bd67f4cb8e6c..f2ed18e2ff03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -36,12 +36,14 @@ #include #include "amdgpu.h" +#include "amdgpu_amdkfd.h" struct amdgpu_mn { /* constant after initialisation */ struct amdgpu_device *adev; struct mm_struct *mm; struct mmu_notifier mn; + enum amdgpu_mn_type type; /* only used on destruction */ struct work_struct work; @@ -185,7 +187,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, } /** - * amdgpu_mn_invalidate_range_start - callback to notify about mm change + * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change * * @mn: our notifier * @mn: the mm this callback is about @@ -195,10 +197,10 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, * We block for all BOs between start 
and end to be idle and * unmap them by move them into system domain again. */ -static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end) +static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) { struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); struct interval_tree_node *it; @@ -219,6 +221,49 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, } } +/** + * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change + * + * @mn: our notifier + * @mn: the mm this callback is about + * @start: start of updated range + * @end: end of updated range + * + * We temporarily evict all BOs between start and end. This + * necessitates evicting all user-mode queues of the process. The BOs + * are restorted in amdgpu_mn_invalidate_range_end_hsa. + */ +static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn); + struct interval_tree_node *it; + + /* notification is exclusive, but interval is inclusive */ + end -= 1; + + amdgpu_mn_read_lock(rmn); + + it = interval_tree_iter_first(&rmn->objects, start, end); + while (it) { + struct amdgpu_mn_node *node; + struct amdgpu_bo *bo; + + node = container_of(it, struct amdgpu_mn_node, it); + it = interval_tree_iter_next(it, start, end); + + list_for_each_entry(bo, &node->bos, mn_list) { + struct kgd_mem *mem = bo->kfd_bo; + + if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, + start, end)) + amdgpu_amdkfd_evict_userptr(mem, mm); + } + } +} + /** * amdgpu_mn_invalidate_range_end - callback to notify about mm change * @@ -239,23 +284,39 @@ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn, amdgpu_mn_read_unlock(rmn); } -static const struct mmu_notifier_ops amdgpu_mn_ops = { - .release = amdgpu_mn_release, - .invalidate_range_start = amdgpu_mn_invalidate_range_start, - .invalidate_range_end = amdgpu_mn_invalidate_range_end, +static const struct mmu_notifier_ops amdgpu_mn_ops[] = { + [AMDGPU_MN_TYPE_GFX] = { + .release = amdgpu_mn_release, + .invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx, + .invalidate_range_end = amdgpu_mn_invalidate_range_end, + }, + [AMDGPU_MN_TYPE_HSA] = { + .release = amdgpu_mn_release, + .invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa, + .invalidate_range_end = amdgpu_mn_invalidate_range_end, + }, }; +/* Low bits of any reasonable mm pointer will be unused due to struct + * alignment. Use these bits to make a unique key from the mm pointer + * and notifier type. + */ +#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type)) + /** * amdgpu_mn_get - create notifier context * * @adev: amdgpu device pointer + * @type: type of MMU notifier context * * Creates a notifier context for current->mm. 
*/ -struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) +struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev, + enum amdgpu_mn_type type) { struct mm_struct *mm = current->mm; struct amdgpu_mn *rmn; + unsigned long key = AMDGPU_MN_KEY(mm, type); int r; mutex_lock(&adev->mn_lock); @@ -264,8 +325,8 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) return ERR_PTR(-EINTR); } - hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm) - if (rmn->mm == mm) + hash_for_each_possible(adev->mn_hash, rmn, node, key) + if (AMDGPU_MN_KEY(rmn->mm, rmn->type) == key) goto release_locks; rmn = kzalloc(sizeof(*rmn), GFP_KERNEL); @@ -276,8 +337,9 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) rmn->adev = adev; rmn->mm = mm; - rmn->mn.ops = &amdgpu_mn_ops; init_rwsem(&rmn->lock); + rmn->type = type; + rmn->mn.ops = &amdgpu_mn_ops[type]; rmn->objects = RB_ROOT_CACHED; mutex_init(&rmn->read_lock); atomic_set(&rmn->recursion, 0); @@ -286,7 +348,7 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) if (r) goto free_rmn; - hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm); + hash_add(adev->mn_hash, &rmn->node, AMDGPU_MN_KEY(mm, type)); release_locks: up_write(&mm->mmap_sem); @@ -315,12 +377,14 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) { unsigned long end = addr + amdgpu_bo_size(bo) - 1; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + enum amdgpu_mn_type type = + bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX; struct amdgpu_mn *rmn; struct amdgpu_mn_node *node = NULL; struct list_head bos; struct interval_tree_node *it; - rmn = amdgpu_mn_get(adev); + rmn = amdgpu_mn_get(adev, type); if (IS_ERR(rmn)) return PTR_ERR(rmn); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h index d0095a3793b8..eb0f432f78fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h @@ -29,16 +29,23 @@ */ struct amdgpu_mn; +enum amdgpu_mn_type { + AMDGPU_MN_TYPE_GFX, + AMDGPU_MN_TYPE_HSA, +}; + #if defined(CONFIG_MMU_NOTIFIER) void amdgpu_mn_lock(struct amdgpu_mn *mn); void amdgpu_mn_unlock(struct amdgpu_mn *mn); -struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev); +struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev, + enum amdgpu_mn_type type); int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); void amdgpu_mn_unregister(struct amdgpu_bo *bo); #else static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {} static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {} -static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev) +static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev, + enum amdgpu_mn_type type) { return NULL; } -- cgit v1.2.1 From 0919195f2b0d7437cb0de49b8975fdd7b5575490 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 23 Mar 2018 15:32:29 -0400 Subject: drm/amdgpu: Enable amdgpu_ttm_tt_get_user_pages in worker threads This commit allows amdgpu_ttm_tt_get_user_pages to work in a worker thread rather than regular process context. This will be used when KFD userptr BOs are restored after an MMU-notifier eviction. 
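Roughly, the pattern is: pin a reference to the owning task when the userptr is
registered, resolve the mm_struct from that task at pinning time, and pick the
GUP variant based on whether we are running in the owning process. A simplified
sketch (the wrapper function below is illustrative only; the real change is in
amdgpu_ttm_tt_get_user_pages(), with the chunked pinning loop and guptask
bookkeeping omitted):

/* Sketch: pin user pages from either process context or a worker thread */
static int userptr_pin_pages_sketch(struct amdgpu_ttm_tt *gtt,
				    unsigned long userptr,
				    unsigned long num_pages,
				    unsigned int flags,
				    struct page **pages)
{
	struct mm_struct *mm = gtt->usertask->mm;
	int r;

	if (!mm)	/* owning process is shutting down */
		return -ESRCH;

	down_read(&mm->mmap_sem);
	if (mm == current->mm)
		/* called from the owning process */
		r = get_user_pages(userptr, num_pages, flags, pages, NULL);
	else
		/* called from a worker thread on behalf of usertask */
		r = get_user_pages_remote(gtt->usertask, mm, userptr,
					  num_pages, flags, pages,
					  NULL, NULL);
	up_read(&mm->mmap_sem);

	return r;
}

The reference on usertask is taken in amdgpu_ttm_tt_set_userptr() and dropped in
amdgpu_ttm_backend_destroy().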
v2: Manage task reference with get_task_struct/put_task_struct Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Acked-by: Alex Deucher Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 38 +++++++++++++++++++++++++-------- 1 file changed, 29 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 205da3ff9cd0..c713d30cba86 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -695,7 +695,7 @@ struct amdgpu_ttm_tt { struct ttm_dma_tt ttm; u64 offset; uint64_t userptr; - struct mm_struct *usermm; + struct task_struct *usertask; uint32_t userflags; spinlock_t guptasklock; struct list_head guptasks; @@ -706,14 +706,18 @@ struct amdgpu_ttm_tt { int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) { struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct mm_struct *mm = gtt->usertask->mm; unsigned int flags = 0; unsigned pinned = 0; int r; + if (!mm) /* Happens during process shutdown */ + return -ESRCH; + if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY)) flags |= FOLL_WRITE; - down_read(¤t->mm->mmap_sem); + down_read(&mm->mmap_sem); if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) { /* check that we only use anonymous memory @@ -721,9 +725,9 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE; struct vm_area_struct *vma; - vma = find_vma(gtt->usermm, gtt->userptr); + vma = find_vma(mm, gtt->userptr); if (!vma || vma->vm_file || vma->vm_end < end) { - up_read(¤t->mm->mmap_sem); + up_read(&mm->mmap_sem); return -EPERM; } } @@ -739,7 +743,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) list_add(&guptask.list, >t->guptasks); spin_unlock(>t->guptasklock); - r = get_user_pages(userptr, num_pages, flags, p, NULL); + if (mm == current->mm) + r = get_user_pages(userptr, num_pages, flags, p, NULL); + else + r = get_user_pages_remote(gtt->usertask, + mm, userptr, num_pages, + flags, p, NULL, NULL); spin_lock(>t->guptasklock); list_del(&guptask.list); @@ -752,12 +761,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) } while (pinned < ttm->num_pages); - up_read(¤t->mm->mmap_sem); + up_read(&mm->mmap_sem); return 0; release_pages: release_pages(pages, pinned); - up_read(¤t->mm->mmap_sem); + up_read(&mm->mmap_sem); return r; } @@ -978,6 +987,9 @@ static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; + if (gtt->usertask) + put_task_struct(gtt->usertask); + ttm_dma_tt_fini(>t->ttm); kfree(gtt); } @@ -1079,8 +1091,13 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return -EINVAL; gtt->userptr = addr; - gtt->usermm = current->mm; gtt->userflags = flags; + + if (gtt->usertask) + put_task_struct(gtt->usertask); + gtt->usertask = current->group_leader; + get_task_struct(gtt->usertask); + spin_lock_init(>t->guptasklock); INIT_LIST_HEAD(>t->guptasks); atomic_set(>t->mmu_invalidations, 0); @@ -1096,7 +1113,10 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) if (gtt == NULL) return NULL; - return gtt->usermm; + if (gtt->usertask == NULL) + return NULL; + + return gtt->usertask->mm; } bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, -- cgit v1.2.1 From 6e08e0995b8f339fd2a7ee4fa11f17396405ef60 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 23 Mar 2018 
15:32:30 -0400 Subject: drm/amdgpu: Avoid reclaim while holding locks taken in MMU notifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When an MMU notifier runs in memory reclaim context, it can deadlock trying to take locks that are already held in the thread causing the memory reclaim. The solution is to avoid memory reclaim while holding locks that are taken in MMU notifiers. This commit fixes kmalloc while holding rmn->lock by moving the call outside the lock. The GFX MMU notifier also locks reservation objects. I have no good solution for avoiding reclaim while holding reservation objects. The HSA MMU notifier will not lock any reservation objects. v2: Moved allocation outside lock instead of using GFP_NOIO Signed-off-by: Felix Kuehling Acked-by: Oded Gabbay Reviewed-by: Christian König Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index f2ed18e2ff03..83e344fbb50a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -380,7 +380,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) enum amdgpu_mn_type type = bo->kfd_bo ? AMDGPU_MN_TYPE_HSA : AMDGPU_MN_TYPE_GFX; struct amdgpu_mn *rmn; - struct amdgpu_mn_node *node = NULL; + struct amdgpu_mn_node *node = NULL, *new_node; struct list_head bos; struct interval_tree_node *it; @@ -388,6 +388,10 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) if (IS_ERR(rmn)) return PTR_ERR(rmn); + new_node = kmalloc(sizeof(*new_node), GFP_KERNEL); + if (!new_node) + return -ENOMEM; + INIT_LIST_HEAD(&bos); down_write(&rmn->lock); @@ -401,13 +405,10 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) list_splice(&node->bos, &bos); } - if (!node) { - node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL); - if (!node) { - up_write(&rmn->lock); - return -ENOMEM; - } - } + if (!node) + node = new_node; + else + kfree(new_node); bo->mn = rmn; -- cgit v1.2.1 From d1853f42b63da94fa0147091d22bf5675b0ff89b Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 23 Mar 2018 15:32:31 -0400 Subject: drm/amdkfd: GFP_NOIO while holding locks taken in MMU notifier When an MMU notifier runs in memory reclaim context, it can deadlock trying to take locks that are already held in the thread causing the memory reclaim. The solution is to avoid memory reclaim while holding locks that are taken in MMU notifiers by using GFP_NOIO. This commit fixes memory allocations done while holding the dqm->lock which is needed in the MMU notifier (dqm->ops.evict_process_queues). 
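The deadlock being avoided looks roughly like this (illustrative only, not
driver code):

/*
 * kmalloc(..., GFP_KERNEL) while holding dqm->lock
 *   -> direct reclaim
 *     -> MMU notifier for a userptr range
 *       -> dqm->ops.evict_process_queues() needs dqm->lock,
 *          which is already held: deadlock
 *
 * Allocating with GFP_NOIO instead keeps these allocations from kicking
 * off the reclaim/IO paths that can call back into the MMU notifier:
 */
*mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);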
Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 3346699960dd..0434f659eeaf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -652,7 +652,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) return -ENOMEM; - *mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); + *mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); if ((*mem_obj) == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index c00c325ed3c9..2bc49c62cc8c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -412,7 +412,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, if (WARN_ON(type >= KFD_MQD_TYPE_MAX)) return NULL; - mqd = kzalloc(sizeof(*mqd), GFP_KERNEL); + mqd = kzalloc(sizeof(*mqd), GFP_NOIO); if (!mqd) return NULL; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 89e4242e43e7..481307b8b4db 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -394,7 +394,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, if (WARN_ON(type >= KFD_MQD_TYPE_MAX)) return NULL; - mqd = kzalloc(sizeof(*mqd), GFP_KERNEL); + mqd = kzalloc(sizeof(*mqd), GFP_NOIO); if (!mqd) return NULL; -- cgit v1.2.1 From 6b95e7973a136181e37446bd29b0b2e2f0d2d653 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 23 Mar 2018 15:32:32 -0400 Subject: drm/amdkfd: Add quiesce_mm and resume_mm to kgd2kfd_calls These interfaces allow KGD to stop and resume all GPU user mode queue access to a process address space. This is needed for handling MMU notifiers of userptrs mapped for GPU access in KFD VMs. Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 38 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2 ++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 +++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 10 +++---- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 6 ++++ 5 files changed, 55 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0434f659eeaf..7b5799530c0f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -541,6 +541,44 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) spin_unlock(&kfd->interrupt_lock); } +int kgd2kfd_quiesce_mm(struct mm_struct *mm) +{ + struct kfd_process *p; + int r; + + /* Because we are called from arbitrary context (workqueue) as opposed + * to process context, kfd_process could attempt to exit while we are + * running so the lookup function increments the process ref count. 
+ */ + p = kfd_lookup_process_by_mm(mm); + if (!p) + return -ESRCH; + + r = kfd_process_evict_queues(p); + + kfd_unref_process(p); + return r; +} + +int kgd2kfd_resume_mm(struct mm_struct *mm) +{ + struct kfd_process *p; + int r; + + /* Because we are called from arbitrary context (workqueue) as opposed + * to process context, kfd_process could attempt to exit while we are + * running so the lookup function increments the process ref count. + */ + p = kfd_lookup_process_by_mm(mm); + if (!p) + return -ESRCH; + + r = kfd_process_restore_queues(p); + + kfd_unref_process(p); + return r; +} + /** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will * prepare for safe eviction of KFD BOs that belong to the specified * process. diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index e0c07d24d251..45bc458f7348 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -43,6 +43,8 @@ static const struct kgd2kfd_calls kgd2kfd = { .interrupt = kgd2kfd_interrupt, .suspend = kgd2kfd_suspend, .resume = kgd2kfd_resume, + .quiesce_mm = kgd2kfd_quiesce_mm, + .resume_mm = kgd2kfd_resume_mm, .schedule_evict_and_restore_process = kgd2kfd_schedule_evict_and_restore_process, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 96a9cc0f02c9..4d5c49ef2dc5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -512,6 +512,8 @@ struct qcm_process_device { /* Approx. time before evicting the process again */ #define PROCESS_ACTIVE_TIME_MS 10 +int kgd2kfd_quiesce_mm(struct mm_struct *mm); +int kgd2kfd_resume_mm(struct mm_struct *mm); int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, struct dma_fence *fence); @@ -681,6 +683,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *); struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); void kfd_unref_process(struct kfd_process *p); +int kfd_process_evict_queues(struct kfd_process *p); +int kfd_process_restore_queues(struct kfd_process *p); void kfd_suspend_all_processes(void); int kfd_resume_all_processes(void); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 1711ad0642f7..2791e72c2058 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -808,7 +808,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm) * Eviction is reference-counted per process-device. This means multiple * evictions from different sources can be nested safely. 
*/ -static int process_evict_queues(struct kfd_process *p) +int kfd_process_evict_queues(struct kfd_process *p) { struct kfd_process_device *pdd; int r = 0; @@ -844,7 +844,7 @@ fail: } /* process_restore_queues - Restore all user queues of a process */ -static int process_restore_queues(struct kfd_process *p) +int kfd_process_restore_queues(struct kfd_process *p) { struct kfd_process_device *pdd; int r, ret = 0; @@ -886,7 +886,7 @@ static void evict_process_worker(struct work_struct *work) flush_delayed_work(&p->restore_work); pr_debug("Started evicting pasid %d\n", p->pasid); - ret = process_evict_queues(p); + ret = kfd_process_evict_queues(p); if (!ret) { dma_fence_signal(p->ef); dma_fence_put(p->ef); @@ -946,7 +946,7 @@ static void restore_process_worker(struct work_struct *work) return; } - ret = process_restore_queues(p); + ret = kfd_process_restore_queues(p); if (!ret) pr_debug("Finished restoring pasid %d\n", p->pasid); else @@ -963,7 +963,7 @@ void kfd_suspend_all_processes(void) cancel_delayed_work_sync(&p->eviction_work); cancel_delayed_work_sync(&p->restore_work); - if (process_evict_queues(p)) + if (kfd_process_evict_queues(p)) pr_err("Failed to suspend process %d\n", p->pasid); dma_fence_signal(p->ef); dma_fence_put(p->ef); diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 237289a72bb7..286cfe7068c1 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -382,6 +382,10 @@ struct kfd2kgd_calls { * * @resume: Notifies amdkfd about a resume action done to a kgd device * + * @quiesce_mm: Quiesce all user queue access to specified MM address space + * + * @resume_mm: Resume user queue access to specified MM address space + * * @schedule_evict_and_restore_process: Schedules work queue that will prepare * for safe eviction of KFD BOs that belong to the specified process. * @@ -399,6 +403,8 @@ struct kgd2kfd_calls { void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry); void (*suspend)(struct kfd_dev *kfd); int (*resume)(struct kfd_dev *kfd); + int (*quiesce_mm)(struct mm_struct *mm); + int (*resume_mm)(struct mm_struct *mm); int (*schedule_evict_and_restore_process)(struct mm_struct *mm, struct dma_fence *fence); }; -- cgit v1.2.1 From 5ae0283e831a94c714fce61063e4724baf364ef3 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Fri, 23 Mar 2018 15:32:33 -0400 Subject: drm/amdgpu: Add userptr support for KFD This adds support for allocating, mapping, unmapping and freeing userptr BOs, and for handling MMU notifiers. 
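At a high level, eviction is handled with atomic counters in the MMU notifier
and a delayed restore worker. A condensed, illustrative outline (the *_outline
wrappers below are not part of the patch; the real code is in
amdgpu_amdkfd_evict_userptr() and amdgpu_amdkfd_restore_userptr_worker()):

/* MMU notifier path: may run in reclaim context, so only atomics here */
static int evict_userptr_outline(struct kgd_mem *mem,
				 struct amdkfd_process_info *pinfo,
				 struct mm_struct *mm)
{
	atomic_inc(&mem->invalid);
	if (atomic_inc_return(&pinfo->evicted_bos) == 1) {
		/* first eviction: stop the process's user mode queues */
		kgd2kfd->quiesce_mm(mm);
		schedule_delayed_work(&pinfo->restore_userptr_work,
			msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS));
	}
	return 0;
}

/* Delayed worker path: re-pin pages, update GPUVM, restart queues */
static void restore_userptr_outline(struct amdkfd_process_info *pinfo,
				    struct mm_struct *mm, int evicted_bos)
{
	if (update_invalid_user_pages(pinfo, mm))	/* get new page addresses */
		return;					/* reschedule and retry */

	if (validate_invalid_user_pages(pinfo))		/* validate BOs, update PTEs */
		return;					/* reschedule and retry */

	/* restart queues only if no eviction raced with the restore */
	if (atomic_cmpxchg(&pinfo->evicted_bos, evicted_bos, 0) == evicted_bos)
		kgd2kfd->resume_mm(mm);
}

The restore is delayed by AMDGPU_USERPTR_RESTORE_DELAY_MS so that bursts of
consecutive mapping changes are coalesced into a single restore pass.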
v2: updated a comment Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 11 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 567 ++++++++++++++++++++++- 2 files changed, 554 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 83e0c5c331d2..c3024b143f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include "amdgpu_sync.h" @@ -59,7 +60,9 @@ struct kgd_mem { uint32_t mapping_flags; + atomic_t invalid; struct amdkfd_process_info *process_info; + struct page **user_pages; struct amdgpu_sync sync; @@ -84,6 +87,9 @@ struct amdkfd_process_info { struct list_head vm_list_head; /* List head for all KFD BOs that belong to a KFD process. */ struct list_head kfd_bo_list; + /* List of userptr BOs that are valid or invalid */ + struct list_head userptr_valid_list; + struct list_head userptr_inval_list; /* Lock to protect kfd_bo_list */ struct mutex lock; @@ -91,6 +97,11 @@ struct amdkfd_process_info { unsigned int n_vms; /* Eviction Fence */ struct amdgpu_amdkfd_fence *eviction_fence; + + /* MMU-notifier related fields */ + atomic_t evicted_bos; + struct delayed_work restore_userptr_work; + struct pid *pid; }; int amdgpu_amdkfd_init(void); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 2463ff6ac9ca..5296e24fd662 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) "kfd2kgd: " fmt #include +#include #include #include "amdgpu_object.h" #include "amdgpu_vm.h" @@ -33,10 +34,20 @@ */ #define VI_BO_SIZE_ALIGN (0x8000) +/* BO flag to indicate a KFD userptr BO */ +#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) + +/* Userptr restore delay, just long enough to allow consecutive VM + * changes to accumulate + */ +#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1 + /* Impose limit on how much memory KFD can use */ static struct { uint64_t max_system_mem_limit; + uint64_t max_userptr_mem_limit; int64_t system_mem_used; + int64_t userptr_mem_used; spinlock_t mem_limit_lock; } kfd_mem_limit; @@ -57,6 +68,7 @@ static const char * const domain_bit_to_string[] = { #define domain_string(domain) domain_bit_to_string[ffs(domain)-1] +static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) @@ -78,6 +90,7 @@ static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, /* Set memory usage limits. 
Current, limits are * System (kernel) memory - 3/8th System RAM + * Userptr memory - 3/4th System RAM */ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) { @@ -90,8 +103,10 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) spin_lock_init(&kfd_mem_limit.mem_limit_lock); kfd_mem_limit.max_system_mem_limit = (mem >> 1) - (mem >> 3); - pr_debug("Kernel memory limit %lluM\n", - (kfd_mem_limit.max_system_mem_limit >> 20)); + kfd_mem_limit.max_userptr_mem_limit = mem - (mem >> 2); + pr_debug("Kernel memory limit %lluM, userptr limit %lluM\n", + (kfd_mem_limit.max_system_mem_limit >> 20), + (kfd_mem_limit.max_userptr_mem_limit >> 20)); } static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev, @@ -111,6 +126,16 @@ static int amdgpu_amdkfd_reserve_system_mem_limit(struct amdgpu_device *adev, goto err_no_mem; } kfd_mem_limit.system_mem_used += (acc_size + size); + } else if (domain == AMDGPU_GEM_DOMAIN_CPU) { + if ((kfd_mem_limit.system_mem_used + acc_size > + kfd_mem_limit.max_system_mem_limit) || + (kfd_mem_limit.userptr_mem_used + (size + acc_size) > + kfd_mem_limit.max_userptr_mem_limit)) { + ret = -ENOMEM; + goto err_no_mem; + } + kfd_mem_limit.system_mem_used += acc_size; + kfd_mem_limit.userptr_mem_used += size; } err_no_mem: spin_unlock(&kfd_mem_limit.mem_limit_lock); @@ -126,10 +151,16 @@ static void unreserve_system_mem_limit(struct amdgpu_device *adev, sizeof(struct amdgpu_bo)); spin_lock(&kfd_mem_limit.mem_limit_lock); - if (domain == AMDGPU_GEM_DOMAIN_GTT) + if (domain == AMDGPU_GEM_DOMAIN_GTT) { kfd_mem_limit.system_mem_used -= (acc_size + size); + } else if (domain == AMDGPU_GEM_DOMAIN_CPU) { + kfd_mem_limit.system_mem_used -= acc_size; + kfd_mem_limit.userptr_mem_used -= size; + } WARN_ONCE(kfd_mem_limit.system_mem_used < 0, "kfd system memory accounting unbalanced"); + WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0, + "kfd userptr memory accounting unbalanced"); spin_unlock(&kfd_mem_limit.mem_limit_lock); } @@ -138,12 +169,17 @@ void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo) { spin_lock(&kfd_mem_limit.mem_limit_lock); - if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) { + if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { + kfd_mem_limit.system_mem_used -= bo->tbo.acc_size; + kfd_mem_limit.userptr_mem_used -= amdgpu_bo_size(bo); + } else if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT) { kfd_mem_limit.system_mem_used -= (bo->tbo.acc_size + amdgpu_bo_size(bo)); } WARN_ONCE(kfd_mem_limit.system_mem_used < 0, "kfd system memory accounting unbalanced"); + WARN_ONCE(kfd_mem_limit.userptr_mem_used < 0, + "kfd userptr memory accounting unbalanced"); spin_unlock(&kfd_mem_limit.mem_limit_lock); } @@ -506,7 +542,8 @@ static void remove_bo_from_vm(struct amdgpu_device *adev, } static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, - struct amdkfd_process_info *process_info) + struct amdkfd_process_info *process_info, + bool userptr) { struct ttm_validate_buffer *entry = &mem->validate_list; struct amdgpu_bo *bo = mem->bo; @@ -515,8 +552,93 @@ static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, entry->shared = true; entry->bo = &bo->tbo; mutex_lock(&process_info->lock); - list_add_tail(&entry->head, &process_info->kfd_bo_list); + if (userptr) + list_add_tail(&entry->head, &process_info->userptr_valid_list); + else + list_add_tail(&entry->head, &process_info->kfd_bo_list); + mutex_unlock(&process_info->lock); +} + +/* Initializes user pages. It registers the MMU notifier and validates + * the userptr BO in the GTT domain. 
+ * + * The BO must already be on the userptr_valid_list. Otherwise an + * eviction and restore may happen that leaves the new BO unmapped + * with the user mode queues running. + * + * Takes the process_info->lock to protect against concurrent restore + * workers. + * + * Returns 0 for success, negative errno for errors. + */ +static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm, + uint64_t user_addr) +{ + struct amdkfd_process_info *process_info = mem->process_info; + struct amdgpu_bo *bo = mem->bo; + struct ttm_operation_ctx ctx = { true, false }; + int ret = 0; + + mutex_lock(&process_info->lock); + + ret = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, user_addr, 0); + if (ret) { + pr_err("%s: Failed to set userptr: %d\n", __func__, ret); + goto out; + } + + ret = amdgpu_mn_register(bo, user_addr); + if (ret) { + pr_err("%s: Failed to register MMU notifier: %d\n", + __func__, ret); + goto out; + } + + /* If no restore worker is running concurrently, user_pages + * should not be allocated + */ + WARN(mem->user_pages, "Leaking user_pages array"); + + mem->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages, + sizeof(struct page *), + GFP_KERNEL | __GFP_ZERO); + if (!mem->user_pages) { + pr_err("%s: Failed to allocate pages array\n", __func__); + ret = -ENOMEM; + goto unregister_out; + } + + ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, mem->user_pages); + if (ret) { + pr_err("%s: Failed to get user pages: %d\n", __func__, ret); + goto free_out; + } + + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, mem->user_pages); + + ret = amdgpu_bo_reserve(bo, true); + if (ret) { + pr_err("%s: Failed to reserve BO\n", __func__); + goto release_out; + } + amdgpu_ttm_placement_from_domain(bo, mem->domain); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + pr_err("%s: failed to validate BO\n", __func__); + amdgpu_bo_unreserve(bo); + +release_out: + if (ret) + release_pages(mem->user_pages, bo->tbo.ttm->num_pages); +free_out: + kvfree(mem->user_pages); + mem->user_pages = NULL; +unregister_out: + if (ret) + amdgpu_mn_unregister(bo); +out: mutex_unlock(&process_info->lock); + return ret; } /* Reserving a BO and its page table BOs must happen atomically to @@ -748,7 +870,8 @@ static int update_gpuvm_pte(struct amdgpu_device *adev, } static int map_bo_to_gpuvm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, struct amdgpu_sync *sync) + struct kfd_bo_va_list *entry, struct amdgpu_sync *sync, + bool no_update_pte) { int ret; @@ -762,6 +885,9 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev, return ret; } + if (no_update_pte) + return 0; + ret = update_gpuvm_pte(adev, entry, sync); if (ret) { pr_err("update_gpuvm_pte() failed\n"); @@ -820,6 +946,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, mutex_init(&info->lock); INIT_LIST_HEAD(&info->vm_list_head); INIT_LIST_HEAD(&info->kfd_bo_list); + INIT_LIST_HEAD(&info->userptr_valid_list); + INIT_LIST_HEAD(&info->userptr_inval_list); info->eviction_fence = amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), @@ -830,6 +958,11 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, goto create_evict_fence_fail; } + info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); + atomic_set(&info->evicted_bos, 0); + INIT_DELAYED_WORK(&info->restore_userptr_work, + amdgpu_amdkfd_restore_userptr_worker); + *process_info = info; *ef = dma_fence_get(&info->eviction_fence->base); } @@ -872,6 +1005,7 @@ reserve_pd_fail: dma_fence_put(*ef); *ef = NULL; *process_info = NULL; + put_pid(info->pid); 
create_evict_fence_fail: mutex_destroy(&info->lock); kfree(info); @@ -967,8 +1101,12 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, /* Release per-process resources when last compute VM is destroyed */ if (!process_info->n_vms) { WARN_ON(!list_empty(&process_info->kfd_bo_list)); + WARN_ON(!list_empty(&process_info->userptr_valid_list)); + WARN_ON(!list_empty(&process_info->userptr_inval_list)); dma_fence_put(&process_info->eviction_fence->base); + cancel_delayed_work_sync(&process_info->restore_userptr_work); + put_pid(process_info->pid); mutex_destroy(&process_info->lock); kfree(process_info); } @@ -1003,9 +1141,10 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + uint64_t user_addr = 0; struct amdgpu_bo *bo; int byte_align; - u32 alloc_domain; + u32 domain, alloc_domain; u64 alloc_flags; uint32_t mapping_flags; int ret; @@ -1014,14 +1153,21 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( * Check on which domain to allocate BO */ if (flags & ALLOC_MEM_FLAGS_VRAM) { - alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; + domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; alloc_flags = AMDGPU_GEM_CREATE_VRAM_CLEARED; alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : AMDGPU_GEM_CREATE_NO_CPU_ACCESS; } else if (flags & ALLOC_MEM_FLAGS_GTT) { - alloc_domain = AMDGPU_GEM_DOMAIN_GTT; + domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; + } else if (flags & ALLOC_MEM_FLAGS_USERPTR) { + domain = AMDGPU_GEM_DOMAIN_GTT; + alloc_domain = AMDGPU_GEM_DOMAIN_CPU; + alloc_flags = 0; + if (!offset || !*offset) + return -EINVAL; + user_addr = *offset; } else { return -EINVAL; } @@ -1078,18 +1224,34 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( } bo->kfd_bo = *mem; (*mem)->bo = bo; + if (user_addr) + bo->flags |= AMDGPU_AMDKFD_USERPTR_BO; (*mem)->va = va; - (*mem)->domain = alloc_domain; + (*mem)->domain = domain; (*mem)->mapped_to_gpu_memory = 0; (*mem)->process_info = avm->process_info; - add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info); + add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr); + + if (user_addr) { + ret = init_user_pages(*mem, current->mm, user_addr); + if (ret) { + mutex_lock(&avm->process_info->lock); + list_del(&(*mem)->validate_list.head); + mutex_unlock(&avm->process_info->lock); + goto allocate_init_user_pages_failed; + } + } if (offset) *offset = amdgpu_bo_mmap_offset(bo); return 0; +allocate_init_user_pages_failed: + amdgpu_bo_unref(&bo); + /* Don't unreserve system mem limit twice */ + goto err_reserve_system_mem; err_bo_create: unreserve_system_mem_limit(adev, size, alloc_domain); err_reserve_system_mem: @@ -1122,12 +1284,24 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( * be freed anyway */ + /* No more MMU notifiers */ + amdgpu_mn_unregister(mem->bo); + /* Make sure restore workers don't access the BO any more */ bo_list_entry = &mem->validate_list; mutex_lock(&process_info->lock); list_del(&bo_list_entry->head); mutex_unlock(&process_info->lock); + /* Free user pages if necessary */ + if (mem->user_pages) { + pr_debug("%s: Freeing user_pages array\n", __func__); + if (mem->user_pages[0]) + release_pages(mem->user_pages, + mem->bo->tbo.ttm->num_pages); + kvfree(mem->user_pages); + } + ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); if (unlikely(ret)) return ret; @@ -1173,21 +1347,32 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( struct kfd_bo_va_list *bo_va_entry = NULL; struct kfd_bo_va_list 
*bo_va_entry_aql = NULL; unsigned long bo_size; - - /* Make sure restore is not running concurrently. - */ - mutex_lock(&mem->process_info->lock); - - mutex_lock(&mem->lock); + bool is_invalid_userptr = false; bo = mem->bo; - if (!bo) { pr_err("Invalid BO when mapping memory to GPU\n"); - ret = -EINVAL; - goto out; + return -EINVAL; + } + + /* Make sure restore is not running concurrently. Since we + * don't map invalid userptr BOs, we rely on the next restore + * worker to do the mapping + */ + mutex_lock(&mem->process_info->lock); + + /* Lock mmap-sem. If we find an invalid userptr BO, we can be + * sure that the MMU notifier is no longer running + * concurrently and the queues are actually stopped + */ + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { + down_write(¤t->mm->mmap_sem); + is_invalid_userptr = atomic_read(&mem->invalid); + up_write(¤t->mm->mmap_sem); } + mutex_lock(&mem->lock); + domain = mem->domain; bo_size = bo->tbo.mem.size; @@ -1200,6 +1385,14 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( if (unlikely(ret)) goto out; + /* Userptr can be marked as "not invalid", but not actually be + * validated yet (still in the system domain). In that case + * the queues are still stopped and we can leave mapping for + * the next restore worker + */ + if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM) + is_invalid_userptr = true; + if (check_if_add_bo_to_vm(avm, mem)) { ret = add_bo_to_vm(adev, mem, avm, false, &bo_va_entry); @@ -1217,7 +1410,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( goto add_bo_to_vm_failed; } - if (mem->mapped_to_gpu_memory == 0) { + if (mem->mapped_to_gpu_memory == 0 && + !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { /* Validate BO only once. The eviction fence gets added to BO * the first time it is mapped. Validate will wait for all * background evictions to complete. @@ -1235,7 +1429,8 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( entry->va, entry->va + bo_size, entry); - ret = map_bo_to_gpuvm(adev, entry, ctx.sync); + ret = map_bo_to_gpuvm(adev, entry, ctx.sync, + is_invalid_userptr); if (ret) { pr_err("Failed to map radeon bo to gpuvm\n"); goto map_bo_to_gpuvm_failed; @@ -1418,13 +1613,337 @@ bo_reserve_failed: return ret; } +/* Evict a userptr BO by stopping the queues if necessary + * + * Runs in MMU notifier, may be in RECLAIM_FS context. This means it + * cannot do any memory allocations, and cannot take any locks that + * are held elsewhere while allocating memory. Therefore this is as + * simple as possible, using atomic counters. + * + * It doesn't do anything to the BO itself. The real work happens in + * restore, where we get updated page addresses. This function only + * ensures that GPU access to the BO is stopped. + */ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) { - /* TODO */ + struct amdkfd_process_info *process_info = mem->process_info; + int invalid, evicted_bos; + int r = 0; + + invalid = atomic_inc_return(&mem->invalid); + evicted_bos = atomic_inc_return(&process_info->evicted_bos); + if (evicted_bos == 1) { + /* First eviction, stop the queues */ + r = kgd2kfd->quiesce_mm(mm); + if (r) + pr_err("Failed to quiesce KFD\n"); + schedule_delayed_work(&process_info->restore_userptr_work, + msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); + } + + return r; +} + +/* Update invalid userptr BOs + * + * Moves invalidated (evicted) userptr BOs from userptr_valid_list to + * userptr_inval_list and updates user pages for all BOs that have + * been invalidated since their last update. 
+ */ +static int update_invalid_user_pages(struct amdkfd_process_info *process_info, + struct mm_struct *mm) +{ + struct kgd_mem *mem, *tmp_mem; + struct amdgpu_bo *bo; + struct ttm_operation_ctx ctx = { false, false }; + int invalid, ret; + + /* Move all invalidated BOs to the userptr_inval_list and + * release their user pages by migration to the CPU domain + */ + list_for_each_entry_safe(mem, tmp_mem, + &process_info->userptr_valid_list, + validate_list.head) { + if (!atomic_read(&mem->invalid)) + continue; /* BO is still valid */ + + bo = mem->bo; + + if (amdgpu_bo_reserve(bo, true)) + return -EAGAIN; + amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + amdgpu_bo_unreserve(bo); + if (ret) { + pr_err("%s: Failed to invalidate userptr BO\n", + __func__); + return -EAGAIN; + } + + list_move_tail(&mem->validate_list.head, + &process_info->userptr_inval_list); + } + + if (list_empty(&process_info->userptr_inval_list)) + return 0; /* All evicted userptr BOs were freed */ + + /* Go through userptr_inval_list and update any invalid user_pages */ + list_for_each_entry(mem, &process_info->userptr_inval_list, + validate_list.head) { + invalid = atomic_read(&mem->invalid); + if (!invalid) + /* BO hasn't been invalidated since the last + * revalidation attempt. Keep its BO list. + */ + continue; + + bo = mem->bo; + + if (!mem->user_pages) { + mem->user_pages = + kvmalloc_array(bo->tbo.ttm->num_pages, + sizeof(struct page *), + GFP_KERNEL | __GFP_ZERO); + if (!mem->user_pages) { + pr_err("%s: Failed to allocate pages array\n", + __func__); + return -ENOMEM; + } + } else if (mem->user_pages[0]) { + release_pages(mem->user_pages, bo->tbo.ttm->num_pages); + } + + /* Get updated user pages */ + ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, + mem->user_pages); + if (ret) { + mem->user_pages[0] = NULL; + pr_info("%s: Failed to get user pages: %d\n", + __func__, ret); + /* Pretend it succeeded. It will fail later + * with a VM fault if the GPU tries to access + * it. Better than hanging indefinitely with + * stalled user mode queues. + */ + } + + /* Mark the BO as valid unless it was invalidated + * again concurrently + */ + if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid) + return -EAGAIN; + } + return 0; } +/* Validate invalid userptr BOs + * + * Validates BOs on the userptr_inval_list, and moves them back to the + * userptr_valid_list. Also updates GPUVM page tables with new page + * addresses and waits for the page table updates to complete. 
+ */ +static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) +{ + struct amdgpu_bo_list_entry *pd_bo_list_entries; + struct list_head resv_list, duplicates; + struct ww_acquire_ctx ticket; + struct amdgpu_sync sync; + + struct amdgpu_vm *peer_vm; + struct kgd_mem *mem, *tmp_mem; + struct amdgpu_bo *bo; + struct ttm_operation_ctx ctx = { false, false }; + int i, ret; + + pd_bo_list_entries = kcalloc(process_info->n_vms, + sizeof(struct amdgpu_bo_list_entry), + GFP_KERNEL); + if (!pd_bo_list_entries) { + pr_err("%s: Failed to allocate PD BO list entries\n", __func__); + return -ENOMEM; + } + + INIT_LIST_HEAD(&resv_list); + INIT_LIST_HEAD(&duplicates); + + /* Get all the page directory BOs that need to be reserved */ + i = 0; + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) + amdgpu_vm_get_pd_bo(peer_vm, &resv_list, + &pd_bo_list_entries[i++]); + /* Add the userptr_inval_list entries to resv_list */ + list_for_each_entry(mem, &process_info->userptr_inval_list, + validate_list.head) { + list_add_tail(&mem->resv_list.head, &resv_list); + mem->resv_list.bo = mem->validate_list.bo; + mem->resv_list.shared = mem->validate_list.shared; + } + + /* Reserve all BOs and page tables for validation */ + ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates); + WARN(!list_empty(&duplicates), "Duplicates should be empty"); + if (ret) + goto out; + + amdgpu_sync_create(&sync); + + /* Avoid triggering eviction fences when unmapping invalid + * userptr BOs (waits for all fences, doesn't use + * FENCE_OWNER_VM) + */ + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) + amdgpu_amdkfd_remove_eviction_fence(peer_vm->root.base.bo, + process_info->eviction_fence, + NULL, NULL); + + ret = process_validate_vms(process_info); + if (ret) + goto unreserve_out; + + /* Validate BOs and update GPUVM page tables */ + list_for_each_entry_safe(mem, tmp_mem, + &process_info->userptr_inval_list, + validate_list.head) { + struct kfd_bo_va_list *bo_va_entry; + + bo = mem->bo; + + /* Copy pages array and validate the BO if we got user pages */ + if (mem->user_pages[0]) { + amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, + mem->user_pages); + amdgpu_ttm_placement_from_domain(bo, mem->domain); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) { + pr_err("%s: failed to validate BO\n", __func__); + goto unreserve_out; + } + } + + /* Validate succeeded, now the BO owns the pages, free + * our copy of the pointer array. Put this BO back on + * the userptr_valid_list. If we need to revalidate + * it, we need to start from scratch. + */ + kvfree(mem->user_pages); + mem->user_pages = NULL; + list_move_tail(&mem->validate_list.head, + &process_info->userptr_valid_list); + + /* Update mapping. If the BO was not validated + * (because we couldn't get user pages), this will + * clear the page table entries, which will result in + * VM faults if the GPU tries to access the invalid + * memory. 
+ */ + list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) { + if (!bo_va_entry->is_mapped) + continue; + + ret = update_gpuvm_pte((struct amdgpu_device *) + bo_va_entry->kgd_dev, + bo_va_entry, &sync); + if (ret) { + pr_err("%s: update PTE failed\n", __func__); + /* make sure this gets validated again */ + atomic_inc(&mem->invalid); + goto unreserve_out; + } + } + } + + /* Update page directories */ + ret = process_update_pds(process_info, &sync); + +unreserve_out: + list_for_each_entry(peer_vm, &process_info->vm_list_head, + vm_list_node) + amdgpu_bo_fence(peer_vm->root.base.bo, + &process_info->eviction_fence->base, true); + ttm_eu_backoff_reservation(&ticket, &resv_list); + amdgpu_sync_wait(&sync, false); + amdgpu_sync_free(&sync); +out: + kfree(pd_bo_list_entries); + + return ret; +} + +/* Worker callback to restore evicted userptr BOs + * + * Tries to update and validate all userptr BOs. If successful and no + * concurrent evictions happened, the queues are restarted. Otherwise, + * reschedule for another attempt later. + */ +static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct amdkfd_process_info *process_info = + container_of(dwork, struct amdkfd_process_info, + restore_userptr_work); + struct task_struct *usertask; + struct mm_struct *mm; + int evicted_bos; + + evicted_bos = atomic_read(&process_info->evicted_bos); + if (!evicted_bos) + return; + + /* Reference task and mm in case of concurrent process termination */ + usertask = get_pid_task(process_info->pid, PIDTYPE_PID); + if (!usertask) + return; + mm = get_task_mm(usertask); + if (!mm) { + put_task_struct(usertask); + return; + } + + mutex_lock(&process_info->lock); + + if (update_invalid_user_pages(process_info, mm)) + goto unlock_out; + /* userptr_inval_list can be empty if all evicted userptr BOs + * have been freed. In that case there is nothing to validate + * and we can just restart the queues. + */ + if (!list_empty(&process_info->userptr_inval_list)) { + if (atomic_read(&process_info->evicted_bos) != evicted_bos) + goto unlock_out; /* Concurrent eviction, try again */ + + if (validate_invalid_user_pages(process_info)) + goto unlock_out; + } + /* Final check for concurrent evicton and atomic update. If + * another eviction happens after successful update, it will + * be a first eviction that calls quiesce_mm. The eviction + * reference counting inside KFD will handle this case. + */ + if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) != + evicted_bos) + goto unlock_out; + evicted_bos = 0; + if (kgd2kfd->resume_mm(mm)) { + pr_err("%s: Failed to resume KFD\n", __func__); + /* No recovery from this failure. Probably the CP is + * hanging. No point trying again. 
+ */ + } +unlock_out: + mutex_unlock(&process_info->lock); + mmput(mm); + put_task_struct(usertask); + + /* If validation failed, reschedule another attempt */ + if (evicted_bos) + schedule_delayed_work(&process_info->restore_userptr_work, + msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); +} + /** amdgpu_amdkfd_gpuvm_restore_process_bos - Restore all BOs for the given * KFD process identified by process_info * -- cgit v1.2.1 From 5e7086eecc32b95288bc76f2a22aadeb368e25ed Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:32:58 -0400 Subject: drm/amdgpu: Remove unused interface from kfd2kgd interface Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 10 ---------- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 10 ---------- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 5 ----- 3 files changed, 25 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index ea54e53172b9..0ff36d45a597 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -98,8 +98,6 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, unsigned int vmid); -static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, - uint32_t hpd_size, uint64_t hpd_gpu_addr); static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, @@ -183,7 +181,6 @@ static const struct kfd2kgd_calls kfd2kgd = { .free_pasid = amdgpu_pasid_free, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_pipeline = kgd_init_pipeline, .init_interrupts = kgd_init_interrupts, .hqd_load = kgd_hqd_load, .hqd_sdma_load = kgd_hqd_sdma_load, @@ -309,13 +306,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, return 0; } -static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, - uint32_t hpd_size, uint64_t hpd_gpu_addr) -{ - /* amdgpu owns the per-pipe state */ - return 0; -} - static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 89264c9a5e9f..6ef9762b4b00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -57,8 +57,6 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, uint32_t sh_mem_bases); static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, unsigned int vmid); -static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, - uint32_t hpd_size, uint64_t hpd_gpu_addr); static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, @@ -141,7 +139,6 @@ static const struct kfd2kgd_calls kfd2kgd = { .free_pasid = amdgpu_pasid_free, .program_sh_mem_settings = kgd_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_pipeline = kgd_init_pipeline, .init_interrupts = 
kgd_init_interrupts, .hqd_load = kgd_hqd_load, .hqd_sdma_load = kgd_hqd_sdma_load, @@ -270,13 +267,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, return 0; } -static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, - uint32_t hpd_size, uint64_t hpd_gpu_addr) -{ - /* amdgpu owns the per-pipe state */ - return 0; -} - static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) { struct amdgpu_device *adev = get_amdgpu_device(kgd); diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 286cfe7068c1..7cf35068866f 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -173,8 +173,6 @@ struct tile_config { * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp * scheduling mode. Only used for no cp scheduling mode. * - * @init_pipeline: Initialized the compute pipelines. - * * @hqd_load: Loads the mqd structure to a H/W hqd slot. used only for no cp * sceduling mode. * @@ -274,9 +272,6 @@ struct kfd2kgd_calls { int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid, unsigned int vmid); - int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id, - uint32_t hpd_size, uint64_t hpd_gpu_addr); - int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id); int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, -- cgit v1.2.1 From cf05fb8b144dae55d094b0fa7991e985a9b4561e Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:32:59 -0400 Subject: drm/amd: Update GFXv9 SDMA MQD structure This matches what the HWS firmware expects on GFXv9 chips. Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/include/v9_structs.h | 48 ++++++++++++++++---------------- 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h index 2fb25abaf7c8..ceaf4932258d 100644 --- a/drivers/gpu/drm/amd/include/v9_structs.h +++ b/drivers/gpu/drm/amd/include/v9_structs.h @@ -29,10 +29,10 @@ struct v9_sdma_mqd { uint32_t sdmax_rlcx_rb_base; uint32_t sdmax_rlcx_rb_base_hi; uint32_t sdmax_rlcx_rb_rptr; + uint32_t sdmax_rlcx_rb_rptr_hi; uint32_t sdmax_rlcx_rb_wptr; + uint32_t sdmax_rlcx_rb_wptr_hi; uint32_t sdmax_rlcx_rb_wptr_poll_cntl; - uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi; - uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo; uint32_t sdmax_rlcx_rb_rptr_addr_hi; uint32_t sdmax_rlcx_rb_rptr_addr_lo; uint32_t sdmax_rlcx_ib_cntl; @@ -44,29 +44,29 @@ struct v9_sdma_mqd { uint32_t sdmax_rlcx_skip_cntl; uint32_t sdmax_rlcx_context_status; uint32_t sdmax_rlcx_doorbell; - uint32_t sdmax_rlcx_virtual_addr; - uint32_t sdmax_rlcx_ape1_cntl; + uint32_t sdmax_rlcx_status; uint32_t sdmax_rlcx_doorbell_log; - uint32_t reserved_22; - uint32_t reserved_23; - uint32_t reserved_24; - uint32_t reserved_25; - uint32_t reserved_26; - uint32_t reserved_27; - uint32_t reserved_28; - uint32_t reserved_29; - uint32_t reserved_30; - uint32_t reserved_31; - uint32_t reserved_32; - uint32_t reserved_33; - uint32_t reserved_34; - uint32_t reserved_35; - uint32_t reserved_36; - uint32_t reserved_37; - uint32_t reserved_38; - uint32_t reserved_39; - uint32_t reserved_40; - uint32_t reserved_41; + uint32_t sdmax_rlcx_watermark; + uint32_t sdmax_rlcx_doorbell_offset; + uint32_t sdmax_rlcx_csa_addr_lo; + uint32_t sdmax_rlcx_csa_addr_hi; + uint32_t 
sdmax_rlcx_ib_sub_remain; + uint32_t sdmax_rlcx_preempt; + uint32_t sdmax_rlcx_dummy_reg; + uint32_t sdmax_rlcx_rb_wptr_poll_addr_hi; + uint32_t sdmax_rlcx_rb_wptr_poll_addr_lo; + uint32_t sdmax_rlcx_rb_aql_cntl; + uint32_t sdmax_rlcx_minor_ptr_update; + uint32_t sdmax_rlcx_midcmd_data0; + uint32_t sdmax_rlcx_midcmd_data1; + uint32_t sdmax_rlcx_midcmd_data2; + uint32_t sdmax_rlcx_midcmd_data3; + uint32_t sdmax_rlcx_midcmd_data4; + uint32_t sdmax_rlcx_midcmd_data5; + uint32_t sdmax_rlcx_midcmd_data6; + uint32_t sdmax_rlcx_midcmd_data7; + uint32_t sdmax_rlcx_midcmd_data8; + uint32_t sdmax_rlcx_midcmd_cntl; uint32_t reserved_42; uint32_t reserved_43; uint32_t reserved_44; -- cgit v1.2.1 From ab88bded7522dafc1a6beb251092365519a01c4e Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:00 -0400 Subject: drm/amdgpu: Add GFXv9 TLB invalidation packet definition Signed-off-by: Shaoyun Liu Signed-off-by: Jay Cornwall Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/soc15d.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h index 7f408f85fdb6..f22f7a88ce0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15d.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h @@ -268,6 +268,11 @@ * x=1: tmz_end */ +#define PACKET3_INVALIDATE_TLBS 0x98 +# define PACKET3_INVALIDATE_TLBS_DST_SEL(x) ((x) << 0) +# define PACKET3_INVALIDATE_TLBS_ALL_HUB(x) ((x) << 4) +# define PACKET3_INVALIDATE_TLBS_PASID(x) ((x) << 5) +# define PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(x) ((x) << 29) #define PACKET3_SET_RESOURCES 0xA0 /* 1. header * 2. CONTROL -- cgit v1.2.1 From d5a114a6c5f7fa41da338e0134fccf3f25723fbd Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:01 -0400 Subject: drm/amdgpu: Add GFXv9 kfd2kgd interface functions Signed-off-by: John Bridgman Signed-off-by: Shaoyun Liu Signed-off-by: Jay Cornwall Signed-off-by: Yong Zhao Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 4 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 1043 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 5 files changed, 1051 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 2ca2b5154d52..f3002020df6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -130,7 +130,8 @@ amdgpu-y += \ amdgpu_amdkfd.o \ amdgpu_amdkfd_fence.o \ amdgpu_amdkfd_gpuvm.o \ - amdgpu_amdkfd_gfx_v8.o + amdgpu_amdkfd_gfx_v8.o \ + amdgpu_amdkfd_gfx_v9.o # add cgs amdgpu-y += amdgpu_cgs.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 4d36203ffb11..fcd10dbd121c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -92,6 +92,10 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) case CHIP_POLARIS11: kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); break; + case CHIP_VEGA10: + case CHIP_RAVEN: + kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); + break; default: dev_dbg(adev->dev, "kfd not supported on this ASIC\n"); return; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index c3024b143f3d..12367a9951e8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -122,6 +122,7 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void); struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void); +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c new file mode 100644 index 000000000000..8f37991df61b --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -0,0 +1,1043 @@ +/* + * Copyright 2014-2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#define pr_fmt(fmt) "kfd2kgd: " fmt + +#include +#include +#include +#include +#include +#include "amdgpu.h" +#include "amdgpu_amdkfd.h" +#include "amdgpu_ucode.h" +#include "soc15_hw_ip.h" +#include "gc/gc_9_0_offset.h" +#include "gc/gc_9_0_sh_mask.h" +#include "vega10_enum.h" +#include "sdma0/sdma0_4_0_offset.h" +#include "sdma0/sdma0_4_0_sh_mask.h" +#include "sdma1/sdma1_4_0_offset.h" +#include "sdma1/sdma1_4_0_sh_mask.h" +#include "athub/athub_1_0_offset.h" +#include "athub/athub_1_0_sh_mask.h" +#include "oss/osssys_4_0_offset.h" +#include "oss/osssys_4_0_sh_mask.h" +#include "soc15_common.h" +#include "v9_structs.h" +#include "soc15.h" +#include "soc15d.h" + +/* HACK: MMHUB and GC both have VM-related register with the same + * names but different offsets. Define the MMHUB register we need here + * with a prefix. A proper solution would be to move the functions + * programming these registers into gfx_v9_0.c and mmhub_v1_0.c + * respectively. 
+ */ +#define mmMMHUB_VM_INVALIDATE_ENG16_REQ 0x06f3 +#define mmMMHUB_VM_INVALIDATE_ENG16_REQ_BASE_IDX 0 + +#define mmMMHUB_VM_INVALIDATE_ENG16_ACK 0x0705 +#define mmMMHUB_VM_INVALIDATE_ENG16_ACK_BASE_IDX 0 + +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32 0x072b +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32_BASE_IDX 0 +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32 0x072c +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32_BASE_IDX 0 + +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32 0x074b +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32_BASE_IDX 0 +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32 0x074c +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32_BASE_IDX 0 + +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32 0x076b +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32_BASE_IDX 0 +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32 0x076c +#define mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32_BASE_IDX 0 + +#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32 0x0727 +#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32_BASE_IDX 0 +#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32 0x0728 +#define mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32_BASE_IDX 0 + +#define V9_PIPE_PER_MEC (4) +#define V9_QUEUES_PER_PIPE_MEC (8) + +enum hqd_dequeue_request_type { + NO_ACTION = 0, + DRAIN_PIPE, + RESET_WAVES +}; + +/* + * Register access functions + */ + +static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, + uint32_t sh_mem_config, + uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, + uint32_t sh_mem_bases); +static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, + unsigned int vmid); +static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); +static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm); +static int kgd_hqd_dump(struct kgd_dev *kgd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs); +static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm); +static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs); +static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, + uint32_t pipe_id, uint32_t queue_id); +static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, + unsigned int utimeout, uint32_t pipe_id, + uint32_t queue_id); +static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, + unsigned int utimeout); +static int kgd_address_watch_disable(struct kgd_dev *kgd); +static int kgd_address_watch_execute(struct kgd_dev *kgd, + unsigned int watch_point_id, + uint32_t cntl_val, + uint32_t addr_hi, + uint32_t addr_lo); +static int kgd_wave_control_execute(struct kgd_dev *kgd, + uint32_t gfx_index_val, + uint32_t sq_cmd); +static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, + unsigned int watch_point_id, + unsigned int reg_offset); + +static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, + uint8_t vmid); +static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, + uint8_t vmid); +static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, + uint32_t page_table_base); +static uint16_t get_fw_version(struct 
kgd_dev *kgd, enum kgd_engine_type type); +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid); +static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); +static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); + +/* Because of REG_GET_FIELD() being used, we put this function in the + * asic specific file. + */ +static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, + struct tile_config *config) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + + config->gb_addr_config = adev->gfx.config.gb_addr_config; + + config->tile_config_ptr = adev->gfx.config.tile_mode_array; + config->num_tile_configs = + ARRAY_SIZE(adev->gfx.config.tile_mode_array); + config->macro_tile_config_ptr = + adev->gfx.config.macrotile_mode_array; + config->num_macro_tile_configs = + ARRAY_SIZE(adev->gfx.config.macrotile_mode_array); + + return 0; +} + +static const struct kfd2kgd_calls kfd2kgd = { + .init_gtt_mem_allocation = alloc_gtt_mem, + .free_gtt_mem = free_gtt_mem, + .get_local_mem_info = get_local_mem_info, + .get_gpu_clock_counter = get_gpu_clock_counter, + .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz, + .alloc_pasid = amdgpu_pasid_alloc, + .free_pasid = amdgpu_pasid_free, + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_pasid = + get_atc_vmid_pasid_mapping_pasid, + .get_atc_vmid_pasid_mapping_valid = + get_atc_vmid_pasid_mapping_valid, + .get_fw_version = get_fw_version, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = amdgpu_amdkfd_get_tile_config, + .get_cu_info = get_cu_info, + .get_vram_usage = amdgpu_amdkfd_get_vram_usage, + .create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm, + .acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm, + .destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm, + .get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu, + .free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu, + .map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu, + .unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu, + .sync_memory = amdgpu_amdkfd_gpuvm_sync_memory, + .map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel, + .restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, + .submit_ib = amdgpu_amdkfd_submit_ib, +}; + +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) +{ + return (struct kfd2kgd_calls *)&kfd2kgd; +} + +static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) +{ + return (struct amdgpu_device *)kgd; +} + +static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, + uint32_t queue, uint32_t vmid) 
+{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + mutex_lock(&adev->srbm_mutex); + soc15_grbm_select(adev, mec, pipe, queue, vmid); +} + +static void unlock_srbm(struct kgd_dev *kgd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + soc15_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + +static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, + uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + lock_srbm(kgd, mec, pipe, queue_id, 0); +} + +static uint32_t get_queue_mask(struct amdgpu_device *adev, + uint32_t pipe_id, uint32_t queue_id) +{ + unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe + + queue_id) & 31; + + return ((uint32_t)1) << bit; +} + +static void release_queue(struct kgd_dev *kgd) +{ + unlock_srbm(kgd); +} + +static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, + uint32_t sh_mem_config, + uint32_t sh_mem_ape1_base, + uint32_t sh_mem_ape1_limit, + uint32_t sh_mem_bases) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + lock_srbm(kgd, 0, 0, 0, vmid); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); + /* APE1 no longer exists on GFX9 */ + + unlock_srbm(kgd); +} + +static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, + unsigned int vmid) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + + /* + * We have to assume that there is no outstanding mapping. + * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because + * a mapping is in progress or because a mapping finished + * and the SW cleared it. + * So the protocol is to always wait & clear. + */ + uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid | + ATC_VMID0_PASID_MAPPING__VALID_MASK; + + /* + * need to do this twice, once for gfx and once for mmhub + * for ATC add 16 to VMID for mmhub, for IH different registers. + * ATC_VMID0..15 registers are separate from ATC_VMID16..31. 
+ */ + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid, + pasid_mapping); + + while (!(RREG32(SOC15_REG_OFFSET( + ATHUB, 0, + mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) & + (1U << vmid))) + cpu_relax(); + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, + mmATC_VMID_PASID_MAPPING_UPDATE_STATUS), + 1U << vmid); + + /* Mapping vmid to pasid also for IH block */ + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, + pasid_mapping); + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid, + pasid_mapping); + + while (!(RREG32(SOC15_REG_OFFSET( + ATHUB, 0, + mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) & + (1U << (vmid + 16)))) + cpu_relax(); + + WREG32(SOC15_REG_OFFSET(ATHUB, 0, + mmATC_VMID_PASID_MAPPING_UPDATE_STATUS), + 1U << (vmid + 16)); + + /* Mapping vmid to pasid also for IH block */ + WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid, + pasid_mapping); + return 0; +} + +/* TODO - RING0 form of field is obsolete, seems to date back to SI + * but still works + */ + +static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t mec; + uint32_t pipe; + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + lock_srbm(kgd, mec, pipe, 0, 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), + CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | + CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); + + unlock_srbm(kgd); + + return 0; +} + +static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, + unsigned int engine_id, + unsigned int queue_id) +{ + uint32_t base[2] = { + SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, + SOC15_REG_OFFSET(SDMA1, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL + }; + uint32_t retval; + + retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - + mmSDMA0_RLC0_RB_CNTL); + + pr_debug("sdma base address: 0x%x\n", retval); + + return retval; +} + +static inline struct v9_mqd *get_mqd(void *mqd) +{ + return (struct v9_mqd *)mqd; +} + +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v9_sdma_mqd *)mqd; +} + +static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + uint32_t queue_id, uint32_t __user *wptr, + uint32_t wptr_shift, uint32_t wptr_mask, + struct mm_struct *mm) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_mqd *m; + uint32_t *mqd_hqd; + uint32_t reg, hqd_base, data; + + m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + /* HIQ is set during driver init period with vmid set to 0*/ + if (m->cp_hqd_vmid == 0) { + uint32_t value, mec, pipe; + + mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; + pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); + + pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", + mec, pipe, queue_id); + value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS)); + value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1, + ((mec << 5) | (pipe << 3) | queue_id | 0x80)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value); + } + + /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ + mqd_hqd = &m->cp_mqd_base_addr_lo; + hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); + + for (reg = hqd_base; + reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + WREG32(reg, mqd_hqd[reg - hqd_base]); + + + /* Activate doorbell logic before triggering WPTR poll. 
*/ + data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, + CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data); + + if (wptr) { + /* Don't read wptr with get_user because the user + * context may not be accessible (if this function + * runs in a work queue). Instead trigger a one-shot + * polling read from memory in the CP. This assumes + * that wptr is GPU-accessible in the queue's VMID via + * ATC or SVM. WPTR==RPTR before starting the poll so + * the CP starts fetching new commands from the right + * place. + * + * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit + * tricky. Assume that the queue didn't overflow. The + * number of valid bits in the 32-bit RPTR depends on + * the queue size. The remaining bits are taken from + * the saved 64-bit WPTR. If the WPTR wrapped, add the + * queue size. + */ + uint32_t queue_size = + 2 << REG_GET_FIELD(m->cp_hqd_pq_control, + CP_HQD_PQ_CONTROL, QUEUE_SIZE); + uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1); + + if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr) + guessed_wptr += queue_size; + guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); + guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; + + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), + lower_32_bits(guessed_wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), + upper_32_bits(guessed_wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), + lower_32_bits((uint64_t)wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), + upper_32_bits((uint64_t)wptr)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), + get_queue_mask(adev, pipe_id, queue_id)); + } + + /* Start the EOP fetcher */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), + REG_SET_FIELD(m->cp_hqd_eop_rptr, + CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); + + data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); + + release_queue(kgd); + + return 0; +} + +static int kgd_hqd_dump(struct kgd_dev *kgd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t i = 0, reg; +#define HQD_N_REGS 56 +#define DUMP_REG(addr) do { \ + if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ + break; \ + (*dump)[i][0] = (addr) << 2; \ + (*dump)[i++][1] = RREG32(addr); \ + } while (0) + + *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + acquire_queue(kgd, pipe_id, queue_id); + + for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); + reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + DUMP_REG(reg); + + release_queue(kgd); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + unsigned long end_jiffies; + uint32_t data; + uint64_t data64; + uint64_t __user *wptr64 = (uint64_t __user *)wptr; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + sdmax_gfx_context_cntl = m->sdma_engine_id ? 
+ SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) : + SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); + + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + return -ETIME; + usleep_range(500, 1000); + } + data = RREG32(sdmax_gfx_context_cntl); + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, + RESUME_CTX, 0); + WREG32(sdmax_gfx_context_cntl, data); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + m->sdmax_rlcx_doorbell_offset); + + data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, + ENABLE, 1); + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + if (read_user_wptr(mm, wptr64, data64)) { + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + lower_32_bits(data64)); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + upper_32_bits(data64)); + } else { + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + } + WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + m->sdmax_rlcx_rb_base_hi); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + m->sdmax_rlcx_rb_rptr_addr_lo); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + m->sdmax_rlcx_rb_rptr_addr_hi); + + data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, + RB_ENABLE, 1); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + + return 0; +} + +static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t i = 0, reg; +#undef HQD_N_REGS +#define HQD_N_REGS (19+6+7+10) + + *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; + reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) + DUMP_REG(sdma_base_addr + reg); + for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; + reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) + DUMP_REG(sdma_base_addr + reg); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, + uint32_t pipe_id, uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t act; + bool retval = false; + uint32_t low, high; + + acquire_queue(kgd, pipe_id, queue_id); + act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + if (act) { + low = lower_32_bits(queue_address >> 8); + high = upper_32_bits(queue_address >> 8); + + if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) && + high == RREG32(SOC15_REG_OFFSET(GC, 0, 
mmCP_HQD_PQ_BASE_HI))) + retval = true; + } + release_queue(kgd); + return retval; +} + +static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr; + uint32_t sdma_rlc_rb_cntl; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + + sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + + if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) + return true; + + return false; +} + +static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, + enum kfd_preempt_type reset_type, + unsigned int utimeout, uint32_t pipe_id, + uint32_t queue_id) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + enum hqd_dequeue_request_type type; + unsigned long end_jiffies; + uint32_t temp; + struct v9_mqd *m = get_mqd(mqd); + + acquire_queue(kgd, pipe_id, queue_id); + + if (m->cp_hqd_vmid == 0) + WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); + + switch (reset_type) { + case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: + type = DRAIN_PIPE; + break; + case KFD_PREEMPT_TYPE_WAVEFRONT_RESET: + type = RESET_WAVES; + break; + default: + type = DRAIN_PIPE; + break; + } + + WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type); + + end_jiffies = (utimeout * HZ / 1000) + jiffies; + while (true) { + temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); + if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("cp queue preemption time out.\n"); + release_queue(kgd); + return -ETIME; + } + usleep_range(500, 1000); + } + + release_queue(kgd); + return 0; +} + +static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, + unsigned int utimeout) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + struct v9_sdma_mqd *m; + uint32_t sdma_base_addr; + uint32_t temp; + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + m = get_sdma_mqd(mqd); + sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + m->sdma_queue_id); + + temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + + while (true) { + temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) + return -ETIME; + usleep_range(500, 1000); + } + + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); + + m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr_hi = + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + + return 0; +} + +static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, + uint8_t vmid) +{ + uint32_t reg; + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; +} + +static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, + uint8_t vmid) +{ + uint32_t reg; + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + + vmid); + return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; +} + +static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid) 
+{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + uint32_t req = (1 << vmid) | + (0 << VM_INVALIDATE_ENG16_REQ__FLUSH_TYPE__SHIFT) | /* legacy */ + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PTES_MASK | + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE0_MASK | + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE1_MASK | + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L2_PDE2_MASK | + VM_INVALIDATE_ENG16_REQ__INVALIDATE_L1_PTES_MASK; + + mutex_lock(&adev->srbm_mutex); + + /* Use legacy mode tlb invalidation. + * + * Currently on Raven the code below is broken for anything but + * legacy mode due to a MMHUB power gating problem. A workaround + * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ + * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack + * bit. + * + * TODO 1: agree on the right set of invalidation registers for + * KFD use. Use the last one for now. Invalidate both GC and + * MMHUB. + * + * TODO 2: support range-based invalidation, requires kfg2kgd + * interface change + */ + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_LO32), + 0xffffffff); + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ADDR_RANGE_HI32), + 0x0000001f); + + WREG32(SOC15_REG_OFFSET(MMHUB, 0, + mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_LO32), + 0xffffffff); + WREG32(SOC15_REG_OFFSET(MMHUB, 0, + mmMMHUB_VM_INVALIDATE_ENG16_ADDR_RANGE_HI32), + 0x0000001f); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_REQ), req); + + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_INVALIDATE_ENG16_REQ), + req); + + while (!(RREG32(SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG16_ACK)) & + (1 << vmid))) + cpu_relax(); + + while (!(RREG32(SOC15_REG_OFFSET(MMHUB, 0, + mmMMHUB_VM_INVALIDATE_ENG16_ACK)) & + (1 << vmid))) + cpu_relax(); + + mutex_unlock(&adev->srbm_mutex); + +} + +static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid) +{ + signed long r; + uint32_t seq; + struct amdgpu_ring *ring = &adev->gfx.kiq.ring; + + spin_lock(&adev->gfx.kiq.ring_lock); + amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package*/ + amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0)); + amdgpu_ring_write(ring, + PACKET3_INVALIDATE_TLBS_DST_SEL(1) | + PACKET3_INVALIDATE_TLBS_ALL_HUB(1) | + PACKET3_INVALIDATE_TLBS_PASID(pasid) | + PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(0)); /* legacy */ + amdgpu_fence_emit_polling(ring, &seq); + amdgpu_ring_commit(ring); + spin_unlock(&adev->gfx.kiq.ring_lock); + + r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout); + if (r < 1) { + DRM_ERROR("wait for kiq fence error: %ld.\n", r); + return -ETIME; + } + + return 0; +} + +static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + int vmid; + struct amdgpu_ring *ring = &adev->gfx.kiq.ring; + + if (ring->ready) + return invalidate_tlbs_with_kiq(adev, pasid); + + for (vmid = 0; vmid < 16; vmid++) { + if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) + continue; + if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { + if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid) + == pasid) { + write_vmid_invalidate_request(kgd, vmid); + break; + } + } + } + + return 0; +} + +static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + + if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { + pr_err("non kfd vmid %d\n", vmid); + return 0; + } + + write_vmid_invalidate_request(kgd, vmid); + return 0; +} + +static int kgd_address_watch_disable(struct kgd_dev *kgd) +{ + 
return 0; +} + +static int kgd_address_watch_execute(struct kgd_dev *kgd, + unsigned int watch_point_id, + uint32_t cntl_val, + uint32_t addr_hi, + uint32_t addr_lo) +{ + return 0; +} + +static int kgd_wave_control_execute(struct kgd_dev *kgd, + uint32_t gfx_index_val, + uint32_t sq_cmd) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint32_t data = 0; + + mutex_lock(&adev->grbm_idx_mutex); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val); + WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd); + + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + INSTANCE_BROADCAST_WRITES, 1); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + SH_BROADCAST_WRITES, 1); + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, + SE_BROADCAST_WRITES, 1); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data); + mutex_unlock(&adev->grbm_idx_mutex); + + return 0; +} + +static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, + unsigned int watch_point_id, + unsigned int reg_offset) +{ + return 0; +} + +static void set_scratch_backing_va(struct kgd_dev *kgd, + uint64_t va, uint32_t vmid) +{ + /* No longer needed on GFXv9. The scratch base address is + * passed to the shader by the CP. It's the user mode driver's + * responsibility. + */ +} + +/* FIXME: Does this need to be ASIC-specific code? */ +static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type) +{ + struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + const union amdgpu_firmware_header *hdr; + + switch (type) { + case KGD_ENGINE_PFP: + hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data; + break; + + case KGD_ENGINE_ME: + hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data; + break; + + case KGD_ENGINE_CE: + hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data; + break; + + case KGD_ENGINE_MEC1: + hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data; + break; + + case KGD_ENGINE_MEC2: + hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data; + break; + + case KGD_ENGINE_RLC: + hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data; + break; + + case KGD_ENGINE_SDMA1: + hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data; + break; + + case KGD_ENGINE_SDMA2: + hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data; + break; + + default: + return 0; + } + + if (hdr == NULL) + return 0; + + /* Only 12 bit in use*/ + return hdr->common.ucode_version; +} + +static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, + uint32_t page_table_base) +{ + struct amdgpu_device *adev = get_amdgpu_device(kgd); + uint64_t base = (uint64_t)page_table_base << PAGE_SHIFT | + AMDGPU_PTE_VALID; + + if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { + pr_err("trying to set page table base for wrong VMID %u\n", + vmid); + return; + } + + /* TODO: take advantage of per-process address space size. For + * now, all processes share the same address space size, like + * on GFX8 and older. 
+ */ + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); + + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), + upper_32_bits(adev->vm_manager.max_pfn - 1)); + + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); + WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmMMHUB_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), + lower_32_bits(adev->vm_manager.max_pfn - 1)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), + upper_32_bits(adev->vm_manager.max_pfn - 1)); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); + WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); +} diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9d39fd5b1822..e5962e61beb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4686,6 +4686,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, cu_info->number = active_cu_number; cu_info->ao_cu_mask = ao_cu_mask; + cu_info->simd_per_cu = NUM_SIMD_PER_CU; return 0; } -- cgit v1.2.1 From 642a0e80262af8e9d5b8129e2149c670ab3bb4b8 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:02 -0400 Subject: drm/amdgpu: Add doorbell routing info to kgd2kfd_shared_resources This is needed for Vega10 and later ASICs to let KFD know which doorbells can be used for SDMA and CP queues respectively. Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 15 +++++++++++++++ 2 files changed, 37 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index fcd10dbd121c..cd0e8f192e6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -179,6 +179,28 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) &gpu_resources.doorbell_physical_address, &gpu_resources.doorbell_aperture_size, &gpu_resources.doorbell_start_offset); + if (adev->asic_type >= CHIP_VEGA10) { + /* On SOC15 the BIF is involved in routing + * doorbells using the low 12 bits of the + * address. Communicate the assignments to + * KFD. KFD uses two doorbell pages per + * process in case of 64-bit doorbells so we + * can use each doorbell assignment twice. 
+ */ + gpu_resources.sdma_doorbell[0][0] = + AMDGPU_DOORBELL64_sDMA_ENGINE0; + gpu_resources.sdma_doorbell[0][1] = + AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200; + gpu_resources.sdma_doorbell[1][0] = + AMDGPU_DOORBELL64_sDMA_ENGINE1; + gpu_resources.sdma_doorbell[1][1] = + AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200; + /* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for + * SDMA, IH and VCN. So don't use them for the CP. + */ + gpu_resources.reserved_doorbell_mask = 0x1f0; + gpu_resources.reserved_doorbell_val = 0x0f0; + } kgd2kfd->device_init(adev->kfd, &gpu_resources); } diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 7cf35068866f..5733fbee07f7 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -100,6 +100,21 @@ struct kgd2kfd_shared_resources { /* Bit n == 1 means Queue n is available for KFD */ DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES); + /* Doorbell assignments (SOC15 and later chips only). Only + * specific doorbells are routed to each SDMA engine. Others + * are routed to IH and VCN. They are not usable by the CP. + * + * Any doorbell number D that satisfies the following condition + * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val + * + * KFD currently uses 1024 (= 0x3ff) doorbells per process. If + * doorbells 0x0f0-0x0f7 and 0x2f-0x2f7 are reserved, that means + * mask would be set to 0x1f8 and val set to 0x0f0. + */ + unsigned int sdma_doorbell[2][2]; + unsigned int reserved_doorbell_mask; + unsigned int reserved_doorbell_val; + /* Base address of doorbell aperture. */ phys_addr_t doorbell_physical_address; -- cgit v1.2.1 From ada2b29c4a79efbdc5bf5eed876bad6b00f43536 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:03 -0400 Subject: drm/amdkfd: Make doorbell size ASIC-dependent This prepares for GFXv9 (Vega10), which has 64-bit doorbells. 
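To make the arithmetic concrete, here is a minimal sketch (plain C, not part of the patch) of how the new doorbell_size field feeds the per-process doorbell slice and the dword offset of a queue's doorbell. The kfd_example_* helpers are hypothetical names used only for illustration; the 4-byte size for GFXv8 and older comes from the hunks below, and the 8-byte size on GFXv9 is an assumption based on the description above.

/* Sketch only: mirrors what kfd_doorbell_process_slice() and
 * kfd_queue_id_to_doorbell() compute after this patch.
 */
static size_t kfd_example_doorbell_slice(const struct kfd_device_info *info)
{
	/* doorbell_size is 4 on GFXv8 and older, assumed 8 on GFXv9 */
	return roundup(info->doorbell_size * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
		       PAGE_SIZE);
}

static unsigned int kfd_example_queue_doorbell_dword(
		const struct kfd_device_info *info,
		unsigned int doorbell_id_offset,
		unsigned int process_doorbell_index,
		unsigned int queue_id)
{
	/* The returned offset stays in dword units for any doorbell size */
	return doorbell_id_offset +
	       process_doorbell_index * kfd_example_doorbell_slice(info) /
	       sizeof(u32) +
	       queue_id * info->doorbell_size / sizeof(u32);
}

The slice is rounded up to a whole page either way, so the per-process doorbell mapping stays page-sized whether each doorbell takes 4 or 8 bytes, and callers keep working with u32-granular offsets.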
Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 10 +++++++ drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 48 ++++++++++++++++--------------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 7 +++-- 3 files changed, 39 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 7b5799530c0f..f563acbc1ad7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -41,6 +41,7 @@ static const struct kfd_device_info kaveri_device_info = { .max_pasid_bits = 16, /* max num of queues for KV.TODO should be a dynamic value */ .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -55,6 +56,7 @@ static const struct kfd_device_info carrizo_device_info = { .max_pasid_bits = 16, /* max num of queues for CZ.TODO should be a dynamic value */ .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -70,6 +72,7 @@ static const struct kfd_device_info hawaii_device_info = { .max_pasid_bits = 16, /* max num of queues for KV.TODO should be a dynamic value */ .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -83,6 +86,7 @@ static const struct kfd_device_info tonga_device_info = { .asic_family = CHIP_TONGA, .max_pasid_bits = 16, .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -96,6 +100,7 @@ static const struct kfd_device_info tonga_vf_device_info = { .asic_family = CHIP_TONGA, .max_pasid_bits = 16, .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -109,6 +114,7 @@ static const struct kfd_device_info fiji_device_info = { .asic_family = CHIP_FIJI, .max_pasid_bits = 16, .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -122,6 +128,7 @@ static const struct kfd_device_info fiji_vf_device_info = { .asic_family = CHIP_FIJI, .max_pasid_bits = 16, .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -136,6 +143,7 @@ static const struct kfd_device_info polaris10_device_info = { .asic_family = CHIP_POLARIS10, .max_pasid_bits = 16, .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -149,6 +157,7 @@ static const struct kfd_device_info polaris10_vf_device_info = { .asic_family = CHIP_POLARIS10, .max_pasid_bits = 16, .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, @@ -162,6 +171,7 @@ static const struct kfd_device_info polaris11_device_info = { .asic_family = CHIP_POLARIS11, .max_pasid_bits = 16, .max_no_of_hqd = 24, + .doorbell_size = 4, .ih_ring_entry_size = 4 * 
sizeof(uint32_t), .event_interrupt_class = &event_interrupt_class_cik, .num_of_watch_points = 4, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index ebb4da14e3df..484031423d1f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -33,7 +33,6 @@ static DEFINE_IDA(doorbell_ida); static unsigned int max_doorbell_slices; -#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4 /* * Each device exposes a doorbell aperture, a PCI MMIO aperture that @@ -50,9 +49,9 @@ static unsigned int max_doorbell_slices; */ /* # of doorbell bytes allocated for each process. */ -static inline size_t doorbell_process_allocation(void) +static size_t kfd_doorbell_process_slice(struct kfd_dev *kfd) { - return roundup(KFD_SIZE_OF_DOORBELL_IN_BYTES * + return roundup(kfd->device_info->doorbell_size * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, PAGE_SIZE); } @@ -72,16 +71,16 @@ int kfd_doorbell_init(struct kfd_dev *kfd) doorbell_start_offset = roundup(kfd->shared_resources.doorbell_start_offset, - doorbell_process_allocation()); + kfd_doorbell_process_slice(kfd)); doorbell_aperture_size = rounddown(kfd->shared_resources.doorbell_aperture_size, - doorbell_process_allocation()); + kfd_doorbell_process_slice(kfd)); if (doorbell_aperture_size > doorbell_start_offset) doorbell_process_limit = (doorbell_aperture_size - doorbell_start_offset) / - doorbell_process_allocation(); + kfd_doorbell_process_slice(kfd); else return -ENOSPC; @@ -95,7 +94,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd) kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32); kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base, - doorbell_process_allocation()); + kfd_doorbell_process_slice(kfd)); if (!kfd->doorbell_kernel_ptr) return -ENOMEM; @@ -132,16 +131,16 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) phys_addr_t address; struct kfd_dev *dev; + /* Find kfd device according to gpu id */ + dev = kfd_device_by_id(vma->vm_pgoff); + if (!dev) + return -EINVAL; + /* * For simplicitly we only allow mapping of the entire doorbell * allocation of a single device & process. */ - if (vma->vm_end - vma->vm_start != doorbell_process_allocation()) - return -EINVAL; - - /* Find kfd device according to gpu id */ - dev = kfd_device_by_id(vma->vm_pgoff); - if (!dev) + if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev)) return -EINVAL; /* Calculate physical address of doorbell */ @@ -158,19 +157,19 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) " vm_flags == 0x%04lX\n" " size == 0x%04lX\n", (unsigned long long) vma->vm_start, address, vma->vm_flags, - doorbell_process_allocation()); + kfd_doorbell_process_slice(dev)); return io_remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT, - doorbell_process_allocation(), + kfd_doorbell_process_slice(dev), vma->vm_page_prot); } /* get kernel iomem pointer for a doorbell */ -u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, +void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off) { u32 inx; @@ -185,6 +184,8 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) return NULL; + inx *= kfd->device_info->doorbell_size / sizeof(u32); + /* * Calculating the kernel doorbell offset using the first * doorbell page. 
@@ -210,7 +211,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr) mutex_unlock(&kfd->doorbell_mutex); } -inline void write_kernel_doorbell(u32 __iomem *db, u32 value) +void write_kernel_doorbell(void __iomem *db, u32 value) { if (db) { writel(value, db); @@ -228,20 +229,21 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, { /* * doorbell_id_offset accounts for doorbells taken by KGD. - * index * doorbell_process_allocation/sizeof(u32) adjusts to - * the process's doorbells. + * index * kfd_doorbell_process_slice/sizeof(u32) adjusts to + * the process's doorbells. The offset returned is in dword + * units regardless of the ASIC-dependent doorbell size. */ return kfd->doorbell_id_offset + process->doorbell_index - * doorbell_process_allocation() / sizeof(u32) + - queue_id; + * kfd_doorbell_process_slice(kfd) / sizeof(u32) + + queue_id * kfd->device_info->doorbell_size / sizeof(u32); } uint64_t kfd_get_number_elems(struct kfd_dev *kfd) { uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size - kfd->shared_resources.doorbell_start_offset) / - doorbell_process_allocation() + 1; + kfd_doorbell_process_slice(kfd) + 1; return num_of_elems; @@ -251,7 +253,7 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, struct kfd_process *process) { return dev->doorbell_base + - process->doorbell_index * doorbell_process_allocation(); + process->doorbell_index * kfd_doorbell_process_slice(dev); } int kfd_alloc_process_doorbells(struct kfd_process *process) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4d5c49ef2dc5..d9c0fe126429 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -160,6 +160,7 @@ struct kfd_device_info { const struct kfd_event_interrupt_class *event_interrupt_class; unsigned int max_pasid_bits; unsigned int max_no_of_hqd; + unsigned int doorbell_size; size_t ih_ring_entry_size; uint8_t num_of_watch_points; uint16_t mqd_size_aligned; @@ -364,7 +365,7 @@ struct queue_properties { uint32_t queue_percent; uint32_t *read_ptr; uint32_t *write_ptr; - uint32_t __iomem *doorbell_ptr; + void __iomem *doorbell_ptr; uint32_t doorbell_off; bool is_interop; bool is_evicted; @@ -728,11 +729,11 @@ void kfd_pasid_free(unsigned int pasid); int kfd_doorbell_init(struct kfd_dev *kfd); void kfd_doorbell_fini(struct kfd_dev *kfd); int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma); -u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, +void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off); void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr); u32 read_kernel_doorbell(u32 __iomem *db); -void write_kernel_doorbell(u32 __iomem *db, u32 value); +void write_kernel_doorbell(void __iomem *db, u32 value); unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, struct kfd_process *process, unsigned int queue_id); -- cgit v1.2.1 From df03ef9342ce09985210679a734f88a269c19ff5 Mon Sep 17 00:00:00 2001 From: Harish Kasiviswanathan Date: Tue, 10 Apr 2018 17:33:04 -0400 Subject: drm/amdkfd: Clean up KFD_MMAP_ offset handling Use bit-rotate for better clarity and remove _MASK from the #defines as these represent mmap types. Centralize all the parsing of the mmap offset in kfd_mmap and add device parameter to doorbell and reserved_mem map functions. Encode gpu_id into upper bits of vm_pgoff. This frees up the lower bits for encoding the the doorbell ID on Vega10. 
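To make the new layout concrete, here is a minimal sketch (plain C, not part of the patch) of how a byte-granular mmap offset packs and unpacks under this encoding. The ex_* names are hypothetical; the bit layout (type in bits [63:62], gpu_id in bits [61:46], offset value in bits [45:0]) is taken from the comment block added to kfd_priv.h below. In the patch itself the shift constants subtract PAGE_SHIFT, because vm_area_struct.vm_pgoff is expressed in pages rather than bytes.

#define EX_MMAP_TYPE_SHIFT	62
#define EX_MMAP_GPU_ID_SHIFT	46
#define EX_MMAP_GPU_ID_WIDTH	16

/* Sketch only: pack/unpack a byte-unit KFD mmap offset */
static u64 ex_mmap_offset_pack(u64 type, u32 gpu_id, u64 value)
{
	return (type << EX_MMAP_TYPE_SHIFT) |
	       ((u64)gpu_id << EX_MMAP_GPU_ID_SHIFT) |
	       (value & ((1ULL << EX_MMAP_GPU_ID_SHIFT) - 1));
}

static unsigned int ex_mmap_offset_gpu_id(u64 offset)
{
	return (offset >> EX_MMAP_GPU_ID_SHIFT) &
	       ((1U << EX_MMAP_GPU_ID_WIDTH) - 1);
}

For example, the doorbell offset that kfd_ioctl_create_queue() returns to user mode below corresponds, in byte units, to ex_mmap_offset_pack(0x3, gpu_id, 0), 0x3 being the doorbell mmap type.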
Signed-off-by: Harish Kasiviswanathan Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 35 ++++++++++++++++++---------- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 9 ++------ drivers/gpu/drm/amd/amdkfd/kfd_events.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 38 ++++++++++++++++++++++++------- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 8 +++---- 5 files changed, 59 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index cd679cf1fd30..519c7b1854b2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -292,7 +292,8 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Return gpu_id as doorbell offset for mmap usage */ - args->doorbell_offset = (KFD_MMAP_DOORBELL_MASK | args->gpu_id); + args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; + args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); args->doorbell_offset <<= PAGE_SHIFT; mutex_unlock(&p->mutex); @@ -1644,23 +1645,33 @@ err_i1: static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) { struct kfd_process *process; + struct kfd_dev *dev = NULL; + unsigned long vm_pgoff; + unsigned int gpu_id; process = kfd_get_process(current); if (IS_ERR(process)) return PTR_ERR(process); - if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) == - KFD_MMAP_DOORBELL_MASK) { - vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_DOORBELL_MASK; - return kfd_doorbell_mmap(process, vma); - } else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) == - KFD_MMAP_EVENTS_MASK) { - vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK; + vm_pgoff = vma->vm_pgoff; + vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff); + gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff); + if (gpu_id) + dev = kfd_device_by_id(gpu_id); + + switch (vm_pgoff & KFD_MMAP_TYPE_MASK) { + case KFD_MMAP_TYPE_DOORBELL: + if (!dev) + return -ENODEV; + return kfd_doorbell_mmap(dev, process, vma); + + case KFD_MMAP_TYPE_EVENTS: return kfd_event_mmap(process, vma); - } else if ((vma->vm_pgoff & KFD_MMAP_RESERVED_MEM_MASK) == - KFD_MMAP_RESERVED_MEM_MASK) { - vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_RESERVED_MEM_MASK; - return kfd_reserved_mem_mmap(process, vma); + + case KFD_MMAP_TYPE_RESERVED_MEM: + if (!dev) + return -ENODEV; + return kfd_reserved_mem_mmap(dev, process, vma); } return -EFAULT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index 484031423d1f..efc59dea563f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -126,15 +126,10 @@ void kfd_doorbell_fini(struct kfd_dev *kfd) iounmap(kfd->doorbell_kernel_ptr); } -int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma) +int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, + struct vm_area_struct *vma) { phys_addr_t address; - struct kfd_dev *dev; - - /* Find kfd device according to gpu id */ - dev = kfd_device_by_id(vma->vm_pgoff); - if (!dev) - return -EINVAL; /* * For simplicitly we only allow mapping of the entire doorbell diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 4890a90f1e44..bccf2f761177 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -345,7 +345,7 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p, case 
KFD_EVENT_TYPE_DEBUG: ret = create_signal_event(devkfd, p, ev); if (!ret) { - *event_page_offset = KFD_MMAP_EVENTS_MASK; + *event_page_offset = KFD_MMAP_TYPE_EVENTS; *event_page_offset <<= PAGE_SHIFT; *event_slot_index = ev->event_id; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d9c0fe126429..2d575c014651 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -41,9 +41,33 @@ #define KFD_SYSFS_FILE_MODE 0444 -#define KFD_MMAP_DOORBELL_MASK 0x8000000000000ull -#define KFD_MMAP_EVENTS_MASK 0x4000000000000ull -#define KFD_MMAP_RESERVED_MEM_MASK 0x2000000000000ull +/* GPU ID hash width in bits */ +#define KFD_GPU_ID_HASH_WIDTH 16 + +/* Use upper bits of mmap offset to store KFD driver specific information. + * BITS[63:62] - Encode MMAP type + * BITS[61:46] - Encode gpu_id. To identify to which GPU the offset belongs to + * BITS[45:0] - MMAP offset value + * + * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these + * defines are w.r.t to PAGE_SIZE + */ +#define KFD_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT) +#define KFD_MMAP_TYPE_MASK (0x3ULL << KFD_MMAP_TYPE_SHIFT) +#define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT) +#define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT) +#define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT) + +#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT) +#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \ + << KFD_MMAP_GPU_ID_SHIFT) +#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\ + & KFD_MMAP_GPU_ID_MASK) +#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \ + >> KFD_MMAP_GPU_ID_SHIFT) + +#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT) +#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK) /* * When working with cp scheduler we should assign the HIQ manually or via @@ -55,9 +79,6 @@ #define KFD_CIK_HIQ_PIPE 4 #define KFD_CIK_HIQ_QUEUE 0 -/* GPU ID hash width in bits */ -#define KFD_GPU_ID_HASH_WIDTH 16 - /* Macro for allocating structures */ #define kfd_alloc_struct(ptr_to_struct) \ ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL)) @@ -698,7 +719,7 @@ struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, struct kfd_process *p); -int kfd_reserved_mem_mmap(struct kfd_process *process, +int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, struct vm_area_struct *vma); /* KFD process API for creating and translating handles */ @@ -728,7 +749,8 @@ void kfd_pasid_free(unsigned int pasid); /* Doorbells */ int kfd_doorbell_init(struct kfd_dev *kfd); void kfd_doorbell_fini(struct kfd_dev *kfd); -int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma); +int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, + struct vm_area_struct *vma); void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, unsigned int *doorbell_off); void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 2791e72c2058..131fe2a1b589 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -451,7 +451,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) if (!dev->cwsr_enabled || 
qpd->cwsr_kaddr || qpd->cwsr_base) continue; - offset = (dev->id | KFD_MMAP_RESERVED_MEM_MASK) << PAGE_SHIFT; + offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id)) + << PAGE_SHIFT; qpd->tba_addr = (int64_t)vm_mmap(filep, 0, KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC, MAP_SHARED, offset); @@ -989,15 +990,12 @@ int kfd_resume_all_processes(void) return ret; } -int kfd_reserved_mem_mmap(struct kfd_process *process, +int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process, struct vm_area_struct *vma) { - struct kfd_dev *dev = kfd_device_by_id(vma->vm_pgoff); struct kfd_process_device *pdd; struct qcm_process_device *qpd; - if (!dev) - return -EINVAL; if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) { pr_err("Incorrect CWSR mapping size.\n"); return -EINVAL; -- cgit v1.2.1 From ef568db792e66216b48fd1567ff4a9d3bf9af866 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:05 -0400 Subject: drm/amdkfd: Implement doorbell allocation for SOC15 Allocate doorbells according to the doorbell routing information on SOC15 ASICs (Vega10 and later). On older ASICs we continue to use the queue_id as the doorbell ID to maintain compatibility with the Thunk. Signed-off-by: Shaoyun Liu Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 82 ++++++++++++++++++++-- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 12 ++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 11 ++- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 32 +++++++++ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 12 +++- 6 files changed, 139 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 519c7b1854b2..5694fbead9a5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -295,6 +295,13 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); args->doorbell_offset <<= PAGE_SHIFT; + if (KFD_IS_SOC15(dev->device_info->asic_family)) + /* On SOC15 ASICs, doorbell allocation must be + * per-device, and independent from the per-process + * queue_id. Return the doorbell offset within the + * doorbell aperture to user mode. + */ + args->doorbell_offset |= q_properties.doorbell_off; mutex_unlock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index d55d29d31da4..e9c72d8f0935 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -110,6 +110,57 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, qpd->sh_mem_bases); } +static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) +{ + struct kfd_dev *dev = qpd->dqm->dev; + + if (!KFD_IS_SOC15(dev->device_info->asic_family)) { + /* On pre-SOC15 chips we need to use the queue ID to + * preserve the user mode ABI. + */ + q->doorbell_id = q->properties.queue_id; + } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { + /* For SDMA queues on SOC15, use static doorbell + * assignments based on the engine and queue. 
+ */ + q->doorbell_id = dev->shared_resources.sdma_doorbell + [q->properties.sdma_engine_id] + [q->properties.sdma_queue_id]; + } else { + /* For CP queues on SOC15 reserve a free doorbell ID */ + unsigned int found; + + found = find_first_zero_bit(qpd->doorbell_bitmap, + KFD_MAX_NUM_OF_QUEUES_PER_PROCESS); + if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { + pr_debug("No doorbells available"); + return -EBUSY; + } + set_bit(found, qpd->doorbell_bitmap); + q->doorbell_id = found; + } + + q->properties.doorbell_off = + kfd_doorbell_id_to_offset(dev, q->process, + q->doorbell_id); + + return 0; +} + +static void deallocate_doorbell(struct qcm_process_device *qpd, + struct queue *q) +{ + unsigned int old; + struct kfd_dev *dev = qpd->dqm->dev; + + if (!KFD_IS_SOC15(dev->device_info->asic_family) || + q->properties.type == KFD_QUEUE_TYPE_SDMA) + return; + + old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap); + WARN_ON(!old); +} + static int allocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) @@ -301,10 +352,14 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, if (retval) return retval; + retval = allocate_doorbell(qpd, q); + if (retval) + goto out_deallocate_hqd; + retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval) - goto out_deallocate_hqd; + goto out_deallocate_doorbell; pr_debug("Loading mqd to hqd on pipe %d, queue %d\n", q->pipe, q->queue); @@ -324,6 +379,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, out_uninit_mqd: mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); +out_deallocate_doorbell: + deallocate_doorbell(qpd, q); out_deallocate_hqd: deallocate_hqd(dqm, q); @@ -357,6 +414,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, } dqm->total_queue_count--; + deallocate_doorbell(qpd, q); + retval = mqd->destroy_mqd(mqd, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_RESET, KFD_UNMAP_LATENCY_MS, @@ -861,6 +920,10 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE; q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE; + retval = allocate_doorbell(qpd, q); + if (retval) + goto out_deallocate_sdma_queue; + pr_debug("SDMA id is: %d\n", q->sdma_id); pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id); pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id); @@ -869,7 +932,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval) - goto out_deallocate_sdma_queue; + goto out_deallocate_doorbell; retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL); if (retval) @@ -879,6 +942,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, out_uninit_mqd: mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); +out_deallocate_doorbell: + deallocate_doorbell(qpd, q); out_deallocate_sdma_queue: deallocate_sdma_queue(dqm, q->sdma_id); @@ -1070,12 +1135,17 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE; } + + retval = allocate_doorbell(qpd, q); + if (retval) + goto out_deallocate_sdma_queue; + mqd = dqm->ops.get_mqd_manager(dqm, get_mqd_type_from_queue_type(q->properties.type)); if (!mqd) { retval = -ENOMEM; - goto out_deallocate_sdma_queue; + goto 
out_deallocate_doorbell; } /* * Eviction state logic: we only mark active queues as evicted @@ -1093,7 +1163,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval) - goto out_deallocate_sdma_queue; + goto out_deallocate_doorbell; list_add(&q->list, &qpd->queues_list); qpd->queue_count++; @@ -1117,6 +1187,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, mutex_unlock(&dqm->lock); return retval; +out_deallocate_doorbell: + deallocate_doorbell(qpd, q); out_deallocate_sdma_queue: if (q->properties.type == KFD_QUEUE_TYPE_SDMA) deallocate_sdma_queue(dqm, q->sdma_id); @@ -1257,6 +1329,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, goto failed; } + deallocate_doorbell(qpd, q); + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { dqm->sdma_queue_count--; deallocate_sdma_queue(dqm, q->sdma_id); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index efc59dea563f..36c9269ea7c0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -49,7 +49,7 @@ static unsigned int max_doorbell_slices; */ /* # of doorbell bytes allocated for each process. */ -static size_t kfd_doorbell_process_slice(struct kfd_dev *kfd) +size_t kfd_doorbell_process_slice(struct kfd_dev *kfd) { return roundup(kfd->device_info->doorbell_size * KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, @@ -214,13 +214,9 @@ void write_kernel_doorbell(void __iomem *db, u32 value) } } -/* - * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1 - * to doorbells with the process's doorbell page - */ -unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, +unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd, struct kfd_process *process, - unsigned int queue_id) + unsigned int doorbell_id) { /* * doorbell_id_offset accounts for doorbells taken by KGD. 
@@ -231,7 +227,7 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, return kfd->doorbell_id_offset + process->doorbell_index * kfd_doorbell_process_slice(kfd) / sizeof(u32) + - queue_id * kfd->device_info->doorbell_size / sizeof(u32); + doorbell_id * kfd->device_info->doorbell_size / sizeof(u32); } uint64_t kfd_get_number_elems(struct kfd_dev *kfd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 2d575c014651..ddb3c8cdfb7b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -169,6 +169,8 @@ enum cache_policy { cache_policy_noncoherent }; +#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10) + struct kfd_event_interrupt_class { bool (*interrupt_isr)(struct kfd_dev *dev, const uint32_t *ih_ring_entry); @@ -449,6 +451,7 @@ struct queue { uint32_t queue; unsigned int sdma_id; + unsigned int doorbell_id; struct kfd_process *process; struct kfd_dev *device; @@ -523,6 +526,9 @@ struct qcm_process_device { /* IB memory */ uint64_t ib_base; void *ib_kaddr; + + /* doorbell resources per process per device */ + unsigned long *doorbell_bitmap; }; /* KFD Memory Eviction */ @@ -747,6 +753,7 @@ unsigned int kfd_pasid_alloc(void); void kfd_pasid_free(unsigned int pasid); /* Doorbells */ +size_t kfd_doorbell_process_slice(struct kfd_dev *kfd); int kfd_doorbell_init(struct kfd_dev *kfd); void kfd_doorbell_fini(struct kfd_dev *kfd); int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process, @@ -756,9 +763,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr); u32 read_kernel_doorbell(u32 __iomem *db); void write_kernel_doorbell(void __iomem *db, u32 value); -unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd, +unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd, struct kfd_process *process, - unsigned int queue_id); + unsigned int doorbell_id); phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev, struct kfd_process *process); int kfd_alloc_process_doorbells(struct kfd_process *process); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 131fe2a1b589..1d80b4f7c681 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -332,6 +332,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) free_pages((unsigned long)pdd->qpd.cwsr_kaddr, get_order(KFD_CWSR_TBA_TMA_SIZE)); + kfree(pdd->qpd.doorbell_bitmap); idr_destroy(&pdd->alloc_idr); kfree(pdd); @@ -586,6 +587,31 @@ err_alloc_process: return ERR_PTR(err); } +static int init_doorbell_bitmap(struct qcm_process_device *qpd, + struct kfd_dev *dev) +{ + unsigned int i; + + if (!KFD_IS_SOC15(dev->device_info->asic_family)) + return 0; + + qpd->doorbell_bitmap = + kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, + BITS_PER_BYTE), GFP_KERNEL); + if (!qpd->doorbell_bitmap) + return -ENOMEM; + + /* Mask out any reserved doorbells */ + for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++) + if ((dev->shared_resources.reserved_doorbell_mask & i) == + dev->shared_resources.reserved_doorbell_val) { + set_bit(i, qpd->doorbell_bitmap); + pr_debug("reserved doorbell 0x%03x\n", i); + } + + return 0; +} + struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev, struct kfd_process *p) { @@ -607,6 +633,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev, if (!pdd) return NULL; + if 
(init_doorbell_bitmap(&pdd->qpd, dev)) { + pr_err("Failed to init doorbell for process\n"); + kfree(pdd); + return NULL; + } + pdd->dev = dev; INIT_LIST_HEAD(&pdd->qpd.queues_list); INIT_LIST_HEAD(&pdd->qpd.priv_queue_list); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 7817e327ea6d..3045aebdc3f7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -119,9 +119,6 @@ static int create_cp_queue(struct process_queue_manager *pqm, /* Doorbell initialized in user space*/ q_properties->doorbell_ptr = NULL; - q_properties->doorbell_off = - kfd_queue_id_to_doorbell(dev, pqm->process, qid); - /* let DQM handle it*/ q_properties->vmid = 0; q_properties->queue_id = qid; @@ -248,6 +245,15 @@ int pqm_create_queue(struct process_queue_manager *pqm, goto err_create_queue; } + if (q) + /* Return the doorbell offset within the doorbell page + * to the caller so it can be passed up to user mode + * (in bytes). + */ + properties->doorbell_off = + (q->properties.doorbell_off * sizeof(uint32_t)) & + (kfd_doorbell_process_slice(dev) - 1); + pr_debug("PQM After DQM create queue\n"); list_add(&pqn->process_queue_list, &pqm->queues); -- cgit v1.2.1 From f6e27ff19d9db90e55576dea5aef98feb3d0ce5e Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:06 -0400 Subject: drm/amdkfd: Move packet writer functions into ASIC-specific file This is in preparation for GFXv9 (Vega10) which uses incompatible PM4 packet formats from previous ASIC generations. Signed-off-by: Shaoyun Liu Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 310 +++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 381 ++++----------------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 35 +- 4 files changed, 420 insertions(+), 316 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index e9c72d8f0935..500f022d089d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -196,15 +196,19 @@ static int allocate_vmid(struct device_queue_manager *dqm, static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, struct qcm_process_device *qpd) { - uint32_t len; + const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf; + int ret; if (!qpd->ib_kaddr) return -ENOMEM; - len = pm_create_release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr); + ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr); + if (ret) + return ret; return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid, - qpd->ib_base, (uint32_t *)qpd->ib_kaddr, len); + qpd->ib_base, (uint32_t *)qpd->ib_kaddr, + pmf->release_mem_size / sizeof(uint32_t)); } static void deallocate_vmid(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c index f1d48281e322..7ee326fa486d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c @@ -22,6 +22,9 @@ */ #include "kfd_kernel_queue.h" +#include "kfd_device_queue_manager.h" +#include "kfd_pm4_headers_vi.h" +#include "kfd_pm4_opcodes.h" static bool initialize_vi(struct 
kernel_queue *kq, struct kfd_dev *dev, enum kfd_queue_type type, unsigned int queue_size); @@ -54,3 +57,310 @@ static void uninitialize_vi(struct kernel_queue *kq) { kfd_gtt_sa_free(kq->dev, kq->eop_mem); } + +static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size) +{ + union PM4_MES_TYPE_3_HEADER header; + + header.u32All = 0; + header.opcode = opcode; + header.count = packet_size / 4 - 2; + header.type = PM4_TYPE_3; + + return header.u32All; +} + +static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer, + struct qcm_process_device *qpd) +{ + struct pm4_mes_map_process *packet; + + packet = (struct pm4_mes_map_process *)buffer; + + memset(buffer, 0, sizeof(struct pm4_mes_map_process)); + + packet->header.u32All = build_pm4_header(IT_MAP_PROCESS, + sizeof(struct pm4_mes_map_process)); + packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; + packet->bitfields2.process_quantum = 1; + packet->bitfields2.pasid = qpd->pqm->process->pasid; + packet->bitfields3.page_table_base = qpd->page_table_base; + packet->bitfields10.gds_size = qpd->gds_size; + packet->bitfields10.num_gws = qpd->num_gws; + packet->bitfields10.num_oac = qpd->num_oac; + packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + + packet->sh_mem_config = qpd->sh_mem_config; + packet->sh_mem_bases = qpd->sh_mem_bases; + packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; + packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; + + packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base; + + packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); + packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); + + return 0; +} + +static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, + uint64_t ib, size_t ib_size_in_dwords, bool chain) +{ + struct pm4_mes_runlist *packet; + int concurrent_proc_cnt = 0; + struct kfd_dev *kfd = pm->dqm->dev; + + if (WARN_ON(!ib)) + return -EFAULT; + + /* Determine the number of processes to map together to HW: + * it can not exceed the number of VMIDs available to the + * scheduler, and it is determined by the smaller of the number + * of processes in the runlist and kfd module parameter + * hws_max_conc_proc. + * Note: the arbitration between the number of VMIDs and + * hws_max_conc_proc has been done in + * kgd2kfd_device_init(). + */ + concurrent_proc_cnt = min(pm->dqm->processes_count, + kfd->max_proc_per_quantum); + + packet = (struct pm4_mes_runlist *)buffer; + + memset(buffer, 0, sizeof(struct pm4_mes_runlist)); + packet->header.u32All = build_pm4_header(IT_RUN_LIST, + sizeof(struct pm4_mes_runlist)); + + packet->bitfields4.ib_size = ib_size_in_dwords; + packet->bitfields4.chain = chain ? 
1 : 0; + packet->bitfields4.offload_polling = 0; + packet->bitfields4.valid = 1; + packet->bitfields4.process_cnt = concurrent_proc_cnt; + packet->ordinal2 = lower_32_bits(ib); + packet->bitfields3.ib_base_hi = upper_32_bits(ib); + + return 0; +} + +static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, + struct scheduling_resources *res) +{ + struct pm4_mes_set_resources *packet; + + packet = (struct pm4_mes_set_resources *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_set_resources)); + + packet->header.u32All = build_pm4_header(IT_SET_RESOURCES, + sizeof(struct pm4_mes_set_resources)); + + packet->bitfields2.queue_type = + queue_type__mes_set_resources__hsa_interface_queue_hiq; + packet->bitfields2.vmid_mask = res->vmid_mask; + packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; + packet->bitfields7.oac_mask = res->oac_mask; + packet->bitfields8.gds_heap_base = res->gds_heap_base; + packet->bitfields8.gds_heap_size = res->gds_heap_size; + + packet->gws_mask_lo = lower_32_bits(res->gws_mask); + packet->gws_mask_hi = upper_32_bits(res->gws_mask); + + packet->queue_mask_lo = lower_32_bits(res->queue_mask); + packet->queue_mask_hi = upper_32_bits(res->queue_mask); + + return 0; +} + +static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer, + struct queue *q, bool is_static) +{ + struct pm4_mes_map_queues *packet; + bool use_static = is_static; + + packet = (struct pm4_mes_map_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); + + packet->header.u32All = build_pm4_header(IT_MAP_QUEUES, + sizeof(struct pm4_mes_map_queues)); + packet->bitfields2.alloc_format = + alloc_format__mes_map_queues__one_per_pipe_vi; + packet->bitfields2.num_queues = 1; + packet->bitfields2.queue_sel = + queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; + + packet->bitfields2.engine_sel = + engine_sel__mes_map_queues__compute_vi; + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_compute_vi; + + switch (q->properties.type) { + case KFD_QUEUE_TYPE_COMPUTE: + if (use_static) + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_latency_static_queue_vi; + break; + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.queue_type = + queue_type__mes_map_queues__debug_interface_queue_vi; + break; + case KFD_QUEUE_TYPE_SDMA: + packet->bitfields2.engine_sel = q->properties.sdma_engine_id + + engine_sel__mes_map_queues__sdma0_vi; + use_static = false; /* no static queues under SDMA */ + break; + default: + WARN(1, "queue type %d", q->properties.type); + return -EINVAL; + } + packet->bitfields3.doorbell_offset = + q->properties.doorbell_off; + + packet->mqd_addr_lo = + lower_32_bits(q->gart_mqd_addr); + + packet->mqd_addr_hi = + upper_32_bits(q->gart_mqd_addr); + + packet->wptr_addr_lo = + lower_32_bits((uint64_t)q->properties.write_ptr); + + packet->wptr_addr_hi = + upper_32_bits((uint64_t)q->properties.write_ptr); + + return 0; +} + +static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, + enum kfd_queue_type type, + enum kfd_unmap_queues_filter filter, + uint32_t filter_param, bool reset, + unsigned int sdma_engine) +{ + struct pm4_mes_unmap_queues *packet; + + packet = (struct pm4_mes_unmap_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); + + packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES, + sizeof(struct pm4_mes_unmap_queues)); + switch (type) { + case KFD_QUEUE_TYPE_COMPUTE: + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.engine_sel = + 
engine_sel__mes_unmap_queues__compute; + break; + case KFD_QUEUE_TYPE_SDMA: + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__sdma0 + sdma_engine; + break; + default: + WARN(1, "queue type %d", type); + return -EINVAL; + } + + if (reset) + packet->bitfields2.action = + action__mes_unmap_queues__reset_queues; + else + packet->bitfields2.action = + action__mes_unmap_queues__preempt_queues; + + switch (filter) { + case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_specified_queues; + packet->bitfields2.num_queues = 1; + packet->bitfields3b.doorbell_offset0 = filter_param; + break; + case KFD_UNMAP_QUEUES_FILTER_BY_PASID: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; + packet->bitfields3a.pasid = filter_param; + break; + case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__unmap_all_queues; + break; + case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: + /* in this case, we do not preempt static queues */ + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__unmap_all_non_static_queues; + break; + default: + WARN(1, "filter %d", filter); + return -EINVAL; + } + + return 0; + +} + +static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer, + uint64_t fence_address, uint32_t fence_value) +{ + struct pm4_mes_query_status *packet; + + packet = (struct pm4_mes_query_status *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_query_status)); + + packet->header.u32All = build_pm4_header(IT_QUERY_STATUS, + sizeof(struct pm4_mes_query_status)); + + packet->bitfields2.context_id = 0; + packet->bitfields2.interrupt_sel = + interrupt_sel__mes_query_status__completion_status; + packet->bitfields2.command = + command__mes_query_status__fence_only_after_write_ack; + + packet->addr_hi = upper_32_bits((uint64_t)fence_address); + packet->addr_lo = lower_32_bits((uint64_t)fence_address); + packet->data_hi = upper_32_bits((uint64_t)fence_value); + packet->data_lo = lower_32_bits((uint64_t)fence_value); + + return 0; +} + +static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer) +{ + struct pm4_mec_release_mem *packet; + + packet = (struct pm4_mec_release_mem *)buffer; + memset(buffer, 0, sizeof(*packet)); + + packet->header.u32All = build_pm4_header(IT_RELEASE_MEM, + sizeof(*packet)); + + packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; + packet->bitfields2.event_index = event_index___release_mem__end_of_pipe; + packet->bitfields2.tcl1_action_ena = 1; + packet->bitfields2.tc_action_ena = 1; + packet->bitfields2.cache_policy = cache_policy___release_mem__lru; + packet->bitfields2.atc = 0; + + packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low; + packet->bitfields3.int_sel = + int_sel___release_mem__send_interrupt_after_write_confirm; + + packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; + packet->address_hi = upper_32_bits(gpu_addr); + + packet->data_lo = 0; + + return 0; +} + +const struct packet_manager_funcs kfd_vi_pm_funcs = { + .map_process = pm_map_process_vi, + .runlist = pm_runlist_vi, + .set_resources = pm_set_resources_vi, + .map_queues = pm_map_queues_vi, + .unmap_queues = pm_unmap_queues_vi, + .query_status = pm_query_status_vi, + .release_mem = pm_release_mem_vi, + .map_process_size = sizeof(struct pm4_mes_map_process), + .runlist_size = sizeof(struct pm4_mes_runlist), + .set_resources_size = sizeof(struct pm4_mes_set_resources), + 
.map_queues_size = sizeof(struct pm4_mes_map_queues), + .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .query_status_size = sizeof(struct pm4_mes_query_status), + .release_mem_size = sizeof(struct pm4_mec_release_mem) +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 89ba4c670ec5..860ff2481747 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -26,8 +26,6 @@ #include "kfd_device_queue_manager.h" #include "kfd_kernel_queue.h" #include "kfd_priv.h" -#include "kfd_pm4_headers_vi.h" -#include "kfd_pm4_opcodes.h" static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, unsigned int buffer_size_bytes) @@ -39,18 +37,6 @@ static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes, *wptr = temp; } -static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size) -{ - union PM4_MES_TYPE_3_HEADER header; - - header.u32All = 0; - header.opcode = opcode; - header.count = packet_size / 4 - 2; - header.type = PM4_TYPE_3; - - return header.u32All; -} - static void pm_calc_rlib_size(struct packet_manager *pm, unsigned int *rlib_size, bool *over_subscription) @@ -80,9 +66,9 @@ static void pm_calc_rlib_size(struct packet_manager *pm, pr_debug("Over subscribed runlist\n"); } - map_queue_size = sizeof(struct pm4_mes_map_queues); + map_queue_size = pm->pmf->map_queues_size; /* calculate run list ib allocation size */ - *rlib_size = process_count * sizeof(struct pm4_mes_map_process) + + *rlib_size = process_count * pm->pmf->map_process_size + queue_count * map_queue_size; /* @@ -90,7 +76,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm, * when over subscription */ if (*over_subscription) - *rlib_size += sizeof(struct pm4_mes_runlist); + *rlib_size += pm->pmf->runlist_size; pr_debug("runlist ib size %d\n", *rlib_size); } @@ -124,137 +110,6 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, return retval; } -static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer, - uint64_t ib, size_t ib_size_in_dwords, bool chain) -{ - struct pm4_mes_runlist *packet; - int concurrent_proc_cnt = 0; - struct kfd_dev *kfd = pm->dqm->dev; - - if (WARN_ON(!ib)) - return -EFAULT; - - /* Determine the number of processes to map together to HW: - * it can not exceed the number of VMIDs available to the - * scheduler, and it is determined by the smaller of the number - * of processes in the runlist and kfd module parameter - * hws_max_conc_proc. - * Note: the arbitration between the number of VMIDs and - * hws_max_conc_proc has been done in - * kgd2kfd_device_init(). - */ - concurrent_proc_cnt = min(pm->dqm->processes_count, - kfd->max_proc_per_quantum); - - packet = (struct pm4_mes_runlist *)buffer; - - memset(buffer, 0, sizeof(struct pm4_mes_runlist)); - packet->header.u32All = build_pm4_header(IT_RUN_LIST, - sizeof(struct pm4_mes_runlist)); - - packet->bitfields4.ib_size = ib_size_in_dwords; - packet->bitfields4.chain = chain ? 
1 : 0; - packet->bitfields4.offload_polling = 0; - packet->bitfields4.valid = 1; - packet->bitfields4.process_cnt = concurrent_proc_cnt; - packet->ordinal2 = lower_32_bits(ib); - packet->bitfields3.ib_base_hi = upper_32_bits(ib); - - return 0; -} - -static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer, - struct qcm_process_device *qpd) -{ - struct pm4_mes_map_process *packet; - - packet = (struct pm4_mes_map_process *)buffer; - - memset(buffer, 0, sizeof(struct pm4_mes_map_process)); - - packet->header.u32All = build_pm4_header(IT_MAP_PROCESS, - sizeof(struct pm4_mes_map_process)); - packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; - packet->bitfields2.process_quantum = 1; - packet->bitfields2.pasid = qpd->pqm->process->pasid; - packet->bitfields3.page_table_base = qpd->page_table_base; - packet->bitfields10.gds_size = qpd->gds_size; - packet->bitfields10.num_gws = qpd->num_gws; - packet->bitfields10.num_oac = qpd->num_oac; - packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; - - packet->sh_mem_config = qpd->sh_mem_config; - packet->sh_mem_bases = qpd->sh_mem_bases; - packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base; - packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit; - - packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base; - - packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); - packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); - - return 0; -} - -static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer, - struct queue *q, bool is_static) -{ - struct pm4_mes_map_queues *packet; - bool use_static = is_static; - - packet = (struct pm4_mes_map_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); - - packet->header.u32All = build_pm4_header(IT_MAP_QUEUES, - sizeof(struct pm4_mes_map_queues)); - packet->bitfields2.alloc_format = - alloc_format__mes_map_queues__one_per_pipe_vi; - packet->bitfields2.num_queues = 1; - packet->bitfields2.queue_sel = - queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; - - packet->bitfields2.engine_sel = - engine_sel__mes_map_queues__compute_vi; - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_compute_vi; - - switch (q->properties.type) { - case KFD_QUEUE_TYPE_COMPUTE: - if (use_static) - packet->bitfields2.queue_type = - queue_type__mes_map_queues__normal_latency_static_queue_vi; - break; - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.queue_type = - queue_type__mes_map_queues__debug_interface_queue_vi; - break; - case KFD_QUEUE_TYPE_SDMA: - packet->bitfields2.engine_sel = q->properties.sdma_engine_id + - engine_sel__mes_map_queues__sdma0_vi; - use_static = false; /* no static queues under SDMA */ - break; - default: - WARN(1, "queue type %d", q->properties.type); - return -EINVAL; - } - packet->bitfields3.doorbell_offset = - q->properties.doorbell_off; - - packet->mqd_addr_lo = - lower_32_bits(q->gart_mqd_addr); - - packet->mqd_addr_hi = - upper_32_bits(q->gart_mqd_addr); - - packet->wptr_addr_lo = - lower_32_bits((uint64_t)q->properties.write_ptr); - - packet->wptr_addr_hi = - upper_32_bits((uint64_t)q->properties.write_ptr); - - return 0; -} - static int pm_create_runlist_ib(struct packet_manager *pm, struct list_head *queues, uint64_t *rl_gpu_addr, @@ -292,12 +147,12 @@ static int pm_create_runlist_ib(struct packet_manager *pm, return -ENOMEM; } - retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd); + retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd); if (retval) return retval; 
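/*
 * Illustrative sketch only -- sketch_append_map_process() is a hypothetical
 * helper, not part of this patch. It condenses what the converted
 * pm_create_runlist_ib() now does at each step: ask the ASIC-specific table
 * behind pm->pmf to write the packet, then advance the runlist write pointer
 * by that table's packet size instead of a hard-coded sizeof().
 */
static int sketch_append_map_process(struct packet_manager *pm,
				     uint32_t *rl_buffer,
				     unsigned int *rl_wptr,
				     unsigned int alloc_size_bytes,
				     struct qcm_process_device *qpd)
{
	int retval;

	/* ASIC-specific writer fills the buffer at the current offset */
	retval = pm->pmf->map_process(pm, &rl_buffer[*rl_wptr], qpd);
	if (retval)
		return retval;

	/* advance by the ASIC-specific packet size; inc_wptr() checks the
	 * result against the runlist IB allocation size
	 */
	inc_wptr(rl_wptr, pm->pmf->map_process_size, alloc_size_bytes);
	return 0;
}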
proccesses_mapped++; - inc_wptr(&rl_wptr, sizeof(struct pm4_mes_map_process), + inc_wptr(&rl_wptr, pm->pmf->map_process_size, alloc_size_bytes); list_for_each_entry(kq, &qpd->priv_queue_list, list) { @@ -307,7 +162,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, pr_debug("static_queue, mapping kernel q %d, is debug status %d\n", kq->queue->queue, qpd->is_debug); - retval = pm_create_map_queue(pm, + retval = pm->pmf->map_queues(pm, &rl_buffer[rl_wptr], kq->queue, qpd->is_debug); @@ -315,7 +170,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, return retval; inc_wptr(&rl_wptr, - sizeof(struct pm4_mes_map_queues), + pm->pmf->map_queues_size, alloc_size_bytes); } @@ -326,7 +181,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, pr_debug("static_queue, mapping user queue %d, is debug status %d\n", q->queue, qpd->is_debug); - retval = pm_create_map_queue(pm, + retval = pm->pmf->map_queues(pm, &rl_buffer[rl_wptr], q, qpd->is_debug); @@ -335,7 +190,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, return retval; inc_wptr(&rl_wptr, - sizeof(struct pm4_mes_map_queues), + pm->pmf->map_queues_size, alloc_size_bytes); } } @@ -343,7 +198,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, pr_debug("Finished map process and queues to runlist\n"); if (is_over_subscription) - retval = pm_create_runlist(pm, &rl_buffer[rl_wptr], + retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr, alloc_size_bytes / sizeof(uint32_t), true); @@ -355,45 +210,25 @@ static int pm_create_runlist_ib(struct packet_manager *pm, return retval; } -/* pm_create_release_mem - Create a RELEASE_MEM packet and return the size - * of this packet - * @gpu_addr - GPU address of the packet. It's a virtual address. - * @buffer - buffer to fill up with the packet. 
It's a CPU kernel pointer - * Return - length of the packet - */ -uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer) -{ - struct pm4_mec_release_mem *packet; - - WARN_ON(!buffer); - - packet = (struct pm4_mec_release_mem *)buffer; - memset(buffer, 0, sizeof(*packet)); - - packet->header.u32All = build_pm4_header(IT_RELEASE_MEM, - sizeof(*packet)); - - packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; - packet->bitfields2.event_index = event_index___release_mem__end_of_pipe; - packet->bitfields2.tcl1_action_ena = 1; - packet->bitfields2.tc_action_ena = 1; - packet->bitfields2.cache_policy = cache_policy___release_mem__lru; - packet->bitfields2.atc = 0; - - packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low; - packet->bitfields3.int_sel = - int_sel___release_mem__send_interrupt_after_write_confirm; - - packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; - packet->address_hi = upper_32_bits(gpu_addr); - - packet->data_lo = 0; - - return sizeof(*packet) / sizeof(unsigned int); -} - int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) { + switch (dqm->dev->device_info->asic_family) { + case CHIP_KAVERI: + case CHIP_HAWAII: + /* PM4 packet structures on CIK are the same as on VI */ + case CHIP_CARRIZO: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + pm->pmf = &kfd_vi_pm_funcs; + break; + default: + WARN(1, "Unexpected ASIC family %u", + dqm->dev->device_info->asic_family); + return -EINVAL; + } + pm->dqm = dqm; mutex_init(&pm->lock); pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ); @@ -415,38 +250,25 @@ void pm_uninit(struct packet_manager *pm) int pm_send_set_resources(struct packet_manager *pm, struct scheduling_resources *res) { - struct pm4_mes_set_resources *packet; + uint32_t *buffer, size; int retval = 0; + size = pm->pmf->set_resources_size; mutex_lock(&pm->lock); pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, - sizeof(*packet) / sizeof(uint32_t), - (unsigned int **)&packet); - if (!packet) { + size / sizeof(uint32_t), + (unsigned int **)&buffer); + if (!buffer) { pr_err("Failed to allocate buffer on kernel queue\n"); retval = -ENOMEM; goto out; } - memset(packet, 0, sizeof(struct pm4_mes_set_resources)); - packet->header.u32All = build_pm4_header(IT_SET_RESOURCES, - sizeof(struct pm4_mes_set_resources)); - - packet->bitfields2.queue_type = - queue_type__mes_set_resources__hsa_interface_queue_hiq; - packet->bitfields2.vmid_mask = res->vmid_mask; - packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100; - packet->bitfields7.oac_mask = res->oac_mask; - packet->bitfields8.gds_heap_base = res->gds_heap_base; - packet->bitfields8.gds_heap_size = res->gds_heap_size; - - packet->gws_mask_lo = lower_32_bits(res->gws_mask); - packet->gws_mask_hi = upper_32_bits(res->gws_mask); - - packet->queue_mask_lo = lower_32_bits(res->queue_mask); - packet->queue_mask_hi = upper_32_bits(res->queue_mask); - - pm->priv_queue->ops.submit_packet(pm->priv_queue); + retval = pm->pmf->set_resources(pm, buffer, res); + if (!retval) + pm->priv_queue->ops.submit_packet(pm->priv_queue); + else + pm->priv_queue->ops.rollback_packet(pm->priv_queue); out: mutex_unlock(&pm->lock); @@ -468,7 +290,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr); - packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t); + packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t); 
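/*
 * Illustrative sketch only -- sketch_send_single_packet() is a hypothetical
 * helper, not part of this patch. After this rework, every pm_send_*()
 * function follows the same shape: reserve space on the HIQ kernel queue,
 * let an ASIC-specific writer from pm->pmf fill it, then submit on success
 * or roll the reservation back on error. The emit callback below stands in
 * for that writer (the real pmf hooks also take per-packet arguments).
 */
static int sketch_send_single_packet(struct packet_manager *pm, uint32_t size,
				     int (*emit)(struct packet_manager *pm,
						 uint32_t *buffer))
{
	uint32_t *buffer;
	int retval = 0;

	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = emit(pm, buffer);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);
out:
	mutex_unlock(&pm->lock);
	return retval;
}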
mutex_lock(&pm->lock); retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, @@ -476,7 +298,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) if (retval) goto fail_acquire_packet_buffer; - retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr, + retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr, rl_ib_size / sizeof(uint32_t), false); if (retval) goto fail_create_runlist; @@ -499,37 +321,29 @@ fail_create_runlist_ib: int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, uint32_t fence_value) { - int retval; - struct pm4_mes_query_status *packet; + uint32_t *buffer, size; + int retval = 0; if (WARN_ON(!fence_address)) return -EFAULT; + size = pm->pmf->query_status_size; mutex_lock(&pm->lock); - retval = pm->priv_queue->ops.acquire_packet_buffer( - pm->priv_queue, - sizeof(struct pm4_mes_query_status) / sizeof(uint32_t), - (unsigned int **)&packet); - if (retval) - goto fail_acquire_packet_buffer; - - packet->header.u32All = build_pm4_header(IT_QUERY_STATUS, - sizeof(struct pm4_mes_query_status)); - - packet->bitfields2.context_id = 0; - packet->bitfields2.interrupt_sel = - interrupt_sel__mes_query_status__completion_status; - packet->bitfields2.command = - command__mes_query_status__fence_only_after_write_ack; - - packet->addr_hi = upper_32_bits((uint64_t)fence_address); - packet->addr_lo = lower_32_bits((uint64_t)fence_address); - packet->data_hi = upper_32_bits((uint64_t)fence_value); - packet->data_lo = lower_32_bits((uint64_t)fence_value); + pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + size / sizeof(uint32_t), (unsigned int **)&buffer); + if (!buffer) { + pr_err("Failed to allocate buffer on kernel queue\n"); + retval = -ENOMEM; + goto out; + } - pm->priv_queue->ops.submit_packet(pm->priv_queue); + retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value); + if (!retval) + pm->priv_queue->ops.submit_packet(pm->priv_queue); + else + pm->priv_queue->ops.rollback_packet(pm->priv_queue); -fail_acquire_packet_buffer: +out: mutex_unlock(&pm->lock); return retval; } @@ -539,82 +353,27 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, uint32_t filter_param, bool reset, unsigned int sdma_engine) { - int retval; - uint32_t *buffer; - struct pm4_mes_unmap_queues *packet; + uint32_t *buffer, size; + int retval = 0; + size = pm->pmf->unmap_queues_size; mutex_lock(&pm->lock); - retval = pm->priv_queue->ops.acquire_packet_buffer( - pm->priv_queue, - sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t), - &buffer); - if (retval) - goto err_acquire_packet_buffer; - - packet = (struct pm4_mes_unmap_queues *)buffer; - memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); - pr_debug("static_queue: unmapping queues: filter is %d , reset is %d , type is %d\n", - filter, reset, type); - packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES, - sizeof(struct pm4_mes_unmap_queues)); - switch (type) { - case KFD_QUEUE_TYPE_COMPUTE: - case KFD_QUEUE_TYPE_DIQ: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__compute; - break; - case KFD_QUEUE_TYPE_SDMA: - packet->bitfields2.engine_sel = - engine_sel__mes_unmap_queues__sdma0 + sdma_engine; - break; - default: - WARN(1, "queue type %d", type); - retval = -EINVAL; - goto err_invalid; + pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue, + size / sizeof(uint32_t), (unsigned int **)&buffer); + if (!buffer) { + pr_err("Failed to allocate buffer on kernel queue\n"); + retval = -ENOMEM; + goto out; } - if (reset) 
- packet->bitfields2.action = - action__mes_unmap_queues__reset_queues; + retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param, + reset, sdma_engine); + if (!retval) + pm->priv_queue->ops.submit_packet(pm->priv_queue); else - packet->bitfields2.action = - action__mes_unmap_queues__preempt_queues; - - switch (filter) { - case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_specified_queues; - packet->bitfields2.num_queues = 1; - packet->bitfields3b.doorbell_offset0 = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_BY_PASID: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; - packet->bitfields3a.pasid = filter_param; - break; - case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_queues; - break; - case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: - /* in this case, we do not preempt static queues */ - packet->bitfields2.queue_sel = - queue_sel__mes_unmap_queues__unmap_all_non_static_queues; - break; - default: - WARN(1, "filter %d", filter); - retval = -EINVAL; - goto err_invalid; - } + pm->priv_queue->ops.rollback_packet(pm->priv_queue); - pm->priv_queue->ops.submit_packet(pm->priv_queue); - - mutex_unlock(&pm->lock); - return 0; - -err_invalid: - pm->priv_queue->ops.rollback_packet(pm->priv_queue); -err_acquire_packet_buffer: +out: mutex_unlock(&pm->lock); return retval; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index ddb3c8cdfb7b..873a8fbc14ce 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -866,8 +866,41 @@ struct packet_manager { bool allocated; struct kfd_mem_obj *ib_buffer_obj; unsigned int ib_size_bytes; + + const struct packet_manager_funcs *pmf; +}; + +struct packet_manager_funcs { + /* Support ASIC-specific packet formats for PM4 packets */ + int (*map_process)(struct packet_manager *pm, uint32_t *buffer, + struct qcm_process_device *qpd); + int (*runlist)(struct packet_manager *pm, uint32_t *buffer, + uint64_t ib, size_t ib_size_in_dwords, bool chain); + int (*set_resources)(struct packet_manager *pm, uint32_t *buffer, + struct scheduling_resources *res); + int (*map_queues)(struct packet_manager *pm, uint32_t *buffer, + struct queue *q, bool is_static); + int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer, + enum kfd_queue_type type, + enum kfd_unmap_queues_filter mode, + uint32_t filter_param, bool reset, + unsigned int sdma_engine); + int (*query_status)(struct packet_manager *pm, uint32_t *buffer, + uint64_t fence_address, uint32_t fence_value); + int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); + + /* Packet sizes */ + int map_process_size; + int runlist_size; + int set_resources_size; + int map_queues_size; + int unmap_queues_size; + int query_status_size; + int release_mem_size; }; +extern const struct packet_manager_funcs kfd_vi_pm_funcs; + int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm); void pm_uninit(struct packet_manager *pm); int pm_send_set_resources(struct packet_manager *pm, @@ -883,8 +916,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, void pm_release_ib(struct packet_manager *pm); -uint32_t pm_create_release_mem(uint64_t gpu_addr, uint32_t *buffer); - uint64_t kfd_get_number_elems(struct kfd_dev *kfd); /* Events */ -- cgit v1.2.1 From 454150b1f9a6be0a69138a698471bd13424204cc Mon Sep 17 
00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:07 -0400 Subject: drm/amdkfd: Add GFXv9 PM4 packet writer functions Signed-off-by: Shaoyun Liu Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/Makefile | 7 +- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 331 +++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 18 +- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 4 + drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 583 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 6 + 6 files changed, 937 insertions(+), 12 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 0d0242240c47..52b3c1b419f1 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -31,9 +31,10 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ kfd_process.o kfd_queue.o kfd_mqd_manager.o \ kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \ kfd_kernel_queue.o kfd_kernel_queue_cik.o \ - kfd_kernel_queue_vi.o kfd_packet_manager.o \ - kfd_process_queue_manager.o kfd_device_queue_manager.o \ - kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \ + kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \ + kfd_packet_manager.o kfd_process_queue_manager.o \ + kfd_device_queue_manager.o kfd_device_queue_manager_cik.o \ + kfd_device_queue_manager_vi.o \ kfd_interrupt.o kfd_events.o cik_event_interrupt.o \ kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c new file mode 100644 index 000000000000..ece7d59537b7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -0,0 +1,331 @@ +/* + * Copyright 2016-2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "kfd_kernel_queue.h" +#include "kfd_device_queue_manager.h" +#include "kfd_pm4_headers_ai.h" +#include "kfd_pm4_opcodes.h" + +static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, + enum kfd_queue_type type, unsigned int queue_size); +static void uninitialize_v9(struct kernel_queue *kq); + +void kernel_queue_init_v9(struct kernel_queue_ops *ops) +{ + ops->initialize = initialize_v9; + ops->uninitialize = uninitialize_v9; +} + +static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, + enum kfd_queue_type type, unsigned int queue_size) +{ + int retval; + + retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); + if (retval) + return false; + + kq->eop_gpu_addr = kq->eop_mem->gpu_addr; + kq->eop_kernel_addr = kq->eop_mem->cpu_ptr; + + memset(kq->eop_kernel_addr, 0, PAGE_SIZE); + + return true; +} + +static void uninitialize_v9(struct kernel_queue *kq) +{ + kfd_gtt_sa_free(kq->dev, kq->eop_mem); +} + +static int pm_map_process_v9(struct packet_manager *pm, + uint32_t *buffer, struct qcm_process_device *qpd) +{ + struct pm4_mes_map_process *packet; + uint64_t vm_page_table_base_addr = + (uint64_t)(qpd->page_table_base) << 12; + + packet = (struct pm4_mes_map_process *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_map_process)); + + packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, + sizeof(struct pm4_mes_map_process)); + packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; + packet->bitfields2.process_quantum = 1; + packet->bitfields2.pasid = qpd->pqm->process->pasid; + packet->bitfields14.gds_size = qpd->gds_size; + packet->bitfields14.num_gws = qpd->num_gws; + packet->bitfields14.num_oac = qpd->num_oac; + packet->bitfields14.sdma_enable = 1; + packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + + packet->sh_mem_config = qpd->sh_mem_config; + packet->sh_mem_bases = qpd->sh_mem_bases; + packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8); + packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8); + packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8); + packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8); + + packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area); + packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area); + + packet->vm_context_page_table_base_addr_lo32 = + lower_32_bits(vm_page_table_base_addr); + packet->vm_context_page_table_base_addr_hi32 = + upper_32_bits(vm_page_table_base_addr); + + return 0; +} + +static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer, + uint64_t ib, size_t ib_size_in_dwords, bool chain) +{ + struct pm4_mes_runlist *packet; + + int concurrent_proc_cnt = 0; + struct kfd_dev *kfd = pm->dqm->dev; + + /* Determine the number of processes to map together to HW: + * it can not exceed the number of VMIDs available to the + * scheduler, and it is determined by the smaller of the number + * of processes in the runlist and kfd module parameter + * hws_max_conc_proc. + * Note: the arbitration between the number of VMIDs and + * hws_max_conc_proc has been done in + * kgd2kfd_device_init(). + */ + concurrent_proc_cnt = min(pm->dqm->processes_count, + kfd->max_proc_per_quantum); + + packet = (struct pm4_mes_runlist *)buffer; + + memset(buffer, 0, sizeof(struct pm4_mes_runlist)); + packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST, + sizeof(struct pm4_mes_runlist)); + + packet->bitfields4.ib_size = ib_size_in_dwords; + packet->bitfields4.chain = chain ? 
1 : 0; + packet->bitfields4.offload_polling = 0; + packet->bitfields4.valid = 1; + packet->bitfields4.process_cnt = concurrent_proc_cnt; + packet->ordinal2 = lower_32_bits(ib); + packet->ib_base_hi = upper_32_bits(ib); + + return 0; +} + +static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, + struct queue *q, bool is_static) +{ + struct pm4_mes_map_queues *packet; + bool use_static = is_static; + + packet = (struct pm4_mes_map_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); + + packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES, + sizeof(struct pm4_mes_map_queues)); + packet->bitfields2.alloc_format = + alloc_format__mes_map_queues__one_per_pipe_vi; + packet->bitfields2.num_queues = 1; + packet->bitfields2.queue_sel = + queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi; + + packet->bitfields2.engine_sel = + engine_sel__mes_map_queues__compute_vi; + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_compute_vi; + + switch (q->properties.type) { + case KFD_QUEUE_TYPE_COMPUTE: + if (use_static) + packet->bitfields2.queue_type = + queue_type__mes_map_queues__normal_latency_static_queue_vi; + break; + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.queue_type = + queue_type__mes_map_queues__debug_interface_queue_vi; + break; + case KFD_QUEUE_TYPE_SDMA: + packet->bitfields2.engine_sel = q->properties.sdma_engine_id + + engine_sel__mes_map_queues__sdma0_vi; + use_static = false; /* no static queues under SDMA */ + break; + default: + WARN(1, "queue type %d", q->properties.type); + return -EINVAL; + } + packet->bitfields3.doorbell_offset = + q->properties.doorbell_off; + + packet->mqd_addr_lo = + lower_32_bits(q->gart_mqd_addr); + + packet->mqd_addr_hi = + upper_32_bits(q->gart_mqd_addr); + + packet->wptr_addr_lo = + lower_32_bits((uint64_t)q->properties.write_ptr); + + packet->wptr_addr_hi = + upper_32_bits((uint64_t)q->properties.write_ptr); + + return 0; +} + +static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, + enum kfd_queue_type type, + enum kfd_unmap_queues_filter filter, + uint32_t filter_param, bool reset, + unsigned int sdma_engine) +{ + struct pm4_mes_unmap_queues *packet; + + packet = (struct pm4_mes_unmap_queues *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); + + packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, + sizeof(struct pm4_mes_unmap_queues)); + switch (type) { + case KFD_QUEUE_TYPE_COMPUTE: + case KFD_QUEUE_TYPE_DIQ: + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__compute; + break; + case KFD_QUEUE_TYPE_SDMA: + packet->bitfields2.engine_sel = + engine_sel__mes_unmap_queues__sdma0 + sdma_engine; + break; + default: + WARN(1, "queue type %d", type); + return -EINVAL; + } + + if (reset) + packet->bitfields2.action = + action__mes_unmap_queues__reset_queues; + else + packet->bitfields2.action = + action__mes_unmap_queues__preempt_queues; + + switch (filter) { + case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_specified_queues; + packet->bitfields2.num_queues = 1; + packet->bitfields3b.doorbell_offset0 = filter_param; + break; + case KFD_UNMAP_QUEUES_FILTER_BY_PASID: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__perform_request_on_pasid_queues; + packet->bitfields3a.pasid = filter_param; + break; + case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__unmap_all_queues; + break; + case 
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: + /* in this case, we do not preempt static queues */ + packet->bitfields2.queue_sel = + queue_sel__mes_unmap_queues__unmap_all_non_static_queues; + break; + default: + WARN(1, "filter %d", filter); + return -EINVAL; + } + + return 0; + +} + +static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer, + uint64_t fence_address, uint32_t fence_value) +{ + struct pm4_mes_query_status *packet; + + packet = (struct pm4_mes_query_status *)buffer; + memset(buffer, 0, sizeof(struct pm4_mes_query_status)); + + + packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS, + sizeof(struct pm4_mes_query_status)); + + packet->bitfields2.context_id = 0; + packet->bitfields2.interrupt_sel = + interrupt_sel__mes_query_status__completion_status; + packet->bitfields2.command = + command__mes_query_status__fence_only_after_write_ack; + + packet->addr_hi = upper_32_bits((uint64_t)fence_address); + packet->addr_lo = lower_32_bits((uint64_t)fence_address); + packet->data_hi = upper_32_bits((uint64_t)fence_value); + packet->data_lo = lower_32_bits((uint64_t)fence_value); + + return 0; +} + + +static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer) +{ + struct pm4_mec_release_mem *packet; + + packet = (struct pm4_mec_release_mem *)buffer; + memset(buffer, 0, sizeof(struct pm4_mec_release_mem)); + + packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM, + sizeof(struct pm4_mec_release_mem)); + + packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; + packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe; + packet->bitfields2.tcl1_action_ena = 1; + packet->bitfields2.tc_action_ena = 1; + packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru; + + packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low; + packet->bitfields3.int_sel = + int_sel__mec_release_mem__send_interrupt_after_write_confirm; + + packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2; + packet->address_hi = upper_32_bits(gpu_addr); + + packet->data_lo = 0; + + return 0; +} + +const struct packet_manager_funcs kfd_v9_pm_funcs = { + .map_process = pm_map_process_v9, + .runlist = pm_runlist_v9, + .set_resources = pm_set_resources_vi, + .map_queues = pm_map_queues_v9, + .unmap_queues = pm_unmap_queues_v9, + .query_status = pm_query_status_v9, + .release_mem = pm_release_mem_v9, + .map_process_size = sizeof(struct pm4_mes_map_process), + .runlist_size = sizeof(struct pm4_mes_runlist), + .set_resources_size = sizeof(struct pm4_mes_set_resources), + .map_queues_size = sizeof(struct pm4_mes_map_queues), + .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .query_status_size = sizeof(struct pm4_mes_query_status), + .release_mem_size = sizeof(struct pm4_mec_release_mem) +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c index 7ee326fa486d..f9019efd31b9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c @@ -58,7 +58,7 @@ static void uninitialize_vi(struct kernel_queue *kq) kfd_gtt_sa_free(kq->dev, kq->eop_mem); } -static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size) +unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size) { union PM4_MES_TYPE_3_HEADER header; @@ -79,7 +79,7 @@ static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer, memset(buffer, 0, sizeof(struct pm4_mes_map_process)); - packet->header.u32All = 
build_pm4_header(IT_MAP_PROCESS, + packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS, sizeof(struct pm4_mes_map_process)); packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0; packet->bitfields2.process_quantum = 1; @@ -128,7 +128,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, packet = (struct pm4_mes_runlist *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_runlist)); - packet->header.u32All = build_pm4_header(IT_RUN_LIST, + packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST, sizeof(struct pm4_mes_runlist)); packet->bitfields4.ib_size = ib_size_in_dwords; @@ -142,7 +142,7 @@ static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer, return 0; } -static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, +int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, struct scheduling_resources *res) { struct pm4_mes_set_resources *packet; @@ -150,7 +150,7 @@ static int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, packet = (struct pm4_mes_set_resources *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_set_resources)); - packet->header.u32All = build_pm4_header(IT_SET_RESOURCES, + packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES, sizeof(struct pm4_mes_set_resources)); packet->bitfields2.queue_type = @@ -179,7 +179,7 @@ static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer, packet = (struct pm4_mes_map_queues *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_queues)); - packet->header.u32All = build_pm4_header(IT_MAP_QUEUES, + packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES, sizeof(struct pm4_mes_map_queues)); packet->bitfields2.alloc_format = alloc_format__mes_map_queues__one_per_pipe_vi; @@ -240,7 +240,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer, packet = (struct pm4_mes_unmap_queues *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues)); - packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES, + packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES, sizeof(struct pm4_mes_unmap_queues)); switch (type) { case KFD_QUEUE_TYPE_COMPUTE: @@ -302,7 +302,7 @@ static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer, packet = (struct pm4_mes_query_status *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_query_status)); - packet->header.u32All = build_pm4_header(IT_QUERY_STATUS, + packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS, sizeof(struct pm4_mes_query_status)); packet->bitfields2.context_id = 0; @@ -326,7 +326,7 @@ static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer) packet = (struct pm4_mec_release_mem *)buffer; memset(buffer, 0, sizeof(*packet)); - packet->header.u32All = build_pm4_header(IT_RELEASE_MEM, + packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM, sizeof(*packet)); packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 860ff2481747..91f0350b6180 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -223,6 +223,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_POLARIS11: pm->pmf = &kfd_vi_pm_funcs; break; + case CHIP_VEGA10: + case CHIP_RAVEN: + pm->pmf = &kfd_v9_pm_funcs; + break; default: WARN(1, "Unexpected ASIC family %u", dqm->dev->device_info->asic_family); diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h new file mode 100644 index 000000000000..f2bcf5c092ea --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -0,0 +1,583 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef F32_MES_PM4_PACKETS_H +#define F32_MES_PM4_PACKETS_H + +#ifndef PM4_MES_HEADER_DEFINED +#define PM4_MES_HEADER_DEFINED +union PM4_MES_TYPE_3_HEADER { + struct { + uint32_t reserved1 : 8; /* < reserved */ + uint32_t opcode : 8; /* < IT opcode */ + uint32_t count : 14;/* < number of DWORDs - 1 in the + * information body. + */ + uint32_t type : 2; /* < packet identifier. 
+ * It should be 3 for type 3 packets + */ + }; + uint32_t u32All; +}; +#endif /* PM4_MES_HEADER_DEFINED */ + +/*--------------------MES_SET_RESOURCES--------------------*/ + +#ifndef PM4_MES_SET_RESOURCES_DEFINED +#define PM4_MES_SET_RESOURCES_DEFINED +enum mes_set_resources_queue_type_enum { + queue_type__mes_set_resources__kernel_interface_queue_kiq = 0, + queue_type__mes_set_resources__hsa_interface_queue_hiq = 1, + queue_type__mes_set_resources__hsa_debug_interface_queue = 4 +}; + + +struct pm4_mes_set_resources { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t vmid_mask:16; + uint32_t unmap_latency:8; + uint32_t reserved1:5; + enum mes_set_resources_queue_type_enum queue_type:3; + } bitfields2; + uint32_t ordinal2; + }; + + uint32_t queue_mask_lo; + uint32_t queue_mask_hi; + uint32_t gws_mask_lo; + uint32_t gws_mask_hi; + + union { + struct { + uint32_t oac_mask:16; + uint32_t reserved2:16; + } bitfields7; + uint32_t ordinal7; + }; + + union { + struct { + uint32_t gds_heap_base:6; + uint32_t reserved3:5; + uint32_t gds_heap_size:6; + uint32_t reserved4:15; + } bitfields8; + uint32_t ordinal8; + }; + +}; +#endif + +/*--------------------MES_RUN_LIST--------------------*/ + +#ifndef PM4_MES_RUN_LIST_DEFINED +#define PM4_MES_RUN_LIST_DEFINED + +struct pm4_mes_runlist { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t reserved1:2; + uint32_t ib_base_lo:30; + } bitfields2; + uint32_t ordinal2; + }; + + uint32_t ib_base_hi; + + union { + struct { + uint32_t ib_size:20; + uint32_t chain:1; + uint32_t offload_polling:1; + uint32_t reserved2:1; + uint32_t valid:1; + uint32_t process_cnt:4; + uint32_t reserved3:4; + } bitfields4; + uint32_t ordinal4; + }; + +}; +#endif + +/*--------------------MES_MAP_PROCESS--------------------*/ + +#ifndef PM4_MES_MAP_PROCESS_DEFINED +#define PM4_MES_MAP_PROCESS_DEFINED + +struct pm4_mes_map_process { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t pasid:16; + uint32_t reserved1:8; + uint32_t diq_enable:1; + uint32_t process_quantum:7; + } bitfields2; + uint32_t ordinal2; + }; + + uint32_t vm_context_page_table_base_addr_lo32; + + uint32_t vm_context_page_table_base_addr_hi32; + + uint32_t sh_mem_bases; + + uint32_t sh_mem_config; + + uint32_t sq_shader_tba_lo; + + uint32_t sq_shader_tba_hi; + + uint32_t sq_shader_tma_lo; + + uint32_t sq_shader_tma_hi; + + uint32_t reserved6; + + uint32_t gds_addr_lo; + + uint32_t gds_addr_hi; + + union { + struct { + uint32_t num_gws:6; + uint32_t reserved7:1; + uint32_t sdma_enable:1; + uint32_t num_oac:4; + uint32_t reserved8:4; + uint32_t gds_size:6; + uint32_t num_queues:10; + } bitfields14; + uint32_t ordinal14; + }; + + uint32_t completion_signal_lo; + + uint32_t completion_signal_hi; + +}; + +#endif + +/*--------------------MES_MAP_PROCESS_VM--------------------*/ + +#ifndef PM4_MES_MAP_PROCESS_VM_DEFINED +#define PM4_MES_MAP_PROCESS_VM_DEFINED + +struct PM4_MES_MAP_PROCESS_VM { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + uint32_t reserved1; + + uint32_t vm_context_cntl; + + uint32_t reserved2; + + uint32_t vm_context_page_table_end_addr_lo32; + + uint32_t vm_context_page_table_end_addr_hi32; + + uint32_t vm_context_page_table_start_addr_lo32; + + uint32_t vm_context_page_table_start_addr_hi32; + + uint32_t reserved3; + + uint32_t reserved4; + + uint32_t 
reserved5; + + uint32_t reserved6; + + uint32_t reserved7; + + uint32_t reserved8; + + uint32_t completion_signal_lo32; + + uint32_t completion_signal_hi32; + +}; +#endif + +/*--------------------MES_MAP_QUEUES--------------------*/ + +#ifndef PM4_MES_MAP_QUEUES_VI_DEFINED +#define PM4_MES_MAP_QUEUES_VI_DEFINED +enum mes_map_queues_queue_sel_enum { + queue_sel__mes_map_queues__map_to_specified_queue_slots_vi = 0, +queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi = 1 +}; + +enum mes_map_queues_queue_type_enum { + queue_type__mes_map_queues__normal_compute_vi = 0, + queue_type__mes_map_queues__debug_interface_queue_vi = 1, + queue_type__mes_map_queues__normal_latency_static_queue_vi = 2, +queue_type__mes_map_queues__low_latency_static_queue_vi = 3 +}; + +enum mes_map_queues_alloc_format_enum { + alloc_format__mes_map_queues__one_per_pipe_vi = 0, +alloc_format__mes_map_queues__all_on_one_pipe_vi = 1 +}; + +enum mes_map_queues_engine_sel_enum { + engine_sel__mes_map_queues__compute_vi = 0, + engine_sel__mes_map_queues__sdma0_vi = 2, + engine_sel__mes_map_queues__sdma1_vi = 3 +}; + + +struct pm4_mes_map_queues { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t reserved1:4; + enum mes_map_queues_queue_sel_enum queue_sel:2; + uint32_t reserved2:15; + enum mes_map_queues_queue_type_enum queue_type:3; + enum mes_map_queues_alloc_format_enum alloc_format:2; + enum mes_map_queues_engine_sel_enum engine_sel:3; + uint32_t num_queues:3; + } bitfields2; + uint32_t ordinal2; + }; + + union { + struct { + uint32_t reserved3:1; + uint32_t check_disable:1; + uint32_t doorbell_offset:26; + uint32_t reserved4:4; + } bitfields3; + uint32_t ordinal3; + }; + + uint32_t mqd_addr_lo; + uint32_t mqd_addr_hi; + uint32_t wptr_addr_lo; + uint32_t wptr_addr_hi; +}; +#endif + +/*--------------------MES_QUERY_STATUS--------------------*/ + +#ifndef PM4_MES_QUERY_STATUS_DEFINED +#define PM4_MES_QUERY_STATUS_DEFINED +enum mes_query_status_interrupt_sel_enum { + interrupt_sel__mes_query_status__completion_status = 0, + interrupt_sel__mes_query_status__process_status = 1, + interrupt_sel__mes_query_status__queue_status = 2 +}; + +enum mes_query_status_command_enum { + command__mes_query_status__interrupt_only = 0, + command__mes_query_status__fence_only_immediate = 1, + command__mes_query_status__fence_only_after_write_ack = 2, + command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3 +}; + +enum mes_query_status_engine_sel_enum { + engine_sel__mes_query_status__compute = 0, + engine_sel__mes_query_status__sdma0_queue = 2, + engine_sel__mes_query_status__sdma1_queue = 3 +}; + +struct pm4_mes_query_status { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + uint32_t context_id:28; + enum mes_query_status_interrupt_sel_enum interrupt_sel:2; + enum mes_query_status_command_enum command:2; + } bitfields2; + uint32_t ordinal2; + }; + + union { + struct { + uint32_t pasid:16; + uint32_t reserved1:16; + } bitfields3a; + struct { + uint32_t reserved2:2; + uint32_t doorbell_offset:26; + enum mes_query_status_engine_sel_enum engine_sel:3; + uint32_t reserved3:1; + } bitfields3b; + uint32_t ordinal3; + }; + + uint32_t addr_lo; + uint32_t addr_hi; + uint32_t data_lo; + uint32_t data_hi; +}; +#endif + +/*--------------------MES_UNMAP_QUEUES--------------------*/ + +#ifndef PM4_MES_UNMAP_QUEUES_DEFINED +#define PM4_MES_UNMAP_QUEUES_DEFINED +enum mes_unmap_queues_action_enum { + 
action__mes_unmap_queues__preempt_queues = 0, + action__mes_unmap_queues__reset_queues = 1, + action__mes_unmap_queues__disable_process_queues = 2, + action__mes_unmap_queues__reserved = 3 +}; + +enum mes_unmap_queues_queue_sel_enum { + queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0, + queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1, + queue_sel__mes_unmap_queues__unmap_all_queues = 2, + queue_sel__mes_unmap_queues__unmap_all_non_static_queues = 3 +}; + +enum mes_unmap_queues_engine_sel_enum { + engine_sel__mes_unmap_queues__compute = 0, + engine_sel__mes_unmap_queues__sdma0 = 2, + engine_sel__mes_unmap_queues__sdmal = 3 +}; + +struct pm4_mes_unmap_queues { + union { + union PM4_MES_TYPE_3_HEADER header; /* header */ + uint32_t ordinal1; + }; + + union { + struct { + enum mes_unmap_queues_action_enum action:2; + uint32_t reserved1:2; + enum mes_unmap_queues_queue_sel_enum queue_sel:2; + uint32_t reserved2:20; + enum mes_unmap_queues_engine_sel_enum engine_sel:3; + uint32_t num_queues:3; + } bitfields2; + uint32_t ordinal2; + }; + + union { + struct { + uint32_t pasid:16; + uint32_t reserved3:16; + } bitfields3a; + struct { + uint32_t reserved4:2; + uint32_t doorbell_offset0:26; + int32_t reserved5:4; + } bitfields3b; + uint32_t ordinal3; + }; + + union { + struct { + uint32_t reserved6:2; + uint32_t doorbell_offset1:26; + uint32_t reserved7:4; + } bitfields4; + uint32_t ordinal4; + }; + + union { + struct { + uint32_t reserved8:2; + uint32_t doorbell_offset2:26; + uint32_t reserved9:4; + } bitfields5; + uint32_t ordinal5; + }; + + union { + struct { + uint32_t reserved10:2; + uint32_t doorbell_offset3:26; + uint32_t reserved11:4; + } bitfields6; + uint32_t ordinal6; + }; +}; +#endif + +#ifndef PM4_MEC_RELEASE_MEM_DEFINED +#define PM4_MEC_RELEASE_MEM_DEFINED + +enum mec_release_mem_event_index_enum { + event_index__mec_release_mem__end_of_pipe = 5, + event_index__mec_release_mem__shader_done = 6 +}; + +enum mec_release_mem_cache_policy_enum { + cache_policy__mec_release_mem__lru = 0, + cache_policy__mec_release_mem__stream = 1 +}; + +enum mec_release_mem_pq_exe_status_enum { + pq_exe_status__mec_release_mem__default = 0, + pq_exe_status__mec_release_mem__phase_update = 1 +}; + +enum mec_release_mem_dst_sel_enum { + dst_sel__mec_release_mem__memory_controller = 0, + dst_sel__mec_release_mem__tc_l2 = 1, + dst_sel__mec_release_mem__queue_write_pointer_register = 2, + dst_sel__mec_release_mem__queue_write_pointer_poll_mask_bit = 3 +}; + +enum mec_release_mem_int_sel_enum { + int_sel__mec_release_mem__none = 0, + int_sel__mec_release_mem__send_interrupt_only = 1, + int_sel__mec_release_mem__send_interrupt_after_write_confirm = 2, + int_sel__mec_release_mem__send_data_after_write_confirm = 3, + int_sel__mec_release_mem__unconditionally_send_int_ctxid = 4, + int_sel__mec_release_mem__conditionally_send_int_ctxid_based_on_32_bit_compare = 5, + int_sel__mec_release_mem__conditionally_send_int_ctxid_based_on_64_bit_compare = 6 +}; + +enum mec_release_mem_data_sel_enum { + data_sel__mec_release_mem__none = 0, + data_sel__mec_release_mem__send_32_bit_low = 1, + data_sel__mec_release_mem__send_64_bit_data = 2, + data_sel__mec_release_mem__send_gpu_clock_counter = 3, + data_sel__mec_release_mem__send_cp_perfcounter_hi_lo = 4, + data_sel__mec_release_mem__store_gds_data_to_memory = 5 +}; + +struct pm4_mec_release_mem { + union { + union PM4_MES_TYPE_3_HEADER header; /*header */ + unsigned int ordinal1; + }; + + union { + struct { + unsigned int event_type:6; + 
unsigned int reserved1:2; + enum mec_release_mem_event_index_enum event_index:4; + unsigned int tcl1_vol_action_ena:1; + unsigned int tc_vol_action_ena:1; + unsigned int reserved2:1; + unsigned int tc_wb_action_ena:1; + unsigned int tcl1_action_ena:1; + unsigned int tc_action_ena:1; + uint32_t reserved3:1; + uint32_t tc_nc_action_ena:1; + uint32_t tc_wc_action_ena:1; + uint32_t tc_md_action_ena:1; + uint32_t reserved4:3; + enum mec_release_mem_cache_policy_enum cache_policy:2; + uint32_t reserved5:2; + enum mec_release_mem_pq_exe_status_enum pq_exe_status:1; + uint32_t reserved6:2; + } bitfields2; + unsigned int ordinal2; + }; + + union { + struct { + uint32_t reserved7:16; + enum mec_release_mem_dst_sel_enum dst_sel:2; + uint32_t reserved8:6; + enum mec_release_mem_int_sel_enum int_sel:3; + uint32_t reserved9:2; + enum mec_release_mem_data_sel_enum data_sel:3; + } bitfields3; + unsigned int ordinal3; + }; + + union { + struct { + uint32_t reserved10:2; + unsigned int address_lo_32b:30; + } bitfields4; + struct { + uint32_t reserved11:3; + uint32_t address_lo_64b:29; + } bitfields4b; + uint32_t reserved12; + unsigned int ordinal4; + }; + + union { + uint32_t address_hi; + uint32_t reserved13; + uint32_t ordinal5; + }; + + union { + uint32_t data_lo; + uint32_t cmp_data_lo; + struct { + uint32_t dw_offset:16; + uint32_t num_dwords:16; + } bitfields6c; + uint32_t reserved14; + uint32_t ordinal6; + }; + + union { + uint32_t data_hi; + uint32_t cmp_data_hi; + uint32_t reserved15; + uint32_t reserved16; + uint32_t ordinal7; + }; + + uint32_t int_ctxid; + +}; + +#endif + +enum { + CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014 +}; +#endif + diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 873a8fbc14ce..b68299a3e18a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -900,6 +900,7 @@ struct packet_manager_funcs { }; extern const struct packet_manager_funcs kfd_vi_pm_funcs; +extern const struct packet_manager_funcs kfd_v9_pm_funcs; int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm); void pm_uninit(struct packet_manager *pm); @@ -916,6 +917,11 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, void pm_release_ib(struct packet_manager *pm); +/* Following PM funcs can be shared among VI and AI */ +unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size); +int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, + struct scheduling_resources *res); + uint64_t kfd_get_number_elems(struct kfd_dev *kfd); /* Events */ -- cgit v1.2.1 From b91d43dd01aadd43b1002160b78d77f8175876a4 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:08 -0400 Subject: drm/amdkfd: Add GFXv9 MQD manager Signed-off-by: John Bridgman Signed-off-by: Jay Cornwall Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/Makefile | 1 + drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 3 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 443 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 + 5 files changed, 451 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 52b3c1b419f1..094b591ed8c2 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ 
b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -30,6 +30,7 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \ kfd_process.o kfd_queue.o kfd_mqd_manager.o \ kfd_mqd_manager_cik.o kfd_mqd_manager_vi.o \ + kfd_mqd_manager_v9.o \ kfd_kernel_queue.o kfd_kernel_queue_cik.o \ kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \ kfd_packet_manager.o kfd_process_queue_manager.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index f563acbc1ad7..c368ce3e96ff 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -700,7 +700,7 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size, if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size) return -ENOMEM; - *mem_obj = kmalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); + *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); if ((*mem_obj) == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index ee7061e1c466..4b8eb506642b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -38,6 +38,9 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type, case CHIP_POLARIS10: case CHIP_POLARIS11: return mqd_manager_init_vi_tonga(type, dev); + case CHIP_VEGA10: + case CHIP_RAVEN: + return mqd_manager_init_v9(type, dev); default: WARN(1, "Unexpected ASIC family %u", dev->device_info->asic_family); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c new file mode 100644 index 000000000000..684054ff02cd --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -0,0 +1,443 @@ +/* + * Copyright 2016-2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include +#include +#include +#include "kfd_priv.h" +#include "kfd_mqd_manager.h" +#include "v9_structs.h" +#include "gc/gc_9_0_offset.h" +#include "gc/gc_9_0_sh_mask.h" +#include "sdma0/sdma0_4_0_sh_mask.h" + +static inline struct v9_mqd *get_mqd(void *mqd) +{ + return (struct v9_mqd *)mqd; +} + +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v9_sdma_mqd *)mqd; +} + +static int init_mqd(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + int retval; + uint64_t addr; + struct v9_mqd *m; + struct kfd_dev *kfd = mm->dev; + + /* From V9, for CWSR, the control stack is located on the next page + * boundary after the mqd, we will use the gtt allocation function + * instead of sub-allocation function. + */ + if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { + *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); + if (!*mqd_mem_obj) + return -ENOMEM; + retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd, + ALIGN(q->ctl_stack_size, PAGE_SIZE) + + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), + &((*mqd_mem_obj)->gtt_mem), + &((*mqd_mem_obj)->gpu_addr), + (void *)&((*mqd_mem_obj)->cpu_ptr)); + } else + retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), + mqd_mem_obj); + if (retval != 0) + return -ENOMEM; + + m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr; + addr = (*mqd_mem_obj)->gpu_addr; + + memset(m, 0, sizeof(struct v9_mqd)); + + m->header = 0xC0310800; + m->compute_pipelinestat_enable = 1; + m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; + m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; + + m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | + 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; + + m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT; + + m->cp_mqd_base_addr_lo = lower_32_bits(addr); + m->cp_mqd_base_addr_hi = upper_32_bits(addr); + + m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT | + 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | + 10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; + + m->cp_hqd_pipe_priority = 1; + m->cp_hqd_queue_priority = 15; + + if (q->format == KFD_QUEUE_FORMAT_AQL) { + m->cp_hqd_aql_control = + 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; + } + + if (q->tba_addr) { + m->compute_pgm_rsrc2 |= + (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT); + } + + if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { + m->cp_hqd_persistent_state |= + (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); + m->cp_hqd_ctx_save_base_addr_lo = + lower_32_bits(q->ctx_save_restore_area_address); + m->cp_hqd_ctx_save_base_addr_hi = + upper_32_bits(q->ctx_save_restore_area_address); + m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size; + m->cp_hqd_cntl_stack_size = q->ctl_stack_size; + m->cp_hqd_cntl_stack_offset = q->ctl_stack_size; + m->cp_hqd_wg_state_offset = q->ctl_stack_size; + } + + *mqd = m; + if (gart_addr) + *gart_addr = addr; + retval = mm->update_mqd(mm, m, q); + + return retval; +} + +static int load_mqd(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + struct queue_properties *p, struct mm_struct *mms) +{ + /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ + uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); + + return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + (uint32_t __user *)p->write_ptr, + wptr_shift, 0, mms); +} + +static int update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + struct v9_mqd *m; + + m = get_mqd(mqd); + + m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT; + m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1; + pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control); + + m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); + m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); + + m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); + m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); + m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr); + m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr); + + m->cp_hqd_pq_doorbell_control = + q->doorbell_off << + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; + pr_debug("cp_hqd_pq_doorbell_control 0x%x\n", + m->cp_hqd_pq_doorbell_control); + + m->cp_hqd_ib_control = + 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT | + 1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT; + + /* + * HW does not clamp this field correctly. Maximum EOP queue size + * is constrained by per-SE EOP done signal count, which is 8-bit. + * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit + * more than (EOP entry count - 1) so a queue size of 0x800 dwords + * is safe, giving a maximum field value of 0xA. + */ + m->cp_hqd_eop_control = min(0xA, + order_base_2(q->eop_ring_buffer_size / 4) - 1); + m->cp_hqd_eop_base_addr_lo = + lower_32_bits(q->eop_ring_buffer_address >> 8); + m->cp_hqd_eop_base_addr_hi = + upper_32_bits(q->eop_ring_buffer_address >> 8); + + m->cp_hqd_iq_timer = 0; + + m->cp_hqd_vmid = q->vmid; + + if (q->format == KFD_QUEUE_FORMAT_AQL) { + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK | + 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT | + 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT | + 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT; + m->cp_hqd_pq_doorbell_control |= 1 << + CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; + } + if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) + m->cp_hqd_ctx_save_control = 0; + + q->is_active = (q->queue_size > 0 && + q->queue_address != 0 && + q->queue_percent > 0 && + !q->is_evicted); + + return 0; +} + + +static int destroy_mqd(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_destroy + (mm->dev->kgd, mqd, type, timeout, + pipe_id, queue_id); +} + +static void uninit_mqd(struct mqd_manager *mm, void *mqd, + struct kfd_mem_obj *mqd_mem_obj) +{ + struct kfd_dev *kfd = mm->dev; + + if (mqd_mem_obj->gtt_mem) { + kfd->kfd2kgd->free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); + kfree(mqd_mem_obj); + } else { + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); + } +} + +static bool is_occupied(struct mqd_manager *mm, void *mqd, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_is_occupied( + mm->dev->kgd, queue_address, + pipe_id, queue_id); +} + +static int init_mqd_hiq(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + struct v9_mqd *m; + int retval = init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); + + if (retval != 0) + return retval; + + m = 
get_mqd(*mqd); + + m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT | + 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; + + return retval; +} + +static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + struct v9_mqd *m; + int retval = update_mqd(mm, mqd, q); + + if (retval != 0) + return retval; + + /* TODO: what's the point? update_mqd already does this. */ + m = get_mqd(mqd); + m->cp_hqd_vmid = q->vmid; + return retval; +} + +static int init_mqd_sdma(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + int retval; + struct v9_sdma_mqd *m; + + + retval = kfd_gtt_sa_allocate(mm->dev, + sizeof(struct v9_sdma_mqd), + mqd_mem_obj); + + if (retval != 0) + return -ENOMEM; + + m = (struct v9_sdma_mqd *) (*mqd_mem_obj)->cpu_ptr; + + memset(m, 0, sizeof(struct v9_sdma_mqd)); + + *mqd = m; + if (gart_addr) + *gart_addr = (*mqd_mem_obj)->gpu_addr; + + retval = mm->update_mqd(mm, m, q); + + return retval; +} + +static void uninit_mqd_sdma(struct mqd_manager *mm, void *mqd, + struct kfd_mem_obj *mqd_mem_obj) +{ + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); +} + +static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + struct queue_properties *p, struct mm_struct *mms) +{ + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + (uint32_t __user *)p->write_ptr, + mms); +} + +#define SDMA_RLC_DUMMY_DEFAULT 0xf + +static int update_mqd_sdma(struct mqd_manager *mm, void *mqd, + struct queue_properties *q) +{ + struct v9_sdma_mqd *m; + + m = get_sdma_mqd(mqd); + m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4) + << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | + q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | + 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | + 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; + + m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8); + m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8); + m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr); + m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr); + m->sdmax_rlcx_doorbell_offset = + q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT; + + m->sdma_engine_id = q->sdma_engine_id; + m->sdma_queue_id = q->sdma_queue_id; + m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT; + + q->is_active = (q->queue_size > 0 && + q->queue_address != 0 && + q->queue_percent > 0 && + !q->is_evicted); + + return 0; +} + +/* + * * preempt type here is ignored because there is only one way + * * to preempt sdma queue + */ +static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); +} + +static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) +{ + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); +} + +#if defined(CONFIG_DEBUG_FS) + +static int debugfs_show_mqd(struct seq_file *m, void *data) +{ + seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4, + data, sizeof(struct v9_mqd), false); + return 0; +} + +static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) +{ + seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4, + data, sizeof(struct v9_sdma_mqd), false); + return 0; +} + +#endif + +struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, + struct 
kfd_dev *dev) +{ + struct mqd_manager *mqd; + + if (WARN_ON(type >= KFD_MQD_TYPE_MAX)) + return NULL; + + mqd = kzalloc(sizeof(*mqd), GFP_NOIO); + if (!mqd) + return NULL; + + mqd->dev = dev; + + switch (type) { + case KFD_MQD_TYPE_CP: + case KFD_MQD_TYPE_COMPUTE: + mqd->init_mqd = init_mqd; + mqd->uninit_mqd = uninit_mqd; + mqd->load_mqd = load_mqd; + mqd->update_mqd = update_mqd; + mqd->destroy_mqd = destroy_mqd; + mqd->is_occupied = is_occupied; +#if defined(CONFIG_DEBUG_FS) + mqd->debugfs_show_mqd = debugfs_show_mqd; +#endif + break; + case KFD_MQD_TYPE_HIQ: + mqd->init_mqd = init_mqd_hiq; + mqd->uninit_mqd = uninit_mqd; + mqd->load_mqd = load_mqd; + mqd->update_mqd = update_mqd_hiq; + mqd->destroy_mqd = destroy_mqd; + mqd->is_occupied = is_occupied; +#if defined(CONFIG_DEBUG_FS) + mqd->debugfs_show_mqd = debugfs_show_mqd; +#endif + break; + case KFD_MQD_TYPE_SDMA: + mqd->init_mqd = init_mqd_sdma; + mqd->uninit_mqd = uninit_mqd_sdma; + mqd->load_mqd = load_mqd_sdma; + mqd->update_mqd = update_mqd_sdma; + mqd->destroy_mqd = destroy_mqd_sdma; + mqd->is_occupied = is_occupied_sdma; +#if defined(CONFIG_DEBUG_FS) + mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; +#endif + break; + default: + kfree(mqd); + return NULL; + } + + return mqd; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index b68299a3e18a..fac28827b000 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -197,6 +197,7 @@ struct kfd_mem_obj { uint32_t range_end; uint64_t gpu_addr; uint32_t *cpu_ptr; + void *gtt_mem; }; struct kfd_vmid_info { @@ -822,6 +823,8 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, struct kfd_dev *dev); struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, struct kfd_dev *dev); +struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, + struct kfd_dev *dev); struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev); void device_queue_manager_uninit(struct device_queue_manager *dqm); struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, -- cgit v1.2.1 From bed4f110251b4f9041e5e797e035bc40c34d60ea Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:09 -0400 Subject: drm/amdkfd: Add GFXv9 device queue manager Signed-off-by: John Bridgman Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/Makefile | 2 +- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10 ++- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 2 + .../drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 84 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_module.c | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 ++ 6 files changed, 106 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 094b591ed8c2..ff8b5aa11f4e 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -35,7 +35,7 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ kfd_kernel_queue_vi.o kfd_kernel_queue_v9.o \ kfd_packet_manager.o kfd_process_queue_manager.o \ kfd_device_queue_manager.o kfd_device_queue_manager_cik.o \ - kfd_device_queue_manager_vi.o \ + kfd_device_queue_manager_vi.o kfd_device_queue_manager_v9.o \ kfd_interrupt.o kfd_events.o cik_event_interrupt.o \ kfd_dbgdev.o kfd_dbgmgr.o 
kfd_crat.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 500f022d089d..9af94b1f9074 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1386,7 +1386,10 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm, void __user *alternate_aperture_base, uint64_t alternate_aperture_size) { - bool retval; + bool retval = true; + + if (!dqm->asic_ops.set_cache_memory_policy) + return retval; mutex_lock(&dqm->lock); @@ -1655,6 +1658,11 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) case CHIP_POLARIS11: device_queue_manager_init_vi_tonga(&dqm->asic_ops); break; + + case CHIP_VEGA10: + case CHIP_RAVEN: + device_queue_manager_init_v9(&dqm->asic_ops); + break; default: WARN(1, "Unexpected ASIC family %u", dev->device_info->asic_family); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 412beff3281d..59a6b1956932 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -200,6 +200,8 @@ void device_queue_manager_init_vi( struct device_queue_manager_asic_ops *asic_ops); void device_queue_manager_init_vi_tonga( struct device_queue_manager_asic_ops *asic_ops); +void device_queue_manager_init_v9( + struct device_queue_manager_asic_ops *asic_ops); void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd); unsigned int get_queues_num(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c new file mode 100644 index 000000000000..79e5bcf6367c --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -0,0 +1,84 @@ +/* + * Copyright 2016-2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "kfd_device_queue_manager.h" +#include "vega10_enum.h" +#include "gc/gc_9_0_offset.h" +#include "gc/gc_9_0_sh_mask.h" +#include "sdma0/sdma0_4_0_sh_mask.h" + +static int update_qpd_v9(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); +static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q, + struct qcm_process_device *qpd); + +void device_queue_manager_init_v9( + struct device_queue_manager_asic_ops *asic_ops) +{ + asic_ops->update_qpd = update_qpd_v9; + asic_ops->init_sdma_vm = init_sdma_vm_v9; +} + +static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd) +{ + uint32_t shared_base = pdd->lds_base >> 48; + uint32_t private_base = pdd->scratch_base >> 48; + + return (shared_base << SH_MEM_BASES__SHARED_BASE__SHIFT) | + private_base; +} + +static int update_qpd_v9(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + struct kfd_process_device *pdd; + + pdd = qpd_to_pdd(qpd); + + /* check if sh_mem_config register already configured */ + if (qpd->sh_mem_config == 0) { + qpd->sh_mem_config = + SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; + if (vega10_noretry && + !dqm->dev->device_info->needs_iommu_device) + qpd->sh_mem_config |= + 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT; + + qpd->sh_mem_ape1_limit = 0; + qpd->sh_mem_ape1_base = 0; + } + + qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd); + + pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases); + + return 0; +} + +static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q, + struct qcm_process_device *qpd) +{ + /* Not needed on SDMAv4 any more */ + q->properties.sdma_vm_addr = 0; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 45bc458f7348..76bf2dc8aec4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -83,6 +83,11 @@ module_param(ignore_crat, int, 0444); MODULE_PARM_DESC(ignore_crat, "Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)"); +int vega10_noretry; +module_param_named(noretry, vega10_noretry, int, 0644); +MODULE_PARM_DESC(noretry, + "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)"); + static int amdkfd_init_completed; int kgd2kfd_init(unsigned int interface_version, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index fac28827b000..d5cdb5db4983 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -137,6 +137,11 @@ extern int debug_largebar; */ extern int ignore_crat; +/* + * Set sh_mem_config.retry_disable on Vega10 + */ +extern int vega10_noretry; + /** * enum kfd_sched_policy * -- cgit v1.2.1 From ca750681bc4a897ffa7eed71a1e05762fb1f0a34 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:10 -0400 Subject: drm/amdkfd: Add SOC15 interrupt processing support Signed-off-by: Shaoyun Liu Signed-off-by: Oak Zeng Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/Makefile | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 84 +++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 + drivers/gpu/drm/amd/amdkfd/soc15_int.h | 47 ++++++++++++++ 4 files changed, 134 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c create mode 100644 drivers/gpu/drm/amd/amdkfd/soc15_int.h 
(limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index ff8b5aa11f4e..ffd096fffc1c 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -37,7 +37,7 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ kfd_device_queue_manager.o kfd_device_queue_manager_cik.o \ kfd_device_queue_manager_vi.o kfd_device_queue_manager_v9.o \ kfd_interrupt.o kfd_events.o cik_event_interrupt.o \ - kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o + kfd_int_process_v9.o kfd_dbgdev.o kfd_dbgmgr.o kfd_crat.o ifneq ($(CONFIG_AMD_IOMMU_V2),) amdkfd-y += kfd_iommu.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c new file mode 100644 index 000000000000..39d41155581f --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -0,0 +1,84 @@ +/* + * Copyright 2016-2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "kfd_priv.h" +#include "kfd_events.h" +#include "soc15_int.h" + + +static bool event_interrupt_isr_v9(struct kfd_dev *dev, + const uint32_t *ih_ring_entry) +{ + uint16_t source_id, client_id, pasid, vmid; + + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + + if (pasid) { + const uint32_t *data = ih_ring_entry; + + pr_debug("client id 0x%x, source id %d, pasid 0x%x. 
raw data:\n", + client_id, source_id, pasid); + pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", + data[0], data[1], data[2], data[3], + data[4], data[5], data[6], data[7]); + } + + return (pasid != 0) && + (source_id == SOC15_INTSRC_CP_END_OF_PIPE || + source_id == SOC15_INTSRC_SDMA_TRAP || + source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || + source_id == SOC15_INTSRC_CP_BAD_OPCODE); +} + +static void event_interrupt_wq_v9(struct kfd_dev *dev, + const uint32_t *ih_ring_entry) +{ + uint16_t source_id, client_id, pasid, vmid; + uint32_t context_id; + + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + context_id = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); + + if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) + kfd_signal_event_interrupt(pasid, context_id, 32); + else if (source_id == SOC15_INTSRC_SDMA_TRAP) + kfd_signal_event_interrupt(pasid, context_id & 0xfffffff, 28); + else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) + kfd_signal_event_interrupt(pasid, context_id & 0xffffff, 24); + else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) + kfd_signal_hw_exception_event(pasid); + else if (client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_UTCL2) { + /* TODO */ + } +} + +const struct kfd_event_interrupt_class event_interrupt_class_v9 = { + .interrupt_isr = event_interrupt_isr_v9, + .interrupt_wq = event_interrupt_wq_v9, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d5cdb5db4983..06b210b33dda 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -934,6 +934,8 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd); /* Events */ extern const struct kfd_event_interrupt_class event_interrupt_class_cik; +extern const struct kfd_event_interrupt_class event_interrupt_class_v9; + extern const struct kfd_device_global_init_class device_global_init_class_cik; void kfd_event_init_process(struct kfd_process *p); diff --git a/drivers/gpu/drm/amd/amdkfd/soc15_int.h b/drivers/gpu/drm/amd/amdkfd/soc15_int.h new file mode 100644 index 000000000000..0bc0b25cb410 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/soc15_int.h @@ -0,0 +1,47 @@ +/* + * Copyright 2016-2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef HSA_SOC15_INT_H_INCLUDED +#define HSA_SOC15_INT_H_INCLUDED + +#include "soc15_ih_clientid.h" + +#define SOC15_INTSRC_CP_END_OF_PIPE 181 +#define SOC15_INTSRC_CP_BAD_OPCODE 183 +#define SOC15_INTSRC_SQ_INTERRUPT_MSG 239 +#define SOC15_INTSRC_VMC_FAULT 0 +#define SOC15_INTSRC_SDMA_TRAP 224 + + +#define SOC15_CLIENT_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) & 0xff) +#define SOC15_SOURCE_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 8 & 0xff) +#define SOC15_RING_ID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 16 & 0xff) +#define SOC15_VMID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 24 & 0xf) +#define SOC15_VMID_TYPE_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[0]) >> 31 & 0x1) +#define SOC15_PASID_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[3]) & 0xffff) +#define SOC15_CONTEXT_ID0_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[4])) +#define SOC15_CONTEXT_ID1_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[5])) +#define SOC15_CONTEXT_ID2_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[6])) +#define SOC15_CONTEXT_ID3_FROM_IH_ENTRY(entry) (le32_to_cpu(entry[7])) + +#endif + -- cgit v1.2.1 From 2a26fbfe80015faef830bc47c5223b4b31d41791 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:11 -0400 Subject: drm/amdkfd: Fix goto usage Missed a spot in previous cleanup commit: Remove gotos that do not feature any common cleanup, and use gotos instead of repeating cleanup commands. According to kernel.org: "The goto statement comes in handy when a function exits from multiple locations and some common work such as cleanup has to be done. If there is no cleanup needed then just return directly." Signed-off-by: Kent Russell Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 69f496485331..23e586b0507c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -232,18 +232,16 @@ static int acquire_packet_buffer(struct kernel_queue *kq, * make sure calling functions know * acquire_packet_buffer() failed */ - *buffer_ptr = NULL; - return -ENOMEM; + goto err_no_space; } if (wptr + packet_size_in_dwords >= queue_size_dwords) { /* make sure after rolling back to position 0, there is * still enough space. */ - if (packet_size_in_dwords >= rptr) { - *buffer_ptr = NULL; - return -ENOMEM; - } + if (packet_size_in_dwords >= rptr) + goto err_no_space; + /* fill nops, roll back and start at position 0 */ while (wptr > 0) { queue_address[wptr] = kq->nop_packet; @@ -255,6 +253,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq, kq->pending_wptr = wptr + packet_size_in_dwords; return 0; + +err_no_space: + *buffer_ptr = NULL; + return -ENOMEM; } static void submit_packet(struct kernel_queue *kq) -- cgit v1.2.1 From bebfd2f4126a115420a2b04f44a05552c12e5b46 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:12 -0400 Subject: drm/amdkfd: Fix kernel queue rollback_packet kq->queue->properties.write_ptr is a GPU address which can'd be derefenced in the kernel. Use kq->wptr_kernel instead, which is the kernel CPU address of the same buffer. 
Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 23e586b0507c..9f381612afd7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -279,7 +279,7 @@ static void submit_packet(struct kernel_queue *kq) static void rollback_packet(struct kernel_queue *kq) { - kq->pending_wptr = *kq->queue->properties.write_ptr; + kq->pending_wptr = *kq->wptr_kernel; } struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, -- cgit v1.2.1 From 9d7d024816686f922735f7adccd00e3fc44e2e03 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sun, 8 Apr 2018 22:03:51 -0400 Subject: drm/amdkfd: Add 64-bit doorbell and wptr support to kernel queue v2: Removed redundant 0x before %p. Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 10 +++++++++ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 25 +++++++++++++++++------ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 7 ++++++- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 9 ++++++++ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c | 9 ++++++++ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c | 9 ++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 + 7 files changed, 63 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c index 36c9269ea7c0..c3744d89352c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c @@ -214,6 +214,16 @@ void write_kernel_doorbell(void __iomem *db, u32 value) } } +void write_kernel_doorbell64(void __iomem *db, u64 value) +{ + if (db) { + WARN(((unsigned long)db & 7) != 0, + "Unaligned 64-bit doorbell"); + writeq(value, (u64 __iomem *)db); + pr_debug("writing %llu to doorbell address %p\n", value, db); + } +} + unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd, struct kfd_process *process, unsigned int doorbell_id) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 9f381612afd7..476951d8c91c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -99,7 +99,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->rptr_kernel = kq->rptr_mem->cpu_ptr; kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr; - retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->wptr_kernel), + retval = kfd_gtt_sa_allocate(dev, dev->device_info->doorbell_size, &kq->wptr_mem); if (retval != 0) @@ -208,6 +208,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq, size_t available_size; size_t queue_size_dwords; uint32_t wptr, rptr; + uint64_t wptr64; unsigned int *queue_address; /* When rptr == wptr, the buffer is empty. @@ -216,7 +217,8 @@ static int acquire_packet_buffer(struct kernel_queue *kq, * the opposite. So we can only use up to queue_size_dwords - 1 dwords. 
*/ rptr = *kq->rptr_kernel; - wptr = *kq->wptr_kernel; + wptr = kq->pending_wptr; + wptr64 = kq->pending_wptr64; queue_address = (unsigned int *)kq->pq_kernel_addr; queue_size_dwords = kq->queue->properties.queue_size / 4; @@ -246,11 +248,13 @@ static int acquire_packet_buffer(struct kernel_queue *kq, while (wptr > 0) { queue_address[wptr] = kq->nop_packet; wptr = (wptr + 1) % queue_size_dwords; + wptr64++; } } *buffer_ptr = &queue_address[wptr]; kq->pending_wptr = wptr + packet_size_in_dwords; + kq->pending_wptr64 = wptr64 + packet_size_in_dwords; return 0; @@ -272,14 +276,18 @@ static void submit_packet(struct kernel_queue *kq) pr_debug("\n"); #endif - *kq->wptr_kernel = kq->pending_wptr; - write_kernel_doorbell(kq->queue->properties.doorbell_ptr, - kq->pending_wptr); + kq->ops_asic_specific.submit_packet(kq); } static void rollback_packet(struct kernel_queue *kq) { - kq->pending_wptr = *kq->wptr_kernel; + if (kq->dev->device_info->doorbell_size == 8) { + kq->pending_wptr64 = *kq->wptr64_kernel; + kq->pending_wptr = *kq->wptr_kernel % + (kq->queue->properties.queue_size / 4); + } else { + kq->pending_wptr = *kq->wptr_kernel; + } } struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, @@ -310,6 +318,11 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, case CHIP_HAWAII: kernel_queue_init_cik(&kq->ops_asic_specific); break; + + case CHIP_VEGA10: + case CHIP_RAVEN: + kernel_queue_init_v9(&kq->ops_asic_specific); + break; default: WARN(1, "Unexpected ASIC family %u", dev->device_info->asic_family); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h index 594053136ee4..97aff2041a5d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h @@ -72,6 +72,7 @@ struct kernel_queue { struct kfd_dev *dev; struct mqd_manager *mqd; struct queue *queue; + uint64_t pending_wptr64; uint32_t pending_wptr; unsigned int nop_packet; @@ -79,7 +80,10 @@ struct kernel_queue { uint32_t *rptr_kernel; uint64_t rptr_gpu_addr; struct kfd_mem_obj *wptr_mem; - uint32_t *wptr_kernel; + union { + uint64_t *wptr64_kernel; + uint32_t *wptr_kernel; + }; uint64_t wptr_gpu_addr; struct kfd_mem_obj *pq; uint64_t pq_gpu_addr; @@ -97,5 +101,6 @@ struct kernel_queue { void kernel_queue_init_cik(struct kernel_queue_ops *ops); void kernel_queue_init_vi(struct kernel_queue_ops *ops); +void kernel_queue_init_v9(struct kernel_queue_ops *ops); #endif /* KFD_KERNEL_QUEUE_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c index a90eb440b1fb..19e54acb4125 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c @@ -26,11 +26,13 @@ static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, enum kfd_queue_type type, unsigned int queue_size); static void uninitialize_cik(struct kernel_queue *kq); +static void submit_packet_cik(struct kernel_queue *kq); void kernel_queue_init_cik(struct kernel_queue_ops *ops) { ops->initialize = initialize_cik; ops->uninitialize = uninitialize_cik; + ops->submit_packet = submit_packet_cik; } static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, @@ -42,3 +44,10 @@ static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev, static void uninitialize_cik(struct kernel_queue *kq) { } + +static void submit_packet_cik(struct kernel_queue *kq) +{ + *kq->wptr_kernel = kq->pending_wptr; + 
write_kernel_doorbell(kq->queue->properties.doorbell_ptr, + kq->pending_wptr); +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c index ece7d59537b7..684a3bf07efd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c @@ -29,11 +29,13 @@ static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, enum kfd_queue_type type, unsigned int queue_size); static void uninitialize_v9(struct kernel_queue *kq); +static void submit_packet_v9(struct kernel_queue *kq); void kernel_queue_init_v9(struct kernel_queue_ops *ops) { ops->initialize = initialize_v9; ops->uninitialize = uninitialize_v9; + ops->submit_packet = submit_packet_v9; } static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev, @@ -58,6 +60,13 @@ static void uninitialize_v9(struct kernel_queue *kq) kfd_gtt_sa_free(kq->dev, kq->eop_mem); } +static void submit_packet_v9(struct kernel_queue *kq) +{ + *kq->wptr64_kernel = kq->pending_wptr64; + write_kernel_doorbell64(kq->queue->properties.doorbell_ptr, + kq->pending_wptr64); +} + static int pm_map_process_v9(struct packet_manager *pm, uint32_t *buffer, struct qcm_process_device *qpd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c index f9019efd31b9..bf20c6d32ef3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c @@ -29,11 +29,13 @@ static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, enum kfd_queue_type type, unsigned int queue_size); static void uninitialize_vi(struct kernel_queue *kq); +static void submit_packet_vi(struct kernel_queue *kq); void kernel_queue_init_vi(struct kernel_queue_ops *ops) { ops->initialize = initialize_vi; ops->uninitialize = uninitialize_vi; + ops->submit_packet = submit_packet_vi; } static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev, @@ -58,6 +60,13 @@ static void uninitialize_vi(struct kernel_queue *kq) kfd_gtt_sa_free(kq->dev, kq->eop_mem); } +static void submit_packet_vi(struct kernel_queue *kq) +{ + *kq->wptr_kernel = kq->pending_wptr; + write_kernel_doorbell(kq->queue->properties.doorbell_ptr, + kq->pending_wptr); +} + unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size) { union PM4_MES_TYPE_3_HEADER header; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 06b210b33dda..10d5b5445195 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -769,6 +769,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd, void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr); u32 read_kernel_doorbell(u32 __iomem *db); void write_kernel_doorbell(void __iomem *db, u32 value); +void write_kernel_doorbell64(void __iomem *db, u64 value); unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd, struct kfd_process *process, unsigned int doorbell_id); -- cgit v1.2.1 From 6aac0a48b02f5d7ed64e4fdc2aa48843d425905b Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:14 -0400 Subject: drm/amdkfd: Remove limit on number of GPUs (follow-up) This condition was missed in a previous commit with the same title. 
Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 66852de410c8..f16ac2b2f060 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -307,9 +307,7 @@ int kfd_init_apertures(struct kfd_process *process) struct kfd_process_device *pdd; /*Iterating over all devices*/ - while (kfd_topology_enum_kfd_devices(id, &dev) == 0 && - id < NUM_OF_SUPPORTED_GPUS) { - + while (kfd_topology_enum_kfd_devices(id, &dev) == 0) { if (!dev) { id++; /* Skip non GPU devices */ continue; -- cgit v1.2.1 From 70a31d16ccac518c701b9fbfacce5460a226bfd9 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:15 -0400 Subject: drm/amdkfd: Support flat memory apertures for GFXv9 Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 115 ++++++++++++++++++++------- 1 file changed, 87 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index f16ac2b2f060..97d5423c5673 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -275,23 +275,35 @@ * for FLAT_* / S_LOAD operations. */ -#define MAKE_GPUVM_APP_BASE(gpu_num) \ +#define MAKE_GPUVM_APP_BASE_VI(gpu_num) \ (((uint64_t)(gpu_num) << 61) + 0x1000000000000L) #define MAKE_GPUVM_APP_LIMIT(base, size) \ (((uint64_t)(base) & 0xFFFFFF0000000000UL) + (size) - 1) -#define MAKE_SCRATCH_APP_BASE() \ +#define MAKE_SCRATCH_APP_BASE_VI() \ (((uint64_t)(0x1UL) << 61) + 0x100000000L) #define MAKE_SCRATCH_APP_LIMIT(base) \ (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF) -#define MAKE_LDS_APP_BASE() \ +#define MAKE_LDS_APP_BASE_VI() \ (((uint64_t)(0x1UL) << 61) + 0x0) #define MAKE_LDS_APP_LIMIT(base) \ (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF) +/* On GFXv9 the LDS and scratch apertures are programmed independently + * using the high 16 bits of the 64-bit virtual address. They must be + * in the hole, which will be the case as long as the high 16 bits are + * not 0. + * + * The aperture sizes are still 4GB implicitly. + * + * A GPUVM aperture is not applicable on GFXv9. + */ +#define MAKE_LDS_APP_BASE_V9() ((uint64_t)(0x1UL) << 48) +#define MAKE_SCRATCH_APP_BASE_V9() ((uint64_t)(0x2UL) << 48) + /* User mode manages most of the SVM aperture address space. The low * 16MB are reserved for kernel use (CWSR trap handler and kernel IB * for now). @@ -300,6 +312,55 @@ #define SVM_CWSR_BASE (SVM_USER_BASE - KFD_CWSR_TBA_TMA_SIZE) #define SVM_IB_BASE (SVM_CWSR_BASE - PAGE_SIZE) +static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id) +{ + /* + * node id couldn't be 0 - the three MSB bits of + * aperture shoudn't be 0 + */ + pdd->lds_base = MAKE_LDS_APP_BASE_VI(); + pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); + + if (!pdd->dev->device_info->needs_iommu_device) { + /* dGPUs: SVM aperture starting at 0 + * with small reserved space for kernel. + * Set them to CANONICAL addresses. 
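		 * (Worked example with assumed values: with the 16 MB kernel
		 *  reservation described above and gpuvm_size = 64 GB, this
		 *  branch yields gpuvm_base = 0x1000000 and gpuvm_limit =
		 *  0xFFFFFFFFF, i.e. an ordinary low canonical range, whereas
		 *  the APU branch below builds a non-canonical base such as
		 *  0x2001000000000000 from MAKE_GPUVM_APP_BASE_VI(1), which
		 *  user space can never map.)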
+ */ + pdd->gpuvm_base = SVM_USER_BASE; + pdd->gpuvm_limit = + pdd->dev->shared_resources.gpuvm_size - 1; + } else { + /* set them to non CANONICAL addresses, and no SVM is + * allocated. + */ + pdd->gpuvm_base = MAKE_GPUVM_APP_BASE_VI(id + 1); + pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base, + pdd->dev->shared_resources.gpuvm_size); + } + + pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI(); + pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); +} + +static void kfd_init_apertures_v9(struct kfd_process_device *pdd, uint8_t id) +{ + pdd->lds_base = MAKE_LDS_APP_BASE_V9(); + pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); + + /* Raven needs SVM to support graphic handle, etc. Leave the small + * reserved space before SVM on Raven as well, even though we don't + * have to. + * Set gpuvm_base and gpuvm_limit to CANONICAL addresses so that they + * are used in Thunk to reserve SVM. + */ + pdd->gpuvm_base = SVM_USER_BASE; + pdd->gpuvm_limit = + pdd->dev->shared_resources.gpuvm_size - 1; + + pdd->scratch_base = MAKE_SCRATCH_APP_BASE_V9(); + pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); +} + int kfd_init_apertures(struct kfd_process *process) { uint8_t id = 0; @@ -316,7 +377,7 @@ int kfd_init_apertures(struct kfd_process *process) pdd = kfd_create_process_device_data(dev, process); if (!pdd) { pr_err("Failed to create process device data\n"); - return -1; + return -ENOMEM; } /* * For 64 bit process apertures will be statically reserved in @@ -328,32 +389,30 @@ int kfd_init_apertures(struct kfd_process *process) pdd->gpuvm_base = pdd->gpuvm_limit = 0; pdd->scratch_base = pdd->scratch_limit = 0; } else { - /* Same LDS and scratch apertures can be used - * on all GPUs. This allows using more dGPUs - * than placement options for apertures. 
- */ - pdd->lds_base = MAKE_LDS_APP_BASE(); - pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base); - - pdd->scratch_base = MAKE_SCRATCH_APP_BASE(); - pdd->scratch_limit = - MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base); + switch (dev->device_info->asic_family) { + case CHIP_KAVERI: + case CHIP_HAWAII: + case CHIP_CARRIZO: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + kfd_init_apertures_vi(pdd, id); + break; + case CHIP_VEGA10: + case CHIP_RAVEN: + kfd_init_apertures_v9(pdd, id); + break; + default: + WARN(1, "Unexpected ASIC family %u", + dev->device_info->asic_family); + return -EINVAL; + } - if (dev->device_info->needs_iommu_device) { - /* APUs: GPUVM aperture in - * non-canonical address space - */ - pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1); - pdd->gpuvm_limit = MAKE_GPUVM_APP_LIMIT( - pdd->gpuvm_base, - dev->shared_resources.gpuvm_size); - } else { - /* dGPUs: SVM aperture starting at 0 - * with small reserved space for kernel + if (!dev->device_info->needs_iommu_device) { + /* dGPUs: the reserved space for kernel + * before SVM */ - pdd->gpuvm_base = SVM_USER_BASE; - pdd->gpuvm_limit = - dev->shared_resources.gpuvm_size - 1; pdd->qpd.cwsr_base = SVM_CWSR_BASE; pdd->qpd.ib_base = SVM_IB_BASE; } -- cgit v1.2.1 From 3e76c2399b55483b1a28499b090f9d6600ab9eff Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:16 -0400 Subject: drm/amdkfd: Add GFXv9 CWSR trap handler Signed-off-by: Shaoyun Liu Signed-off-by: Jay Cornwall Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 1495 ++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 13 +- 2 files changed, 1505 insertions(+), 3 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm new file mode 100644 index 000000000000..033580c997ea --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -0,0 +1,1495 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#if 0 +HW (GFX9) source code for CWSR trap handler +#Version 18 + multiple trap handler + +// this performance-optimal version was originally from Seven Xu at SRDC + +// Revison #18 --... +/* Rev History +** #1. Branch from gc dv. 
//gfxip/gfx9/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV) +** #4. SR Memory Layout: +** 1. VGPR-SGPR-HWREG-{LDS} +** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern.. +** #5. Update: 1. Accurate g8sr_ts_save_d timestamp +** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation) +** #7. Update: 1. don't barrier if noLDS +** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version +** 2. Fix SQ issue by s_sleep 2 +** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last +** 2. optimize s_buffer save by burst 16sgprs... +** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs. +** #11. Update 1. Add 2 more timestamp for debug version +** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance +** #13. Integ 1. Always use MUBUF for PV trap shader... +** #14. Update 1. s_buffer_store soft clause... +** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot. +** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree +** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part] +** 2. PERF - Save LDS before save VGPR to cover LDS save long latency... +** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32 +** 2. FUNC - Handle non-CWSR traps +*/ + +var G8SR_WDMEM_HWREG_OFFSET = 0 +var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes + +// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore. 
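// (Orientation note, summarizing the offset math used throughout this
//  handler rather than anything new: each wave's save area is laid out as
//      0                                    VGPRs  (get_vgpr_size_bytes())
//      +size(VGPR)                          SGPRs  (get_sgpr_size_bytes())
//      +size(VGPR)+size(SGPR)               HWREGs (fixed 128 bytes)
//        ... ttmp6-11,13-15 spill at +0x40 inside the HWREG block
//      +size(VGPR)+size(SGPR)+size(HWREG)   LDS    (first wave in TG only)
//  matching the "VGPR-SGPR-HWREG-{LDS}" layout noted in rev #4 above.)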
+ +var G8SR_DEBUG_TIMESTAMP = 0 +var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset +var s_g8sr_ts_save_s = s[34:35] // save start +var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi +var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ +var s_g8sr_ts_save_d = s[40:41] // save end +var s_g8sr_ts_restore_s = s[42:43] // restore start +var s_g8sr_ts_restore_d = s[44:45] // restore end + +var G8SR_VGPR_SR_IN_DWX4 = 0 +var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes +var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 + + +/*************************************************************************/ +/* control on how to run the shader */ +/*************************************************************************/ +//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run) +var EMU_RUN_HACK = 0 +var EMU_RUN_HACK_RESTORE_NORMAL = 0 +var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0 +var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0 +var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK +var SAVE_LDS = 1 +var WG_BASE_ADDR_LO = 0x9000a000 +var WG_BASE_ADDR_HI = 0x0 +var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem +var CTX_SAVE_CONTROL = 0x0 +var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL +var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run) +var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write +var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes +var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing +var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency + +/**************************************************************************/ +/* variables */ +/**************************************************************************/ +var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23 +var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000 +var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 +var SQ_WAVE_STATUS_HALT_MASK = 0x2000 + +var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 +var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9 +var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8 +var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6 +var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24 +var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 3 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits + +var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400 +var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask +var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10 +var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100 +var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8 +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0 +var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11 +var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21 +var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800 + +var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME +var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME +var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000 +var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME + +var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 
24 +var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27 + +var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 26 // bits [31:26] unused by SPI debug data +var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0xFC000000 + +/* Save */ +var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes +var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE + +var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit +var S_SAVE_SPI_INIT_ATC_SHIFT = 27 +var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype +var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28 +var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG +var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26 + +var S_SAVE_PC_HI_RCNT_SHIFT = 28 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used +var S_SAVE_PC_HI_RCNT_MASK = 0xF0000000 //FIXME +var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 27 //FIXME +var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME + +var s_save_spi_init_lo = exec_lo +var s_save_spi_init_hi = exec_hi + +var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3??h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]} +var s_save_pc_hi = ttmp1 +var s_save_exec_lo = ttmp2 +var s_save_exec_hi = ttmp3 +var s_save_tmp = ttmp4 +var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine +var s_save_xnack_mask_lo = ttmp6 +var s_save_xnack_mask_hi = ttmp7 +var s_save_buf_rsrc0 = ttmp8 +var s_save_buf_rsrc1 = ttmp9 +var s_save_buf_rsrc2 = ttmp10 +var s_save_buf_rsrc3 = ttmp11 +var s_save_status = ttmp12 +var s_save_mem_offset = ttmp14 +var s_save_alloc_size = s_save_trapsts //conflict +var s_save_m0 = ttmp15 +var s_save_ttmps_lo = s_save_tmp //no conflict +var s_save_ttmps_hi = s_save_trapsts //no conflict + +/* Restore */ +var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE +var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC + +var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit +var S_RESTORE_SPI_INIT_ATC_SHIFT = 27 +var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype +var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28 +var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG +var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26 + +var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT +var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK +var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT +var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK + +var s_restore_spi_init_lo = exec_lo +var s_restore_spi_init_hi = exec_hi + +var s_restore_mem_offset = ttmp12 +var s_restore_alloc_size = ttmp3 +var s_restore_tmp = ttmp2 +var s_restore_mem_offset_save = s_restore_tmp //no conflict + +var s_restore_m0 = s_restore_alloc_size //no conflict + +var s_restore_mode = ttmp7 + +var s_restore_pc_lo = ttmp0 +var s_restore_pc_hi = ttmp1 +var s_restore_exec_lo = ttmp14 +var s_restore_exec_hi = ttmp15 +var s_restore_status = ttmp4 +var s_restore_trapsts = ttmp5 +var s_restore_xnack_mask_lo = xnack_mask_lo +var s_restore_xnack_mask_hi = xnack_mask_hi +var s_restore_buf_rsrc0 = ttmp8 +var s_restore_buf_rsrc1 = ttmp9 +var s_restore_buf_rsrc2 = ttmp10 +var s_restore_buf_rsrc3 = ttmp11 +var s_restore_ttmps_lo = s_restore_tmp //no conflict +var s_restore_ttmps_hi = s_restore_alloc_size //no conflict + +/**************************************************************************/ +/* trap handler entry points */ 
+/**************************************************************************/ +/* Shader Main*/ + +shader main + asic(GFX9) + type(CS) + + + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore + //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC + s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC + s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f. + s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE + //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE + s_branch L_SKIP_RESTORE //NOT restore, SAVE actually + else + s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save + end + +L_JUMP_TO_RESTORE: + s_branch L_RESTORE //restore + +L_SKIP_RESTORE: + + s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC + s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save + s_cbranch_scc1 L_SAVE //this is the operation for save + + // ********* Handle non-CWSR traps ******************* +if (!EMU_RUN_HACK) + // Illegal instruction is a non-maskable exception which blocks context save. + // Halt the wavefront and return from the trap. + s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK + s_cbranch_scc1 L_HALT_WAVE + + // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA. + // Instead, halt the wavefront and return from the trap. + s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK + s_cbranch_scc0 L_FETCH_2ND_TRAP + +L_HALT_WAVE: + // If STATUS.HALT is set then this fault must come from SQC instruction fetch. + // We cannot prevent further faults so just terminate the wavefront. + s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK + s_cbranch_scc0 L_NOT_ALREADY_HALTED + s_endpgm +L_NOT_ALREADY_HALTED: + s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK + + // If the PC points to S_ENDPGM then context save will fail if STATUS.HALT is set. + // Rewind the PC to prevent this from occurring. The debugger compensates for this. + s_sub_u32 ttmp0, ttmp0, 0x8 + s_subb_u32 ttmp1, ttmp1, 0x0 + +L_FETCH_2ND_TRAP: + // Preserve and clear scalar XNACK state before issuing scalar reads. + // Save IB_STS.FIRST_REPLAY[15] and IB_STS.RCNT[20:16] into unused space ttmp11[31:26]. + s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS) + s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK + s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT) + s_andn2_b32 ttmp11, ttmp11, TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK + s_or_b32 ttmp11, ttmp11, ttmp3 + + s_andn2_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK + s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2 + + // Read second-level TBA/TMA from first-level TMA and jump if available. 
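	// (For reference: HW_REG_SQ_SHADER_TMA_LO/HI hold the TMA address
	//  pre-shifted right by 8, hence the s_lshl_b64 by 0x8 below; the
	//  first-level TMA is then treated as a small table whose first two
	//  quadwords are the second-level TBA (+0x0) and TMA (+0x8).)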
+ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data) + // ttmp12 holds SQ_WAVE_STATUS + s_getreg_b32 ttmp4, hwreg(HW_REG_SQ_SHADER_TMA_LO) + s_getreg_b32 ttmp5, hwreg(HW_REG_SQ_SHADER_TMA_HI) + s_lshl_b64 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 + s_load_dwordx2 [ttmp2, ttmp3], [ttmp4, ttmp5], 0x0 glc:1 // second-level TBA + s_waitcnt lgkmcnt(0) + s_load_dwordx2 [ttmp4, ttmp5], [ttmp4, ttmp5], 0x8 glc:1 // second-level TMA + s_waitcnt lgkmcnt(0) + s_and_b64 [ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3] + s_cbranch_scc0 L_NO_NEXT_TRAP // second-level trap handler not been set + s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler + +L_NO_NEXT_TRAP: + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception + s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly. + s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0 + s_addc_u32 ttmp1, ttmp1, 0 +L_EXCP_CASE: + s_and_b32 ttmp1, ttmp1, 0xFFFF + + // Restore SQ_WAVE_IB_STS. + s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT) + s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK + s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2 + + // Restore SQ_WAVE_STATUS. + s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 + s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status + + s_rfe_b64 [ttmp0, ttmp1] +end + // ********* End handling of non-CWSR traps ******************* + +/**************************************************************************/ +/* save routine */ +/**************************************************************************/ + +L_SAVE: + +if G8SR_DEBUG_TIMESTAMP + s_memrealtime s_g8sr_ts_save_s + s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? 
+end + + s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] + + s_mov_b32 s_save_tmp, 0 //clear saveCtx bit + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit + + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT + s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT + s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY + s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT + s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp + s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS + s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG + + s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp + + /* inform SPI the readiness and wait for SPI's go signal */ + s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI + s_mov_b32 s_save_exec_hi, exec_hi + s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive + +if G8SR_DEBUG_TIMESTAMP + s_memrealtime s_g8sr_ts_sq_save_msg + s_waitcnt lgkmcnt(0) +end + + if (EMU_RUN_HACK) + + else + s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC + end + + L_SLEEP: + s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0 + + if (EMU_RUN_HACK) + + else + s_cbranch_execz L_SLEEP + end + +if G8SR_DEBUG_TIMESTAMP + s_memrealtime s_g8sr_ts_spi_wrexec + s_waitcnt lgkmcnt(0) +end + + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE)) + //calculate wd_addr using absolute thread id + v_readlane_b32 s_save_tmp, v9, 0 + s_lshr_b32 s_save_tmp, s_save_tmp, 6 + s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE + s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO + s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI + s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL + else + end + if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE)) + s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO + s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI + s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL + else + end + + // Save trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic + // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40 + get_vgpr_size_bytes(s_save_ttmps_lo) + get_sgpr_size_bytes(s_save_ttmps_hi) + s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_ttmps_hi + s_add_u32 s_save_ttmps_lo, s_save_ttmps_lo, s_save_spi_init_lo + s_addc_u32 s_save_ttmps_hi, s_save_spi_init_hi, 0x0 + s_and_b32 s_save_ttmps_hi, s_save_ttmps_hi, 0xFFFF + s_store_dwordx2 [ttmp6, ttmp7], [s_save_ttmps_lo, s_save_ttmps_hi], 0x40 glc:1 + ack_sqc_store_workaround() + s_store_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_save_ttmps_lo, s_save_ttmps_hi], 0x48 glc:1 + ack_sqc_store_workaround() + s_store_dword ttmp13, [s_save_ttmps_lo, s_save_ttmps_hi], 0x58 glc:1 + ack_sqc_store_workaround() + s_store_dwordx2 [ttmp14, ttmp15], [s_save_ttmps_lo, s_save_ttmps_hi], 0x5C glc:1 + ack_sqc_store_workaround() + + /* setup Resource Contants */ + s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo + s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi + s_or_b32 s_save_buf_rsrc1, 
s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE + s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not neccessarily inited + s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK + s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position + s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK + s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position + s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE + + //FIXME right now s_save_m0/s_save_mem_offset use tma_lo/tma_hi (might need to save them before using them?) + s_mov_b32 s_save_m0, m0 //save M0 + + /* global mem offset */ + s_mov_b32 s_save_mem_offset, 0x0 //mem offset initial value = 0 + + + + + /* save HW registers */ + ////////////////////////////// + + L_SAVE_HWREG: + // HWREG SR memory offset : size(VGPR)+size(SGPR) + get_vgpr_size_bytes(s_save_mem_offset) + get_sgpr_size_bytes(s_save_tmp) + s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp + + + s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + + write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0 + + if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME)) + s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 + s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over + end + + write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC + write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset) + write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC + write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset) + write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset) //STATUS + + //s_save_trapsts conflicts with s_save_alloc_size + s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) + write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset) //TRAPSTS + + write_hwreg_to_mem(xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_LO + write_hwreg_to_mem(xnack_mask_hi, s_save_buf_rsrc0, s_save_mem_offset) //XNACK_MASK_HI + + //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2 + s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE + write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) + + + + /* the first wave in the threadgroup */ + s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK // extract fisrt wave bit + s_mov_b32 s_save_exec_hi, 0x0 + s_or_b32 s_save_exec_hi, s_save_tmp, s_save_exec_hi // save first wave bit to s_save_exec_hi.bits[26] + + + /* save SGPRs */ + // Save SGPR before LDS save, then the s0 to s4 can be used during LDS save... 
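	// (Summary of the loop mechanics below, nothing new: M0-relative
	//  moves (s_movrels_b64 s0, s0 means s0 = s[0+M0]) gather 16 SGPRs
	//  per pass into s0-s15, write_16sgpr_to_mem() streams them out, and
	//  because that helper advances the rsrc base address, rsrc0/1 are
	//  parked in s_save_xnack_mask_lo/hi for the duration of the loop.)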
+ ////////////////////////////// + + // SGPR SR memory offset : size(VGPR) + get_vgpr_size_bytes(s_save_mem_offset) + // TODO, change RSRC word to rearrange memory layout for SGPRS + + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size + s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) + + if (SGPR_SAVE_USE_SQC) + s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes + else + s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) + end + + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + + // backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0 + //s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0 + s_mov_b64 s_save_xnack_mask_lo, s_save_buf_rsrc0 + s_add_u32 s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset + s_addc_u32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0 + + s_mov_b32 m0, 0x0 //SGPR initial index value =0 + s_nop 0x0 //Manually inserted wait states + L_SAVE_SGPR_LOOP: + // SGPR is allocated in 16 SGPR granularity + s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0] + s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0] + s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0] + s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0] + s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0] + s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0] + s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0] + s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0] + + write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset) //PV: the best performance should be using s_buffer_store_dwordx4 + s_add_u32 m0, m0, 16 //next sgpr index + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_SGPR_LOOP //SGPR save is complete? + // restore s_save_buf_rsrc0,1 + //s_mov_b64 s_save_buf_rsrc0, s_save_pc_lo + s_mov_b64 s_save_buf_rsrc0, s_save_xnack_mask_lo + + + + + /* save first 4 VGPR, then LDS save could use */ + // each wave will alloc 4 vgprs at least... + ///////////////////////////////////////////////////////////////////////////////////// + + s_mov_b32 s_save_mem_offset, 0 + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_mov_b32 exec_hi, 0xFFFFFFFF + s_mov_b32 xnack_mask_lo, 0x0 + s_mov_b32 xnack_mask_hi, 0x0 + + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
+ else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + + // VGPR Allocated in 4-GPR granularity + +if G8SR_VGPR_SR_IN_DWX4 + // the const stride for DWx4 is 4*4 bytes + s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 + s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes + + buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + + s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 + s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes +else + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 +end + + + + /* save LDS */ + ////////////////////////////// + + L_SAVE_LDS: + + // Change EXEC to all threads... + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size + s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero? + s_cbranch_scc0 L_SAVE_LDS_DONE //no lds used? jump to L_SAVE_DONE + + s_barrier //LDS is used? wait for other waves in the same TG + s_and_b32 s_save_tmp, s_save_exec_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here + s_cbranch_scc0 L_SAVE_LDS_DONE + + // first wave do LDS save; + + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes + s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes + + // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG) + // + get_vgpr_size_bytes(s_save_mem_offset) + get_sgpr_size_bytes(s_save_tmp) + s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp + s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes() + + + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + s_mov_b32 m0, 0x0 //lds_offset initial value = 0 + + +var LDS_DMA_ENABLE = 0 +var UNROLL = 0 +if UNROLL==0 && LDS_DMA_ENABLE==1 + s_mov_b32 s3, 256*2 + s_nop 0 + s_nop 0 + s_nop 0 + L_SAVE_LDS_LOOP: + //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.??? + if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity + buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW + buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW + end + + s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes + s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete? 
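	// (Note: LDS_DMA_ENABLE is 0 above, so this branch and the unrolled
	//  elsif that follows are compiled out; the BUFFER_STORE path at the
	//  end of this if/elsif/else -- ds_read_b64 plus buffer_store_dwordx2
	//  -- is what the shipped handler actually uses, cf. rev #17 in the
	//  header comment.)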
+ +elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss + // store from higest LDS address to lowest + s_mov_b32 s3, 256*2 + s_sub_u32 m0, s_save_alloc_size, s3 + s_add_u32 s_save_mem_offset, s_save_mem_offset, m0 + s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks... + s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest + s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction + s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc + s_nop 0 + s_nop 0 + s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes + s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved + s_add_u32 s0, s0,s_save_alloc_size + s_addc_u32 s1, s1, 0 + s_setpc_b64 s[0:1] + + + for var i =0; i< 128; i++ + // be careful to make here a 64Byte aligned address, which could improve performance... + buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW + buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW + + if i!=127 + s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. pack more LDS_DMA inst to one Cacheline + s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3 + end + end + +else // BUFFER_STORE + v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0 + v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid + v_mul_i32_i24 v2, v3, 8 // tid*8 + v_mov_b32 v3, 256*2 + s_mov_b32 m0, 0x10000 + s_mov_b32 s0, s_save_buf_rsrc3 + s_and_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0xFF7FFFFF // disable add_tid + s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, 0x58000 //DFMT + +L_SAVE_LDS_LOOP_VECTOR: + ds_read_b64 v[0:1], v2 //x =LDS[a], byte address + s_waitcnt lgkmcnt(0) + buffer_store_dwordx2 v[0:1], v2, s_save_buf_rsrc0, s_save_mem_offset offen:1 glc:1 slc:1 +// s_waitcnt vmcnt(0) +// v_add_u32 v2, vcc[0:1], v2, v3 + v_add_u32 v2, v2, v3 + v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size + s_cbranch_vccnz L_SAVE_LDS_LOOP_VECTOR + + // restore rsrc3 + s_mov_b32 s_save_buf_rsrc3, s0 + +end + +L_SAVE_LDS_DONE: + + + /* save VGPRs - set the Rest VGPRs */ + ////////////////////////////////////////////////////////////////////////////////////// + L_SAVE_VGPR: + // VGPR SR memory offset: 0 + // TODO rearrange the RSRC words to use swizzle for VGPR save... + + s_mov_b32 s_save_mem_offset, (0+256*4) // for the rest VGPRs + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size + s_add_u32 s_save_alloc_size, s_save_alloc_size, 1 + s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible + s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) + if (SWIZZLE_EN) + s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
+ else + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + + // VGPR Allocated in 4-GPR granularity + +if G8SR_VGPR_SR_IN_DWX4 + // the const stride for DWx4 is 4*4 bytes + s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 + s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes + + s_mov_b32 m0, 4 // skip first 4 VGPRs + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs + + s_set_gpr_idx_on m0, 0x1 // This will change M0 + s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0 +L_SAVE_VGPR_LOOP: + v_mov_b32 v0, v0 // v0 = v[0+m0] + v_mov_b32 v1, v1 + v_mov_b32 v2, v2 + v_mov_b32 v3, v3 + + + buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + s_add_u32 m0, m0, 4 + s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete? + s_set_gpr_idx_off +L_SAVE_VGPR_LOOP_END: + + s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0 + s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes +else + // VGPR store using dw burst + s_mov_b32 m0, 0x4 //VGPR initial index value =0 + s_cmp_lt_u32 m0, s_save_alloc_size + s_cbranch_scc0 L_SAVE_VGPR_END + + + s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1 + s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later + + L_SAVE_VGPR_LOOP: + v_mov_b32 v0, v0 //v0 = v[0+m0] + v_mov_b32 v1, v1 //v0 = v[0+m0] + v_mov_b32 v2, v2 //v0 = v[0+m0] + v_mov_b32 v3, v3 //v0 = v[0+m0] + + if(USE_MTBUF_INSTEAD_OF_MUBUF) + tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + else + buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 + buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256 + buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2 + buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3 + end + + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes + s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete? + s_set_gpr_idx_off +end + +L_SAVE_VGPR_END: + + + + + + + /* S_PGM_END_SAVED */ //FIXME graphics ONLY + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT)) + s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32] + s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4 + s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over + s_rfe_b64 s_save_pc_lo //Return to the main shader program + else + end + +// Save Done timestamp +if G8SR_DEBUG_TIMESTAMP + s_memrealtime s_g8sr_ts_save_d + // SGPR SR memory offset : size(VGPR) + get_vgpr_size_bytes(s_save_mem_offset) + s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET + s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? + // Need reset rsrc2?? 
+ s_mov_b32 m0, s_save_mem_offset + s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1 +end + + + s_branch L_END_PGM + + + +/**************************************************************************/ +/* restore routine */ +/**************************************************************************/ + +L_RESTORE: + /* Setup Resource Contants */ + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) + //calculate wd_addr using absolute thread id + v_readlane_b32 s_restore_tmp, v9, 0 + s_lshr_b32 s_restore_tmp, s_restore_tmp, 6 + s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE + s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO + s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI + s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL + else + end + +if G8SR_DEBUG_TIMESTAMP + s_memrealtime s_g8sr_ts_restore_s + s_waitcnt lgkmcnt(0) //FIXME, will cause xnack?? + // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case... + s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0] + s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored.. +end + + + + s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo + s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi + s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE + s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) + s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK + s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position + s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK + s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position + s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE + + /* global mem offset */ +// s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0 + + /* the first wave in the threadgroup */ + s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK + s_cbranch_scc0 L_RESTORE_VGPR + + /* restore LDS */ + ////////////////////////////// + L_RESTORE_LDS: + + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size + s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero? + s_cbranch_scc0 L_RESTORE_VGPR //no lds used? 
jump to L_RESTORE_VGPR + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes + s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes + + // LDS at offset: size(VGPR)+SIZE(SGPR)+SIZE(HWREG) + // + get_vgpr_size_bytes(s_restore_mem_offset) + get_sgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow??? + + + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + s_mov_b32 m0, 0x0 //lds_offset initial value = 0 + + L_RESTORE_LDS_LOOP: + if (SAVE_LDS) + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW + end + s_add_u32 m0, m0, 256*2 // 128 DW + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW + s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_LDS_LOOP //LDS restore is complete? + + + /* restore VGPRs */ + ////////////////////////////// + L_RESTORE_VGPR: + // VGPR SR memory offset : 0 + s_mov_b32 s_restore_mem_offset, 0x0 + s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead + s_mov_b32 exec_hi, 0xFFFFFFFF + + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) + s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4) + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + +if G8SR_VGPR_SR_IN_DWX4 + get_vgpr_size_bytes(s_restore_mem_offset) + s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 + + // the const stride for DWx4 is 4*4 bytes + s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0 + s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes + + s_mov_b32 m0, s_restore_alloc_size + s_set_gpr_idx_on m0, 0x8 // Note.. 
This will change m0 + +L_RESTORE_VGPR_LOOP: + buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 + s_waitcnt vmcnt(0) + s_sub_u32 m0, m0, 4 + v_mov_b32 v0, v0 // v[0+m0] = v0 + v_mov_b32 v1, v1 + v_mov_b32 v2, v2 + v_mov_b32 v3, v3 + s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 + s_cmp_eq_u32 m0, 0x8000 + s_cbranch_scc0 L_RESTORE_VGPR_LOOP + s_set_gpr_idx_off + + s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0 + s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes + +else + // VGPR load using dw burst + s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 + s_mov_b32 m0, 4 //VGPR initial index value = 1 + s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8 + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later + + L_RESTORE_VGPR_LOOP: + if(USE_MTBUF_INSTEAD_OF_MUBUF) + tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + else + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3 + end + s_waitcnt vmcnt(0) //ensure data ready + v_mov_b32 v0, v0 //v[0+m0] = v0 + v_mov_b32 v1, v1 + v_mov_b32 v2, v2 + v_mov_b32 v3, v3 + s_add_u32 m0, m0, 4 //next vgpr index + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes + s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete? 
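	// (Each pass above loads into v0-v3 and then copies them to
	//  v[m0..m0+3] through the GPR index, which is why v0-v3 themselves
	//  are only reloaded at the very end, from the offset kept in
	//  s_restore_mem_offset_save.)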
+ s_set_gpr_idx_off + /* VGPR restore on v0 */ + if(USE_MTBUF_INSTEAD_OF_MUBUF) + tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1 + else + buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 + buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 + buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 + buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 + end + +end + + /* restore SGPRs */ + ////////////////////////////// + + // SGPR SR memory offset : size(VGPR) + get_vgpr_size_bytes(s_restore_mem_offset) + get_sgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 16*4 // restore SGPR from S[n] to S[0], by 16 sgprs group + // TODO, change RSRC word to rearrange memory layout for SGPRS + + s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size + s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1 + s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value) + + if (SGPR_SAVE_USE_SQC) + s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes + else + s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads) + end + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? + else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + s_mov_b32 m0, s_restore_alloc_size + + L_RESTORE_SGPR_LOOP: + read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset) //PV: further performance improvement can be made + s_waitcnt lgkmcnt(0) //ensure data ready + + s_sub_u32 m0, m0, 16 // Restore from S[n] to S[0] + s_nop 0 // hazard SALU M0=> S_MOVREL + + s_movreld_b64 s0, s0 //s[0+m0] = s0 + s_movreld_b64 s2, s2 + s_movreld_b64 s4, s4 + s_movreld_b64 s6, s6 + s_movreld_b64 s8, s8 + s_movreld_b64 s10, s10 + s_movreld_b64 s12, s12 + s_movreld_b64 s14, s14 + + s_cmp_eq_u32 m0, 0 //scc = (m0 < s_restore_alloc_size) ? 1 : 0 + s_cbranch_scc0 L_RESTORE_SGPR_LOOP //SGPR restore (except s0) is complete? + + /* restore HW registers */ + ////////////////////////////// + L_RESTORE_HWREG: + + +if G8SR_DEBUG_TIMESTAMP + s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo + s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi +end + + // HWREG SR memory offset : size(VGPR)+size(SGPR) + get_vgpr_size_bytes(s_restore_mem_offset) + get_sgpr_size_bytes(s_restore_tmp) + s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp + + + s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes + if (SWIZZLE_EN) + s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking? 
+ else + s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes + end + + read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0 + read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC + read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //EXEC + read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset) + read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset) //STATUS + read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset) //TRAPSTS + read_hwreg_from_mem(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //XNACK_MASK_LO + read_hwreg_from_mem(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset) //XNACK_MASK_HI + read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset) //MODE + + s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS + + s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS + + //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise: + if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) + s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore) + s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over + end + if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL)) + s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal + s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over + end + + s_mov_b32 m0, s_restore_m0 + s_mov_b32 exec_lo, s_restore_exec_lo + s_mov_b32 exec_hi, s_restore_exec_hi + + s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0 + s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts + s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT + s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0 + //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore + s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode + + // Restore trap temporaries 6-11, 13-15 initialized by SPI debug dispatch logic + // ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40 + get_vgpr_size_bytes(s_restore_ttmps_lo) + get_sgpr_size_bytes(s_restore_ttmps_hi) + s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_ttmps_hi + s_add_u32 s_restore_ttmps_lo, s_restore_ttmps_lo, s_restore_buf_rsrc0 + s_addc_u32 s_restore_ttmps_hi, s_restore_buf_rsrc1, 0x0 + s_and_b32 s_restore_ttmps_hi, s_restore_ttmps_hi, 0xFFFF + s_load_dwordx2 [ttmp6, ttmp7], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x40 glc:1 + s_load_dwordx4 [ttmp8, ttmp9, ttmp10, ttmp11], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x48 glc:1 + s_load_dword ttmp13, [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x58 glc:1 + s_load_dwordx2 [ttmp14, ttmp15], [s_restore_ttmps_lo, s_restore_ttmps_hi], 0x5C glc:1 + s_waitcnt lgkmcnt(0) + + //reuse s_restore_m0 as a temp register + s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK + 
s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT + s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT + s_mov_b32 s_restore_tmp, 0x0 //IB_STS is zero + s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0 + s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT + s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT + s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0 + s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK + s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT + s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp + + s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 + s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 + s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu + + s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time + +if G8SR_DEBUG_TIMESTAMP + s_memrealtime s_g8sr_ts_restore_d + s_waitcnt lgkmcnt(0) +end + +// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution + s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc + + +/**************************************************************************/ +/* the END */ +/**************************************************************************/ +L_END_PGM: + s_endpgm + +end + + +/**************************************************************************/ +/* the helper functions */ +/**************************************************************************/ + +//Only for save hwreg to mem +function write_hwreg_to_mem(s, s_rsrc, s_mem_offset) + s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on + s_mov_b32 m0, s_mem_offset + s_buffer_store_dword s, s_rsrc, m0 glc:1 + ack_sqc_store_workaround() + s_add_u32 s_mem_offset, s_mem_offset, 4 + s_mov_b32 m0, exec_lo +end + + +// HWREG are saved before SGPRs, so all HWREG could be use. 
+function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset) + + s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1 + ack_sqc_store_workaround() + s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1 + ack_sqc_store_workaround() + s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1 + ack_sqc_store_workaround() + s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1 + ack_sqc_store_workaround() + s_add_u32 s_rsrc[0], s_rsrc[0], 4*16 + s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0 // +scc +end + + +function read_hwreg_from_mem(s, s_rsrc, s_mem_offset) + s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1 + s_add_u32 s_mem_offset, s_mem_offset, 4 +end + +function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset) + s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset glc:1 + s_sub_u32 s_mem_offset, s_mem_offset, 4*16 +end + + + +function get_lds_size_bytes(s_lds_size_byte) + // SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW + s_getreg_b32 s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) // lds_size + s_lshl_b32 s_lds_size_byte, s_lds_size_byte, 8 //LDS size in dwords = lds_size * 64 *4Bytes // granularity 64DW +end + +function get_vgpr_size_bytes(s_vgpr_size_byte) + s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size + s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1 + s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //Number of VGPRs = (vgpr_size + 1) * 4 * 64 * 4 (non-zero value) //FIXME for GFX, zero is possible +end + +function get_sgpr_size_bytes(s_sgpr_size_byte) + s_getreg_b32 s_sgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size + s_add_u32 s_sgpr_size_byte, s_sgpr_size_byte, 1 + s_lshl_b32 s_sgpr_size_byte, s_sgpr_size_byte, 6 //Number of SGPRs = (sgpr_size + 1) * 16 *4 (non-zero value) +end + +function get_hwreg_size_bytes + return 128 //HWREG size 128 bytes +end + +function ack_sqc_store_workaround + if ACK_SQC_STORE + s_waitcnt lgkmcnt(0) + end +end + + +#endif + +static const uint32_t cwsr_trap_gfx9_hex[] = { + 0xbf820001, 0xbf820158, + 0xb8f8f802, 0x89788678, + 0xb8f1f803, 0x866eff71, + 0x00000400, 0xbf850034, + 0x866eff71, 0x00000800, + 0xbf850003, 0x866eff71, + 0x00000100, 0xbf840008, + 0x866eff78, 0x00002000, + 0xbf840001, 0xbf810000, + 0x8778ff78, 0x00002000, + 0x80ec886c, 0x82ed806d, + 0xb8eef807, 0x866fff6e, + 0x001f8000, 0x8e6f8b6f, + 0x8977ff77, 0xfc000000, + 0x87776f77, 0x896eff6e, + 0x001f8000, 0xb96ef807, + 0xb8f0f812, 0xb8f1f813, + 0x8ef08870, 0xc0071bb8, + 0x00000000, 0xbf8cc07f, + 0xc0071c38, 0x00000008, + 0xbf8cc07f, 0x86ee6e6e, + 0xbf840001, 0xbe801d6e, + 0xb8f1f803, 0x8671ff71, + 0x000001ff, 0xbf850002, + 0x806c846c, 0x826d806d, + 0x866dff6d, 0x0000ffff, + 0x8f6e8b77, 0x866eff6e, + 0x001f8000, 0xb96ef807, + 0x86fe7e7e, 0x86ea6a6a, + 0xb978f802, 0xbe801f6c, + 0x866dff6d, 0x0000ffff, + 0xbef00080, 0xb9700283, + 0xb8f02407, 0x8e709c70, + 0x876d706d, 0xb8f003c7, + 0x8e709b70, 0x876d706d, + 0xb8f0f807, 0x8670ff70, + 0x00007fff, 0xb970f807, + 0xbeee007e, 0xbeef007f, + 0xbefe0180, 0xbf900004, + 0xbf8e0002, 0xbf88fffe, + 0xb8f02a05, 0x80708170, + 0x8e708a70, 0xb8f11605, + 0x80718171, 0x8e718671, + 0x80707170, 0x80707e70, + 0x8271807f, 0x8671ff71, + 0x0000ffff, 0xc0471cb8, + 0x00000040, 0xbf8cc07f, + 0xc04b1d38, 0x00000048, + 0xbf8cc07f, 0xc0431e78, + 0x00000058, 0xbf8cc07f, + 0xc0471eb8, 0x0000005c, + 0xbf8cc07f, 0xbef4007e, + 0x8675ff7f, 0x0000ffff, + 0x8775ff75, 0x00040000, + 
0xbef60080, 0xbef700ff, + 0x00807fac, 0x8670ff7f, + 0x08000000, 0x8f708370, + 0x87777077, 0x8670ff7f, + 0x70000000, 0x8f708170, + 0x87777077, 0xbefb007c, + 0xbefa0080, 0xb8fa2a05, + 0x807a817a, 0x8e7a8a7a, + 0xb8f01605, 0x80708170, + 0x8e708670, 0x807a707a, + 0xbef60084, 0xbef600ff, + 0x01000000, 0xbefe007c, + 0xbefc007a, 0xc0611efa, + 0x0000007c, 0xbf8cc07f, + 0x807a847a, 0xbefc007e, + 0xbefe007c, 0xbefc007a, + 0xc0611b3a, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xbefe007c, + 0xbefc007a, 0xc0611b7a, + 0x0000007c, 0xbf8cc07f, + 0x807a847a, 0xbefc007e, + 0xbefe007c, 0xbefc007a, + 0xc0611bba, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xbefe007c, + 0xbefc007a, 0xc0611bfa, + 0x0000007c, 0xbf8cc07f, + 0x807a847a, 0xbefc007e, + 0xbefe007c, 0xbefc007a, + 0xc0611e3a, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xb8f1f803, + 0xbefe007c, 0xbefc007a, + 0xc0611c7a, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xbefe007c, + 0xbefc007a, 0xc0611a3a, + 0x0000007c, 0xbf8cc07f, + 0x807a847a, 0xbefc007e, + 0xbefe007c, 0xbefc007a, + 0xc0611a7a, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xb8fbf801, + 0xbefe007c, 0xbefc007a, + 0xc0611efa, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0x8670ff7f, + 0x04000000, 0xbeef0080, + 0x876f6f70, 0xb8fa2a05, + 0x807a817a, 0x8e7a8a7a, + 0xb8f11605, 0x80718171, + 0x8e718471, 0x8e768271, + 0xbef600ff, 0x01000000, + 0xbef20174, 0x80747a74, + 0x82758075, 0xbefc0080, + 0xbf800000, 0xbe802b00, + 0xbe822b02, 0xbe842b04, + 0xbe862b06, 0xbe882b08, + 0xbe8a2b0a, 0xbe8c2b0c, + 0xbe8e2b0e, 0xc06b003a, + 0x00000000, 0xbf8cc07f, + 0xc06b013a, 0x00000010, + 0xbf8cc07f, 0xc06b023a, + 0x00000020, 0xbf8cc07f, + 0xc06b033a, 0x00000030, + 0xbf8cc07f, 0x8074c074, + 0x82758075, 0x807c907c, + 0xbf0a717c, 0xbf85ffe7, + 0xbef40172, 0xbefa0080, + 0xbefe00c1, 0xbeff00c1, + 0xbee80080, 0xbee90080, + 0xbef600ff, 0x01000000, + 0xe0724000, 0x7a1d0000, + 0xe0724100, 0x7a1d0100, + 0xe0724200, 0x7a1d0200, + 0xe0724300, 0x7a1d0300, + 0xbefe00c1, 0xbeff00c1, + 0xb8f14306, 0x8671c171, + 0xbf84002c, 0xbf8a0000, + 0x8670ff6f, 0x04000000, + 0xbf840028, 0x8e718671, + 0x8e718271, 0xbef60071, + 0xb8fa2a05, 0x807a817a, + 0x8e7a8a7a, 0xb8f01605, + 0x80708170, 0x8e708670, + 0x807a707a, 0x807aff7a, + 0x00000080, 0xbef600ff, + 0x01000000, 0xbefc0080, + 0xd28c0002, 0x000100c1, + 0xd28d0003, 0x000204c1, + 0xd1060002, 0x00011103, + 0x7e0602ff, 0x00000200, + 0xbefc00ff, 0x00010000, + 0xbe800077, 0x8677ff77, + 0xff7fffff, 0x8777ff77, + 0x00058000, 0xd8ec0000, + 0x00000002, 0xbf8cc07f, + 0xe0765000, 0x7a1d0002, + 0x68040702, 0xd0c9006a, + 0x0000e302, 0xbf87fff7, + 0xbef70000, 0xbefa00ff, + 0x00000400, 0xbefe00c1, + 0xbeff00c1, 0xb8f12a05, + 0x80718171, 0x8e718271, + 0x8e768871, 0xbef600ff, + 0x01000000, 0xbefc0084, + 0xbf0a717c, 0xbf840015, + 0xbf11017c, 0x8071ff71, + 0x00001000, 0x7e000300, + 0x7e020301, 0x7e040302, + 0x7e060303, 0xe0724000, + 0x7a1d0000, 0xe0724100, + 0x7a1d0100, 0xe0724200, + 0x7a1d0200, 0xe0724300, + 0x7a1d0300, 0x807c847c, + 0x807aff7a, 0x00000400, + 0xbf0a717c, 0xbf85ffef, + 0xbf9c0000, 0xbf8200d9, + 0xbef4007e, 0x8675ff7f, + 0x0000ffff, 0x8775ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x00807fac, + 0x866eff7f, 0x08000000, + 0x8f6e836e, 0x87776e77, + 0x866eff7f, 0x70000000, + 0x8f6e816e, 0x87776e77, + 0x866eff7f, 0x04000000, + 0xbf84001e, 0xbefe00c1, + 0xbeff00c1, 0xb8ef4306, + 0x866fc16f, 0xbf840019, + 0x8e6f866f, 0x8e6f826f, + 0xbef6006f, 0xb8f82a05, + 0x80788178, 0x8e788a78, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0x8078ff78, 
0x00000080, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0xe0510000, + 0x781d0000, 0xe0510100, + 0x781d0000, 0x807cff7c, + 0x00000200, 0x8078ff78, + 0x00000200, 0xbf0a6f7c, + 0xbf85fff6, 0xbef80080, + 0xbefe00c1, 0xbeff00c1, + 0xb8ef2a05, 0x806f816f, + 0x8e6f826f, 0x8e76886f, + 0xbef600ff, 0x01000000, + 0xbeee0078, 0x8078ff78, + 0x00000400, 0xbefc0084, + 0xbf11087c, 0x806fff6f, + 0x00008000, 0xe0524000, + 0x781d0000, 0xe0524100, + 0x781d0100, 0xe0524200, + 0x781d0200, 0xe0524300, + 0x781d0300, 0xbf8c0f70, + 0x7e000300, 0x7e020301, + 0x7e040302, 0x7e060303, + 0x807c847c, 0x8078ff78, + 0x00000400, 0xbf0a6f7c, + 0xbf85ffee, 0xbf9c0000, + 0xe0524000, 0x6e1d0000, + 0xe0524100, 0x6e1d0100, + 0xe0524200, 0x6e1d0200, + 0xe0524300, 0x6e1d0300, + 0xb8f82a05, 0x80788178, + 0x8e788a78, 0xb8ee1605, + 0x806e816e, 0x8e6e866e, + 0x80786e78, 0x80f8c078, + 0xb8ef1605, 0x806f816f, + 0x8e6f846f, 0x8e76826f, + 0xbef600ff, 0x01000000, + 0xbefc006f, 0xc031003a, + 0x00000078, 0x80f8c078, + 0xbf8cc07f, 0x80fc907c, + 0xbf800000, 0xbe802d00, + 0xbe822d02, 0xbe842d04, + 0xbe862d06, 0xbe882d08, + 0xbe8a2d0a, 0xbe8c2d0c, + 0xbe8e2d0e, 0xbf06807c, + 0xbf84fff0, 0xb8f82a05, + 0x80788178, 0x8e788a78, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0xbef60084, 0xbef600ff, + 0x01000000, 0xc0211bfa, + 0x00000078, 0x80788478, + 0xc0211b3a, 0x00000078, + 0x80788478, 0xc0211b7a, + 0x00000078, 0x80788478, + 0xc0211eba, 0x00000078, + 0x80788478, 0xc0211efa, + 0x00000078, 0x80788478, + 0xc0211c3a, 0x00000078, + 0x80788478, 0xc0211c7a, + 0x00000078, 0x80788478, + 0xc0211a3a, 0x00000078, + 0x80788478, 0xc0211a7a, + 0x00000078, 0x80788478, + 0xc0211cfa, 0x00000078, + 0x80788478, 0xbf8cc07f, + 0x866dff6d, 0x0000ffff, + 0xbefc006f, 0xbefe007a, + 0xbeff007b, 0x866f71ff, + 0x000003ff, 0xb96f4803, + 0x866f71ff, 0xfffff800, + 0x8f6f8b6f, 0xb96fa2c3, + 0xb973f801, 0xb8ee2a05, + 0x806e816e, 0x8e6e8a6e, + 0xb8ef1605, 0x806f816f, + 0x8e6f866f, 0x806e6f6e, + 0x806e746e, 0x826f8075, + 0x866fff6f, 0x0000ffff, + 0xc0071cb7, 0x00000040, + 0xc00b1d37, 0x00000048, + 0xc0031e77, 0x00000058, + 0xc0071eb7, 0x0000005c, + 0xbf8cc07f, 0x866fff6d, + 0xf0000000, 0x8f6f9c6f, + 0x8e6f906f, 0xbeee0080, + 0x876e6f6e, 0x866fff6d, + 0x08000000, 0x8f6f9b6f, + 0x8e6f8f6f, 0x876e6f6e, + 0x866fff70, 0x00800000, + 0x8f6f976f, 0xb96ef807, + 0x86fe7e7e, 0x86ea6a6a, + 0xb970f802, 0xbf8a0000, + 0x95806f6c, 0xbf810000, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index c368ce3e96ff..053f1d0f80b8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -30,6 +30,7 @@ #include "kfd_device_queue_manager.h" #include "kfd_pm4_headers_vi.h" #include "cwsr_trap_handler_gfx8.asm" +#include "cwsr_trap_handler_gfx9.asm" #include "kfd_iommu.h" #define MQD_SIZE_ALIGNED 768 @@ -333,10 +334,16 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, static void kfd_cwsr_init(struct kfd_dev *kfd) { if (cwsr_enable && kfd->device_info->supports_cwsr) { - BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); + if (kfd->device_info->asic_family < CHIP_VEGA10) { + BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); + kfd->cwsr_isa = cwsr_trap_gfx8_hex; + kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); + } else { + BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); + kfd->cwsr_isa = cwsr_trap_gfx9_hex; + kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); + } - kfd->cwsr_isa = cwsr_trap_gfx8_hex; - kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); kfd->cwsr_enabled = true; } } -- cgit v1.2.1 From 
6106dce9559ec5a4b1a97302f2fcc508e40d2747 Mon Sep 17 00:00:00 2001 From: welu Date: Tue, 10 Apr 2018 17:33:17 -0400 Subject: drm/amdkfd: Try to enable atomics for all GPUs Report failure to enable atomics only on GPUs that require them. This allows GPUs that don't require atomics to function, but can benefit if they are available. This is the case for Vega10, which doesn't use atomics for basic functioning of the MEC, AQL and HWS microcode. So it can work without atomics. But shader programs can still use atomic instructions on systems that support PCIe atomics. Signed-off-by: welu Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 053f1d0f80b8..0e64fb2c95e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -290,7 +290,7 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, const struct kfd2kgd_calls *f2g) { struct kfd_dev *kfd; - + int ret; const struct kfd_device_info *device_info = lookup_device_info(pdev->device); @@ -299,19 +299,18 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, return NULL; } - if (device_info->needs_pci_atomics) { - /* Allow BIF to recode atomics to PCIe 3.0 - * AtomicOps. 32 and 64-bit requests are possible and - * must be supported. - */ - if (pci_enable_atomic_ops_to_root(pdev, - PCI_EXP_DEVCAP2_ATOMIC_COMP32 | - PCI_EXP_DEVCAP2_ATOMIC_COMP64) < 0) { - dev_info(kfd_device, - "skipped device %x:%x, PCI rejects atomics", - pdev->vendor, pdev->device); - return NULL; - } + /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. + * 32 and 64-bit requests are possible and must be + * supported. + */ + ret = pci_enable_atomic_ops_to_root(pdev, + PCI_EXP_DEVCAP2_ATOMIC_COMP32 | + PCI_EXP_DEVCAP2_ATOMIC_COMP64); + if (device_info->needs_pci_atomics && ret < 0) { + dev_info(kfd_device, + "skipped device %x:%x, PCI rejects atomics\n", + pdev->vendor, pdev->device); + return NULL; } kfd = kzalloc(sizeof(*kfd), GFP_KERNEL); -- cgit v1.2.1 From 389056e5fef477c838dc20a08d6f1de960cf027b Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 10 Apr 2018 17:33:18 -0400 Subject: drm/amdkfd: Add Vega10 topology and device info * Report 64-bit doorbells as HSA_CAP_DOORBELL_TYPE_2_0 in topology * Report cache information in topology (duplicates GFXv8 info for now) * Add device info for Vega10 support in KFD Raven is not enabled at this time as it needs additional changes in DQM to work with a single SDMA engine. 
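The doorbell type is packed into the per-node capability word using the HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT/MASK fields from kfd_topology.h, as the topology hunk further below shows. As a rough sketch (not part of the patch; the helper name is made up), a consumer of that capability word could recover the reported type like this:

	/* Hypothetical helper, assuming kfd_topology.h is included for the
	 * HSA_CAP_DOORBELL_TYPE_* macros packed by kfd_topology_add_device().
	 */
	static inline uint32_t kfd_doorbell_type(uint32_t capability)
	{
		/* 0x0: pre-1.0, 0x1: 1.0 (32-bit), 0x2: 2.0 (64-bit, Vega10) */
		return (capability & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK) >>
		       HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT;
	}

With the device-info entries added below, Vega10 reports HSA_CAP_DOORBELL_TYPE_2_0 here, matching its 8-byte doorbell slots (doorbell_size = 8).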
Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 11 +++++++++ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 37 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 6 +++++ drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1 + 4 files changed, 55 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 4f126ef6139b..296b3f230280 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -132,6 +132,9 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { #define fiji_cache_info carrizo_cache_info #define polaris10_cache_info carrizo_cache_info #define polaris11_cache_info carrizo_cache_info +/* TODO - check & update Vega10 cache details */ +#define vega10_cache_info carrizo_cache_info +#define raven_cache_info carrizo_cache_info static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev, struct crat_subtype_computeunit *cu) @@ -603,6 +606,14 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info = polaris11_cache_info; num_of_cache_types = ARRAY_SIZE(polaris11_cache_info); break; + case CHIP_VEGA10: + pcache_info = vega10_cache_info; + num_of_cache_types = ARRAY_SIZE(vega10_cache_info); + break; + case CHIP_RAVEN: + pcache_info = raven_cache_info; + num_of_cache_types = ARRAY_SIZE(raven_cache_info); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0e64fb2c95e5..dd6c7535b6b4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -182,6 +182,34 @@ static const struct kfd_device_info polaris11_device_info = { .needs_pci_atomics = true, }; +static const struct kfd_device_info vega10_device_info = { + .asic_family = CHIP_VEGA10, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = false, +}; + +static const struct kfd_device_info vega10_vf_device_info = { + .asic_family = CHIP_VEGA10, + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = false, +}; + struct kfd_deviceid { unsigned short did; @@ -261,6 +289,15 @@ static const struct kfd_deviceid supported_devices[] = { { 0x67EB, &polaris11_device_info }, /* Polaris11 */ { 0x67EF, &polaris11_device_info }, /* Polaris11 */ { 0x67FF, &polaris11_device_info }, /* Polaris11 */ + { 0x6860, &vega10_device_info }, /* Vega10 */ + { 0x6861, &vega10_device_info }, /* Vega10 */ + { 0x6862, &vega10_device_info }, /* Vega10 */ + { 0x6863, &vega10_device_info }, /* Vega10 */ + { 0x6864, &vega10_device_info }, /* Vega10 */ + { 0x6867, &vega10_device_info }, /* Vega10 */ + { 0x6868, &vega10_device_info }, /* Vega10 */ + { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ + { 0x687F, &vega10_device_info }, /* Vega10 */ }; static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index ac28abc94e57..bc95d4dfee2e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1239,6 +1239,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu) HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); break; + case CHIP_VEGA10: + case CHIP_RAVEN: + dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << + HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & + HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); + break; default: WARN(1, "Unexpected ASIC family %u", dev->gpu->device_info->asic_family); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index eb54cfcaf039..7d9c3f948dff 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -45,6 +45,7 @@ #define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0 #define HSA_CAP_DOORBELL_TYPE_1_0 0x1 +#define HSA_CAP_DOORBELL_TYPE_2_0 0x2 #define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000 struct kfd_node_properties { -- cgit v1.2.1 From 86993018d7d23b934d1c884be0fbf0bcfa15b8c5 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 15 Mar 2018 16:40:02 -0400 Subject: drm/amdgpu: Add CM_TEST_DEBUG regs for DCN We'd like to use them for reading DCN debug status. Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h | 19 ++++++++++++++++--- .../drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h | 8 ++++++++ 2 files changed, 24 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h index 4ccf9681c45d..721c61171045 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h @@ -3895,6 +3895,10 @@ #define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX 2 #define mmCM0_CM_MEM_PWR_STATUS 0x0d33 #define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_INDEX 0x0d35 +#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM0_CM_TEST_DEBUG_DATA 0x0d36 +#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec @@ -4367,7 +4371,10 @@ #define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX 2 #define mmCM1_CM_MEM_PWR_STATUS 0x0e4e #define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX 2 - +#define mmCM1_CM_TEST_DEBUG_INDEX 0x0e50 +#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM1_CM_TEST_DEBUG_DATA 0x0e51 +#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp1_dispdec_dpp_dcperfmon_dc_perfmon_dispdec // base address: 0x399c @@ -4839,7 +4846,10 @@ #define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX 2 #define mmCM2_CM_MEM_PWR_STATUS 0x0f69 #define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX 2 - +#define mmCM2_CM_TEST_DEBUG_INDEX 0x0f6b +#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM2_CM_TEST_DEBUG_DATA 0x0f6c +#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp2_dispdec_dpp_dcperfmon_dc_perfmon_dispdec // base address: 0x3e08 @@ -5311,7 +5321,10 @@ #define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX 2 #define mmCM3_CM_MEM_PWR_STATUS 0x1084 #define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX 2 - +#define mmCM3_CM_TEST_DEBUG_INDEX 0x1086 +#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2 +#define mmCM3_CM_TEST_DEBUG_DATA 0x1087 +#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2 // addressBlock: dce_dc_dpp3_dispdec_dpp_dcperfmon_dc_perfmon_dispdec // base 
address: 0x4274 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h index e2a2f114bd8e..e7c0cad41081 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_sh_mask.h @@ -14049,6 +14049,14 @@ #define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE__SHIFT 0x2 #define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L #define CM0_CM_MEM_PWR_STATUS__RGAM_MEM_PWR_STATE_MASK 0x0000000CL +//CM0_CM_TEST_DEBUG_INDEX +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8 +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL +#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L +//CM0_CM_TEST_DEBUG_DATA +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0 +#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL // addressBlock: dce_dc_dpp0_dispdec_dpp_dcperfmon_dc_perfmon_dispdec -- cgit v1.2.1 From 35d13315957f906774013ec374ce2263b665706c Mon Sep 17 00:00:00 2001 From: Martin Tsai Date: Wed, 7 Mar 2018 04:22:03 +0800 Subject: drm/amd/display: correct the condition in setting cursor not visible beyond left edge Signed-off-by: Martin Tsai Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index e305c28c98de..3356125a6117 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -416,7 +416,7 @@ void dpp1_set_cursor_position( if (src_x_offset >= (int)param->viewport_width) cur_en = 0; /* not visible beyond right edge*/ - if (src_x_offset + (int)width < 0) + if (src_x_offset + (int)width <= 0) cur_en = 0; /* not visible beyond left edge*/ REG_UPDATE(CURSOR0_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 39b72f696ae9..81b81e6efcd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -897,7 +897,7 @@ void hubp1_cursor_set_position( if (src_x_offset >= (int)param->viewport_width) cur_en = 0; /* not visible beyond right edge*/ - if (src_x_offset + (int)hubp->curs_attr.width < 0) + if (src_x_offset + (int)hubp->curs_attr.width <= 0) cur_en = 0; /* not visible beyond left edge*/ if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0) -- cgit v1.2.1 From ba0a5aaa867d77cedb2cd6ad9e647243d9ba9650 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Wed, 21 Feb 2018 16:41:42 -0500 Subject: drm/amd/display: dal 3.1.39 Signed-off-by: Tony Cheng Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index fa4b3c8b3bb7..4d9da9d9c731 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.38" +#define DC_VER "3.1.39" #define 
MAX_SURFACES 3 #define MAX_STREAMS 6 -- cgit v1.2.1 From 3c1a312aa4e4201efa8719e70a6dccd3acd6eba4 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Wed, 7 Mar 2018 09:12:53 -0500 Subject: drm/amd/display: Retry when read dpcd caps failed. Some DP panel not detected intermittently due to read dpcd caps failed when doing hot plug. [root cause] DC_HPD_CONNECT_INT_DELAY is set to 0, not delay after HPD toggle and read dpcd data, while some panel need 4ms defer to read. [solution] Add a retry when read failed. Signed-off-by: Yongqiang Sun Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 3b5053570229..b86325bb636f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2278,6 +2278,8 @@ static bool retrieve_link_cap(struct dc_link *link) union edp_configuration_cap edp_config_cap; union dp_downstream_port_present ds_port = { 0 }; enum dc_status status = DC_ERROR_UNEXPECTED; + uint32_t read_dpcd_retry_cnt = 3; + int i; memset(dpcd_data, '\0', sizeof(dpcd_data)); memset(&down_strm_port_count, @@ -2285,11 +2287,15 @@ static bool retrieve_link_cap(struct dc_link *link) memset(&edp_config_cap, '\0', sizeof(union edp_configuration_cap)); - status = core_link_read_dpcd( - link, - DP_DPCD_REV, - dpcd_data, - sizeof(dpcd_data)); + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } if (status != DC_OK) { dm_error("%s: Read dpcd data failed.\n", __func__); -- cgit v1.2.1 From b552204b10ef30940d374510a1572b2eb4e24af6 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Tue, 6 Mar 2018 13:41:38 -0500 Subject: drm/amd/display: Update ASIC header files Also separate register address initialization between ASICs for the registers that were removed in scaled-down variation of the ASIC. 
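The mechanism behind this split, assuming the usual DC register-access convention that REG(x) dereferences the address stored in the block's register struct: an ASIC variant whose register list omits the SRI() initializer for a register leaves that address at zero, so a runtime presence check skips programming it. A minimal sketch of the pattern the hubp hunks below rely on:

	/* Sketch only; mirrors the guarded accesses added in dcn10_hubp.c.
	 * On the scaled-down ASIC the NOM_PARAMETERS_* addresses stay 0,
	 * so REG() evaluates false and the register is never touched.
	 */
	if (REG(NOM_PARAMETERS_0))
		REG_SET(NOM_PARAMETERS_0, 0,
			DST_Y_PER_PTE_ROW_NOM_L,
			dlg_attr->dst_y_per_pte_row_nom_l);

Splitting the list into a common HUBP_REG_LIST_DCN() plus an add-on macro keeps the shared registers in one place while letting each ASIC opt in to the ones that were removed from the scaled-down variant.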
Signed-off-by: Nikola Cornij Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 20 ++++++++------ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 32 +++++++++++++++-------- 2 files changed, 33 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 81b81e6efcd4..4ca9b6e9a824 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -535,11 +535,13 @@ void hubp1_program_deadline( REG_SET(VBLANK_PARAMETERS_3, 0, REFCYC_PER_META_CHUNK_VBLANK_L, dlg_attr->refcyc_per_meta_chunk_vblank_l); - REG_SET(NOM_PARAMETERS_0, 0, - DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l); + if (REG(NOM_PARAMETERS_0)) + REG_SET(NOM_PARAMETERS_0, 0, + DST_Y_PER_PTE_ROW_NOM_L, dlg_attr->dst_y_per_pte_row_nom_l); - REG_SET(NOM_PARAMETERS_1, 0, - REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l); + if (REG(NOM_PARAMETERS_1)) + REG_SET(NOM_PARAMETERS_1, 0, + REFCYC_PER_PTE_GROUP_NOM_L, dlg_attr->refcyc_per_pte_group_nom_l); REG_SET(NOM_PARAMETERS_4, 0, DST_Y_PER_META_ROW_NOM_L, dlg_attr->dst_y_per_meta_row_nom_l); @@ -568,11 +570,13 @@ void hubp1_program_deadline( REG_SET(VBLANK_PARAMETERS_4, 0, REFCYC_PER_META_CHUNK_VBLANK_C, dlg_attr->refcyc_per_meta_chunk_vblank_c); - REG_SET(NOM_PARAMETERS_2, 0, - DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c); + if (REG(NOM_PARAMETERS_2)) + REG_SET(NOM_PARAMETERS_2, 0, + DST_Y_PER_PTE_ROW_NOM_C, dlg_attr->dst_y_per_pte_row_nom_c); - REG_SET(NOM_PARAMETERS_3, 0, - REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c); + if (REG(NOM_PARAMETERS_3)) + REG_SET(NOM_PARAMETERS_3, 0, + REFCYC_PER_PTE_GROUP_NOM_C, dlg_attr->refcyc_per_pte_group_nom_c); REG_SET(NOM_PARAMETERS_6, 0, DST_Y_PER_META_ROW_NOM_C, dlg_attr->dst_y_per_meta_row_nom_c); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 4a3703e12ea1..c794ce4a8177 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -30,6 +30,7 @@ #define TO_DCN10_HUBP(hubp)\ container_of(hubp, struct dcn10_hubp, base) +/* Register address initialization macro for all ASICs (including those with reduced functionality) */ #define HUBP_REG_LIST_DCN(id)\ SRI(DCHUBP_CNTL, HUBP, id),\ SRI(HUBPREQ_DEBUG_DB, HUBP, id),\ @@ -78,16 +79,12 @@ SRI(REF_FREQ_TO_PIX_FREQ, HUBPREQ, id),\ SRI(VBLANK_PARAMETERS_1, HUBPREQ, id),\ SRI(VBLANK_PARAMETERS_3, HUBPREQ, id),\ - SRI(NOM_PARAMETERS_0, HUBPREQ, id),\ - SRI(NOM_PARAMETERS_1, HUBPREQ, id),\ SRI(NOM_PARAMETERS_4, HUBPREQ, id),\ SRI(NOM_PARAMETERS_5, HUBPREQ, id),\ SRI(PER_LINE_DELIVERY_PRE, HUBPREQ, id),\ SRI(PER_LINE_DELIVERY, HUBPREQ, id),\ SRI(VBLANK_PARAMETERS_2, HUBPREQ, id),\ SRI(VBLANK_PARAMETERS_4, HUBPREQ, id),\ - SRI(NOM_PARAMETERS_2, HUBPREQ, id),\ - SRI(NOM_PARAMETERS_3, HUBPREQ, id),\ SRI(NOM_PARAMETERS_6, HUBPREQ, id),\ SRI(NOM_PARAMETERS_7, HUBPREQ, id),\ SRI(DCN_TTU_QOS_WM, HUBPREQ, id),\ @@ -96,11 +93,19 @@ SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\ SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\ SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\ - SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id),\ SRI(HUBP_CLK_CNTL, HUBP, id) +/* Register address initialization macro for "generic" ASICs with full functionality */ +#define HUBP_REG_LIST_DCN_GEN(id)\ + 
SRI(NOM_PARAMETERS_0, HUBPREQ, id),\ + SRI(NOM_PARAMETERS_1, HUBPREQ, id),\ + SRI(NOM_PARAMETERS_2, HUBPREQ, id),\ + SRI(NOM_PARAMETERS_3, HUBPREQ, id),\ + SRI(DCN_VM_MX_L1_TLB_CNTL, HUBPREQ, id) + #define HUBP_REG_LIST_DCN10(id)\ HUBP_REG_LIST_DCN(id),\ + HUBP_REG_LIST_DCN_GEN(id),\ SRI(PREFETCH_SETTINS, HUBPREQ, id),\ SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\ @@ -237,6 +242,7 @@ #define HUBP_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix +/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */ #define HUBP_MASK_SH_LIST_DCN(mask_sh)\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\ @@ -335,8 +341,6 @@ HUBP_SF(HUBPREQ0_REF_FREQ_TO_PIX_FREQ, REF_FREQ_TO_PIX_FREQ, mask_sh),\ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_1, REFCYC_PER_PTE_GROUP_VBLANK_L, mask_sh),\ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_3, REFCYC_PER_META_CHUNK_VBLANK_L, mask_sh),\ - HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\ - HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_4, DST_Y_PER_META_ROW_NOM_L, mask_sh),\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_5, REFCYC_PER_META_CHUNK_NOM_L, mask_sh),\ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY_PRE, REFCYC_PER_LINE_DELIVERY_PRE_L, mask_sh),\ @@ -345,8 +349,6 @@ HUBP_SF(HUBPREQ0_PER_LINE_DELIVERY, REFCYC_PER_LINE_DELIVERY_C, mask_sh),\ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_2, REFCYC_PER_PTE_GROUP_VBLANK_C, mask_sh),\ HUBP_SF(HUBPREQ0_VBLANK_PARAMETERS_4, REFCYC_PER_META_CHUNK_VBLANK_C, mask_sh),\ - HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\ - HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_6, DST_Y_PER_META_ROW_NOM_C, mask_sh),\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_7, REFCYC_PER_META_CHUNK_NOM_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_TTU_QOS_WM, QoS_LEVEL_LOW_WM, mask_sh),\ @@ -357,12 +359,20 @@ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ - HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\ - HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh) +/* Mask/shift struct generation macro for "generic" ASICs with full functionality */ +#define HUBP_MASK_SH_LIST_DCN_GEN(mask_sh)\ + HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\ + HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\ + HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\ + HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\ + HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh) + #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\ HUBP_MASK_SH_LIST_DCN(mask_sh),\ + HUBP_MASK_SH_LIST_DCN_GEN(mask_sh),\ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\ -- cgit v1.2.1 From e4b3f6f299436be812aca4845bd20f592eaf074e Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 8 Mar 2018 12:08:01 -0500 Subject: drm/amd/display: fix Polaris 12 bw bounding box 
Signed-off-by: Dmytro Laktyushkin Signed-off-by: Harry Wentland Reviewed-by: Bhawanpreet Lakha Acked-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 121 ++++++++++++++++++++++- drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h | 1 + 2 files changed, 120 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 0cbab81ab304..821502b1acba 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -52,10 +52,11 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi return BW_CALCS_VERSION_CARRIZO; case FAMILY_VI: + if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) + return BW_CALCS_VERSION_POLARIS12; if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev)) return BW_CALCS_VERSION_POLARIS10; - if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev) || - ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) + if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev)) return BW_CALCS_VERSION_POLARIS11; return BW_CALCS_VERSION_INVALID; @@ -2373,6 +2374,122 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip, dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2; dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); break; + case BW_CALCS_VERSION_POLARIS12: + vbios.memory_type = bw_def_gddr5; + vbios.dram_channel_width_in_bits = 32; + vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits; + vbios.number_of_dram_banks = 8; + vbios.high_yclk = bw_int_to_fixed(6000); + vbios.mid_yclk = bw_int_to_fixed(3200); + vbios.low_yclk = bw_int_to_fixed(1000); + vbios.low_sclk = bw_int_to_fixed(678); + vbios.mid1_sclk = bw_int_to_fixed(864); + vbios.mid2_sclk = bw_int_to_fixed(900); + vbios.mid3_sclk = bw_int_to_fixed(920); + vbios.mid4_sclk = bw_int_to_fixed(940); + vbios.mid5_sclk = bw_int_to_fixed(960); + vbios.mid6_sclk = bw_int_to_fixed(980); + vbios.high_sclk = bw_int_to_fixed(1049); + vbios.low_voltage_max_dispclk = bw_int_to_fixed(459); + vbios.mid_voltage_max_dispclk = bw_int_to_fixed(654); + vbios.high_voltage_max_dispclk = bw_int_to_fixed(1108); + vbios.low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios.mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios.high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios.data_return_bus_width = bw_int_to_fixed(32); + vbios.trc = bw_int_to_fixed(48); + if (vbios.number_of_dram_channels == 2) // 64-bit + vbios.dmifmc_urgent_latency = bw_int_to_fixed(4); + else + vbios.dmifmc_urgent_latency = bw_int_to_fixed(3); + vbios.stutter_self_refresh_exit_latency = bw_int_to_fixed(5); + vbios.stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios.nbp_state_change_latency = bw_int_to_fixed(250); + vbios.mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios.scatter_gather_enable = false; + vbios.down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios.cursor_width = 32; + vbios.average_compression_rate = 4; + vbios.number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios.blackout_duration = bw_int_to_fixed(0); /* us */ + vbios.maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip.max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip.max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip.percent_of_ideal_port_bw_received_after_urgent_latency 
= 100; + dceip.large_cursor = false; + dceip.dmif_request_buffer_size = bw_int_to_fixed(768); + dceip.dmif_pipe_en_fbc_chunk_tracker = false; + dceip.cursor_max_outstanding_group_num = 1; + dceip.lines_interleaved_into_lb = 2; + dceip.chunk_width = 256; + dceip.number_of_graphics_pipes = 5; + dceip.number_of_underlay_pipes = 0; + dceip.low_power_tiling_mode = 0; + dceip.display_write_back_supported = true; + dceip.argb_compression_support = true; + dceip.underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip.underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip.underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip.underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip.graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip.graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip.graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip.graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip.alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip.max_dmif_buffer_allocated = 4; + dceip.graphics_dmif_size = 12288; + dceip.underlay_luma_dmif_size = 19456; + dceip.underlay_chroma_dmif_size = 23552; + dceip.pre_downscaler_enabled = true; + dceip.underlay_downscale_prefetch_enabled = true; + dceip.lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip.lb_size_per_component444 = bw_int_to_fixed(245952); + dceip.graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip.stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip.underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip.underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip.underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip.cursor_chunk_width = bw_int_to_fixed(64); + dceip.cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip.underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip.underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip.peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip.peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip.minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip.maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip.limit_excessive_outstanding_dmif_requests = true; + dceip.linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip.scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip.display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip.display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip.request_efficiency = bw_frc_to_fixed(8, 10); + dceip.dispclk_per_request = bw_int_to_fixed(2); + dceip.dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip.display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip.scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; case BW_CALCS_VERSION_STONEY: vbios.memory_type = bw_def_gddr5; vbios.dram_channel_width_in_bits = 64; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h index a9bfe9ff8ce6..0bd87f24fc06 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h @@ -42,6 +42,7 @@ enum bw_calcs_version { BW_CALCS_VERSION_CARRIZO, BW_CALCS_VERSION_POLARIS10, BW_CALCS_VERSION_POLARIS11, + BW_CALCS_VERSION_POLARIS12, BW_CALCS_VERSION_STONEY, BW_CALCS_VERSION_VEGA10 }; -- cgit v1.2.1 From deb0aac6af79265408c2b3c62b3d1150e7c46a1b Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Fri, 9 Mar 2018 14:45:07 -0500 Subject: drm/amd/display: Rename feature-specific register address init macro Signed-off-by: Nikola Cornij Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index c794ce4a8177..e0d6d32357c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -95,8 +95,8 @@ SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\ SRI(HUBP_CLK_CNTL, HUBP, id) -/* Register address initialization macro for "generic" ASICs with full functionality */ -#define HUBP_REG_LIST_DCN_GEN(id)\ +/* Register address initialization macro for ASICs with VM */ +#define HUBP_REG_LIST_DCN_VM(id)\ SRI(NOM_PARAMETERS_0, HUBPREQ, id),\ SRI(NOM_PARAMETERS_1, HUBPREQ, id),\ SRI(NOM_PARAMETERS_2, HUBPREQ, id),\ @@ -105,7 +105,7 @@ #define HUBP_REG_LIST_DCN10(id)\ HUBP_REG_LIST_DCN(id),\ - HUBP_REG_LIST_DCN_GEN(id),\ + HUBP_REG_LIST_DCN_VM(id),\ SRI(PREFETCH_SETTINS, HUBPREQ, id),\ SRI(PREFETCH_SETTINS_C, HUBPREQ, id),\ SRI(DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB, HUBPREQ, id),\ @@ -361,8 +361,8 @@ HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\ HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh) -/* Mask/shift struct generation macro for "generic" ASICs with full functionality */ -#define HUBP_MASK_SH_LIST_DCN_GEN(mask_sh)\ +/* Mask/shift struct generation macro for ASICs with VM */ +#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_1, REFCYC_PER_PTE_GROUP_NOM_L, mask_sh),\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\ @@ -372,7 +372,7 @@ #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\ HUBP_MASK_SH_LIST_DCN(mask_sh),\ - HUBP_MASK_SH_LIST_DCN_GEN(mask_sh),\ + HUBP_MASK_SH_LIST_DCN_VM(mask_sh),\ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, DST_Y_PREFETCH, mask_sh),\ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS, VRATIO_PREFETCH, mask_sh),\ HUBP_SF(HUBPREQ0_PREFETCH_SETTINS_C, VRATIO_PREFETCH_C, mask_sh),\ -- cgit v1.2.1 From a12c3b7d4e2ac7837c23620ebc3e42b397c1c321 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 8 Mar 2018 22:05:35 -0500 Subject: drm/amd/display: Don't read EDID in atomic_check We shouldn't attempt to read EDID in atomic_check. We really shouldn't even be modifying the connector object, or any other non-state object, but this is a start at least. Moving EDID cleanup to dm_dp_mst_connector_destroy from dm_dp_destroy_mst_connector to ensure the EDID is still available for headless mode. 
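In short: long-lived driver objects such as the cached EDID must not be created or freed from atomic_check; they are now released only when the connector object itself goes away. A trimmed sketch of the destroy-side cleanup (simplified, not the literal driver function; the real dm_dp_mst_connector_destroy() below also tears down the per-connector encoder):

	/* Simplified sketch of the ownership rule this patch enforces. */
	static void example_mst_connector_destroy(struct drm_connector *connector)
	{
		struct amdgpu_dm_connector *aconnector =
				to_amdgpu_dm_connector(connector);

		kfree(aconnector->edid);	/* kfree(NULL) is a no-op */
		aconnector->edid = NULL;

		drm_connector_cleanup(connector);
	}

Freeing the EDID here rather than in dm_dp_destroy_mst_connector() keeps it available for headless mode after the MST topology is torn down.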
Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 32 +++++++--------------- 1 file changed, 10 insertions(+), 22 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 8291d74f26bc..305292a9ff80 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -161,6 +161,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector) struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; + if (amdgpu_dm_connector->edid) { + kfree(amdgpu_dm_connector->edid); + amdgpu_dm_connector->edid = NULL; + } + drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); drm_connector_cleanup(connector); @@ -181,28 +186,22 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { void dm_dp_mst_dc_sink_create(struct drm_connector *connector) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct edid *edid; struct dc_sink *dc_sink; struct dc_sink_init_data init_params = { .link = aconnector->dc_link, .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; + /* FIXME none of this is safe. we shouldn't touch aconnector here in + * atomic_check + */ + /* * TODO: Need to further figure out why ddc.algo is NULL while MST port exists */ if (!aconnector->port || !aconnector->port->aux.ddc.algo) return; - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); - - if (!edid) { - drm_mode_connector_update_edid_property( - &aconnector->base, - NULL); - return; - } - - aconnector->edid = edid; + ASSERT(aconnector->edid); dc_sink = dc_link_add_remote_sink( aconnector->dc_link, @@ -215,9 +214,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector) amdgpu_dm_add_sink_to_freesync_module( connector, aconnector->edid); - - drm_mode_connector_update_edid_property( - &aconnector->base, aconnector->edid); } static int dm_dp_mst_get_modes(struct drm_connector *connector) @@ -424,14 +420,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; } - if (aconnector->edid) { - kfree(aconnector->edid); - aconnector->edid = NULL; - } - - drm_mode_connector_update_edid_property( - &aconnector->base, - NULL); aconnector->mst_connected = false; } -- cgit v1.2.1 From dfd01f299987e7ede74e27d422c43846d1326010 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 24 Jan 2018 14:28:30 -0500 Subject: drm/amd/display: add mpc to dtn log Signed-off-by: Dmytro Laktyushkin Reviewed-by: Wesley Chalmers Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 31 ++++++++++++++++------ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 17 ++++++++++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h | 5 ++++ drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 15 +++++++++++ 4 files changed, 60 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 8b0f6b8a5627..999190aa8a08 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -121,20 +121,19 @@ void dcn10_log_hw_state(struct dc *dc) dcn10_log_hubbub_state(dc); - DTN_INFO("HUBP:\t format \t addr_hi \t width \t height \t " - "rotation \t mirror \t sw_mode \t " - "dcc_en \t blank_en \t ttu_dis \t underflow \t " - "min_ttu_vblank \t qos_low_wm \t qos_high_wm \n"); - + DTN_INFO("HUBP: format addr_hi width height " + "rotation mirror sw_mode " + "dcc_en blank_en ttu_dis underflow " + "min_ttu_vblank qos_low_wm qos_high_wm\n"); for (i = 0; i < pool->pipe_count; i++) { struct hubp *hubp = pool->hubps[i]; struct dcn_hubp_state s; hubp1_read_state(TO_DCN10_HUBP(hubp), &s); - DTN_INFO("[%d]:\t %xh \t %xh \t %d \t %d \t " - "%xh \t %xh \t %xh \t " - "%d \t %d \t %d \t %xh \t", + DTN_INFO("[%-2d]: %5xh %6xh %5d %6d " + "%7xh %5xh %6xh " + "%6d %8d %7d %8xh \t", hubp->inst, s.pixel_format, s.inuse_addr_hi, @@ -153,6 +152,22 @@ void dcn10_log_hw_state(struct dc *dc) DTN_INFO("\n"); } DTN_INFO("\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct output_pixel_processor *opp = pool->opps[i]; + struct mpcc *mpcc = opp->mpc_tree_params.opp_list; + struct mpcc_state s = {0}; + + while (mpcc) { + ASSERT(opp->mpc_tree_params.opp_id == opp->inst); + pool->mpc->funcs->read_mpcc_state(pool->mpc, mpcc->mpcc_id, &s); + DTN_INFO("[OPP%d - MPCC%d]: DPP%d MPCCBOT%x MODE:%d ALPHA_MODE:%d PREMULT:%d OVERLAP_ONLY:%d\n", + s.opp_id, mpcc->mpcc_id, s.dpp_id, s.bot_mpcc_id, + s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only); + mpcc = mpcc->mpcc_bot; + ASSERT(!mpcc || mpcc->mpcc_id == s.bot_mpcc_id); + } + } + DTN_INFO("\n"); DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t vmin \t " "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n"); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 179890b1a8c4..29e15a93a7d0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -409,7 +409,24 @@ void mpc1_init_mpcc_list_from_hw( } } +void mpc1_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s) +{ + struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + + REG_GET(MPCC_OPP_ID[mpcc_inst], MPCC_OPP_ID, &s->opp_id); + REG_GET(MPCC_TOP_SEL[mpcc_inst], MPCC_TOP_SEL, &s->dpp_id); + REG_GET(MPCC_BOT_SEL[mpcc_inst], MPCC_BOT_SEL, &s->bot_mpcc_id); + REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, + MPCC_ALPHA_BLND_MODE, &s->alpha_mode, + MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->pre_multiplied_alpha); +} + const struct mpc_funcs dcn10_mpc_funcs = { + .read_mpcc_state = mpc1_read_mpcc_state, .insert_plane = mpc1_insert_plane, .remove_mpcc = mpc1_remove_mpcc, .mpc_init = mpc1_mpc_init, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h index 267a2995ef6e..d3d16c4cbea3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h @@ -183,4 +183,9 @@ struct mpcc *mpc1_get_mpcc_for_dpp( struct mpc_tree *tree, int dpp_id); +void mpc1_read_mpcc_state( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 23a8d5e53a89..5caacab216b5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -105,7 +105,22 @@ struct mpc { 
struct mpcc mpcc_array[MAX_MPCC]; }; +struct mpcc_state { + uint32_t opp_id; + uint32_t dpp_id; + uint32_t bot_mpcc_id; + uint32_t mode; + uint32_t alpha_mode; + uint32_t pre_multiplied_alpha; + uint32_t overlap_only; +}; + struct mpc_funcs { + void (*read_mpcc_state)( + struct mpc *mpc, + int mpcc_inst, + struct mpcc_state *s); + /* * Insert DPP into MPC tree based on specified blending position. * Only used for planes that are part of blending chain for OPP output -- cgit v1.2.1 From 1249acefefd43006127e58acf9c67de8038d770b Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Thu, 8 Mar 2018 14:58:11 -0500 Subject: drm/amd/display: Add debug prints for bandwidth calculations Using the three functions we can print the dceip, vbios and data struct for bandwidth calculations. This is useful for debugging bandwidth calculation issues without a debugger Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Reviewed-by: Tony Cheng Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/calcs/calcs_logger.h | 579 +++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 6 + drivers/gpu/drm/amd/display/dc/dc.h | 1 + 3 files changed, 586 insertions(+) create mode 100644 drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h new file mode 100644 index 000000000000..fc3f98fb09ea --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h @@ -0,0 +1,579 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _CALCS_CALCS_LOGGER_H_ +#define _CALCS_CALCS_LOGGER_H_ +#define DC_LOGGER \ + logger + +static void print_bw_calcs_dceip(struct dal_logger *logger, const struct bw_calcs_dceip *dceip) +{ + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_calcs_version version %d", dceip->version); + DC_LOG_BANDWIDTH_CALCS(" [bool] large_cursor: %d", dceip->large_cursor); + DC_LOG_BANDWIDTH_CALCS(" [bool] dmif_pipe_en_fbc_chunk_tracker: %d", dceip->dmif_pipe_en_fbc_chunk_tracker); + DC_LOG_BANDWIDTH_CALCS(" [bool] display_write_back_supported: %d", dceip->display_write_back_supported); + DC_LOG_BANDWIDTH_CALCS(" [bool] argb_compression_support: %d", dceip->argb_compression_support); + DC_LOG_BANDWIDTH_CALCS(" [bool] pre_downscaler_enabled: %d", dceip->pre_downscaler_enabled); + DC_LOG_BANDWIDTH_CALCS(" [bool] underlay_downscale_prefetch_enabled: %d", + dceip->underlay_downscale_prefetch_enabled); + DC_LOG_BANDWIDTH_CALCS(" [bool] graphics_lb_nodownscaling_multi_line_prefetching: %d", + dceip->graphics_lb_nodownscaling_multi_line_prefetching); + DC_LOG_BANDWIDTH_CALCS(" [bool] limit_excessive_outstanding_dmif_requests: %d", + dceip->limit_excessive_outstanding_dmif_requests); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_max_outstanding_group_num: %d", + dceip->cursor_max_outstanding_group_num); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lines_interleaved_into_lb: %d", dceip->lines_interleaved_into_lb); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] low_power_tiling_mode: %d", dceip->low_power_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_width: %d", dceip->chunk_width); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_graphics_pipes: %d", dceip->number_of_graphics_pipes); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_pipes: %d", dceip->number_of_underlay_pipes); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_dmif_buffer_allocated: %d", dceip->max_dmif_buffer_allocated); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_dmif_size: %d", dceip->graphics_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_luma_dmif_size: %d", dceip->underlay_luma_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_chroma_dmif_size: %d", dceip->underlay_chroma_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_lines_of_pte_prefetching_in_linear_mode: %d", + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_luma_mcifwr_buffer_size: %d", + dceip->display_write_back420_luma_mcifwr_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_chroma_mcifwr_buffer_size: %d", + dceip->display_write_back420_chroma_mcifwr_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_pte_request_rows_in_tiling_mode: %d", + dceip->scatter_gather_pte_request_rows_in_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency10_bit_per_component: %d", + bw_fixed_to_int(dceip->underlay_vscaler_efficiency10_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency12_bit_per_component: %d", + bw_fixed_to_int(dceip->underlay_vscaler_efficiency12_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency6_bit_per_component: %d", + 
bw_fixed_to_int(dceip->graphics_vscaler_efficiency6_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency8_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency8_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency10_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency10_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency12_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency12_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] alpha_vscaler_efficiency: %d", + bw_fixed_to_int(dceip->alpha_vscaler_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_write_pixels_per_dispclk: %d", + bw_fixed_to_int(dceip->lb_write_pixels_per_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component444: %d", + bw_fixed_to_int(dceip->lb_size_per_component444)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_and_dram_clock_state_change_gated_before_cursor: %d", + bw_fixed_to_int(dceip->stutter_and_dram_clock_state_change_gated_before_cursor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_luma_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay420_luma_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_chroma_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay420_chroma_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay422_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay422_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_chunk_width: %d", bw_fixed_to_int(dceip->cursor_chunk_width)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_dcp_buffer_lines: %d", + bw_fixed_to_int(dceip->cursor_dcp_buffer_lines)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_width_efficient_for_tiling: %d", + bw_fixed_to_int(dceip->underlay_maximum_width_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_height_efficient_for_tiling: %d", + bw_fixed_to_int(dceip->underlay_maximum_height_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display: %d", + bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation: %d", + bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_outstanding_pte_request_limit: %d", + bw_fixed_to_int(dceip->minimum_outstanding_pte_request_limit)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_total_outstanding_pte_requests_allowed_by_saw: %d", + bw_fixed_to_int(dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] linear_mode_line_request_alternation_slice: %d", + bw_fixed_to_int(dceip->linear_mode_line_request_alternation_slice)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_efficiency: %d", bw_fixed_to_int(dceip->request_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_per_request: %d", bw_fixed_to_int(dceip->dispclk_per_request)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_ramping_factor: %d", + bw_fixed_to_int(dceip->dispclk_ramping_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_throughput_factor: %d", + bw_fixed_to_int(dceip->display_pipe_throughput_factor)); + 
DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_all_surfaces_burst_time: %d", + bw_fixed_to_int(dceip->mcifwr_all_surfaces_burst_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_request_buffer_size: %d", + bw_fixed_to_int(dceip->dmif_request_buffer_size)); + + +} + +static void print_bw_calcs_vbios(struct dal_logger *logger, const struct bw_calcs_vbios *vbios) +{ + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_vbios vbios"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] dram_channel_width_in_bits: %d", vbios->dram_channel_width_in_bits); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", vbios->number_of_dram_channels); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_banks: %d", vbios->number_of_dram_banks); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_yclk: %d", bw_fixed_to_int(vbios->low_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_yclk: %d", bw_fixed_to_int(vbios->mid_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_yclk: %d", bw_fixed_to_int(vbios->high_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_sclk: %d", bw_fixed_to_int(vbios->low_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid1_sclk: %d", bw_fixed_to_int(vbios->mid1_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid2_sclk: %d", bw_fixed_to_int(vbios->mid2_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid3_sclk: %d", bw_fixed_to_int(vbios->mid3_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid4_sclk: %d", bw_fixed_to_int(vbios->mid4_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid5_sclk: %d", bw_fixed_to_int(vbios->mid5_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid6_sclk: %d", bw_fixed_to_int(vbios->mid6_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_sclk: %d", bw_fixed_to_int(vbios->high_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_dispclk: %d", + bw_fixed_to_int(vbios->low_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_dispclk;: %d", + bw_fixed_to_int(vbios->mid_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_dispclk;: %d", + bw_fixed_to_int(vbios->high_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->low_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->mid_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->high_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_return_bus_width: %d", bw_fixed_to_int(vbios->data_return_bus_width)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] trc: %d", bw_fixed_to_int(vbios->trc)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency: %d", bw_fixed_to_int(vbios->dmifmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_exit_latency: %d", + bw_fixed_to_int(vbios->stutter_self_refresh_exit_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_entry_latency: %d", + bw_fixed_to_int(vbios->stutter_self_refresh_entry_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_latency: %d", + bw_fixed_to_int(vbios->nbp_state_change_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
mcifwrmc_urgent_latency: %d", + bw_fixed_to_int(vbios->mcifwrmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable: %d", vbios->scatter_gather_enable); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] down_spread_percentage: %d", + bw_fixed_to_int(vbios->down_spread_percentage)); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_width: %d", vbios->cursor_width); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] average_compression_rate: %d", vbios->average_compression_rate); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_request_slots_gmc_reserves_for_dmif_per_channel: %d", + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration: %d", bw_fixed_to_int(vbios->blackout_duration)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_blackout_recovery_time: %d", + bw_fixed_to_int(vbios->maximum_blackout_recovery_time)); + + +} + +static void print_bw_calcs_data(struct dal_logger *logger, struct bw_calcs_data *data) +{ + + int i, j, k; + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_data data"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_displays: %d", data->number_of_displays); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_surface_type: %d", data->underlay_surface_type); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines panning_and_bezel_adjustment: %d", + data->panning_and_bezel_adjustment); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_tiling_mode: %d", data->graphics_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_lb_bpc: %d", data->graphics_lb_bpc); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_lb_bpc: %d", data->underlay_lb_bpc); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_tiling_mode: %d", data->underlay_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d0_underlay_mode: %d", data->d0_underlay_mode); + DC_LOG_BANDWIDTH_CALCS(" [bool] d1_display_write_back_dwb_enable: %d", data->d1_display_write_back_dwb_enable); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d1_underlay_mode: %d", data->d1_underlay_mode); + DC_LOG_BANDWIDTH_CALCS(" [bool] cpup_state_change_enable: %d", data->cpup_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] cpuc_state_change_enable: %d", data->cpuc_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] nbp_state_change_enable: %d", data->nbp_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] stutter_mode_enable: %d", data->stutter_mode_enable); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] y_clk_level: %d", data->y_clk_level); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] sclk_level: %d", data->sclk_level); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_surfaces: %d", data->number_of_underlay_surfaces); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_wrchannels: %d", data->number_of_dram_wrchannels); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_request_delay: %d", data->chunk_request_delay); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", data->number_of_dram_channels); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_micro_tile_mode: %d", data->underlay_micro_tile_mode); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_micro_tile_mode: %d", data->graphics_micro_tile_mode); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] max_phyclk: %d", bw_fixed_to_int(data->max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_efficiency: %d", 
bw_fixed_to_int(data->dram_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_surface_type: %d", + bw_fixed_to_int(data->src_width_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_surface_type: %d", + bw_fixed_to_int(data->src_height_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_surface_type: %d", + bw_fixed_to_int(data->hsr_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_surface_type: %d", bw_fixed_to_int(data->vsr_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_rotation: %d", + bw_fixed_to_int(data->src_width_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_rotation: %d", + bw_fixed_to_int(data->src_height_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_rotation: %d", bw_fixed_to_int(data->hsr_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_rotation: %d", bw_fixed_to_int(data->vsr_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_pixels: %d", bw_fixed_to_int(data->source_height_pixels)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_stereo: %d", bw_fixed_to_int(data->hsr_after_stereo)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_stereo: %d", bw_fixed_to_int(data->vsr_after_stereo)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_in_lb: %d", bw_fixed_to_int(data->source_width_in_lb)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_line_pitch: %d", bw_fixed_to_int(data->lb_line_pitch)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_source_efficient_for_tiling: %d", + bw_fixed_to_int(data->underlay_maximum_source_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] num_lines_at_frame_start: %d", + bw_fixed_to_int(data->num_lines_at_frame_start)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dmif_size_in_time: %d", bw_fixed_to_int(data->min_dmif_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_mcifwr_size_in_time: %d", + bw_fixed_to_int(data->min_mcifwr_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_dmif_size: %d", + bw_fixed_to_int(data->total_requests_for_dmif_size)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting: %d", + bw_fixed_to_int(data->peak_pte_request_to_eviction_ratio_limiting)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_pte_per_pte_request: %d", + bw_fixed_to_int(data->useful_pte_per_pte_request)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_rows: %d", + bw_fixed_to_int(data->scatter_gather_pte_request_rows)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_row_height: %d", + bw_fixed_to_int(data->scatter_gather_row_height)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_vblank: %d", + bw_fixed_to_int(data->scatter_gather_pte_requests_in_vblank)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] inefficient_linear_pitch_in_bytes: %d", + bw_fixed_to_int(data->inefficient_linear_pitch_in_bytes)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_data: %d", bw_fixed_to_int(data->cursor_total_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_request_groups: %d", + bw_fixed_to_int(data->cursor_total_request_groups)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_requests: %d", + bw_fixed_to_int(data->scatter_gather_total_pte_requests)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_request_groups: %d", + bw_fixed_to_int(data->scatter_gather_total_pte_request_groups)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
tile_width_in_pixels: %d", bw_fixed_to_int(data->tile_width_in_pixels)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_number_of_data_request_page_close_open: %d", + bw_fixed_to_int(data->dmif_total_number_of_data_request_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_number_of_data_request_page_close_open: %d", + bw_fixed_to_int(data->mcifwr_total_number_of_data_request_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_page_close_open: %d", + bw_fixed_to_int(data->bytes_per_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_page_close_open_time: %d", + bw_fixed_to_int(data->mcifwr_total_page_close_open_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_adjusted_dmif_size: %d", + bw_fixed_to_int(data->total_requests_for_adjusted_dmif_size)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_trips: %d", + bw_fixed_to_int(data->total_dmifmc_urgent_trips)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_latency: %d", + bw_fixed_to_int(data->total_dmifmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_data: %d", + bw_fixed_to_int(data->total_display_reads_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_dram_access_data: %d", + bw_fixed_to_int(data->total_display_reads_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_data: %d", + bw_fixed_to_int(data->total_display_writes_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_dram_access_data: %d", + bw_fixed_to_int(data->total_display_writes_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_data: %d", + bw_fixed_to_int(data->display_reads_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_dram_access_data: %d", + bw_fixed_to_int(data->display_reads_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_page_close_open_time: %d", + bw_fixed_to_int(data->dmif_total_page_close_open_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_cursor_memory_interface_buffer_size_in_time: %d", + bw_fixed_to_int(data->min_cursor_memory_interface_buffer_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_read_buffer_size_in_time: %d", + bw_fixed_to_int(data->min_read_buffer_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer: %d", + bw_fixed_to_int(data->display_reads_time_for_data_transfer)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_writes_time_for_data_transfer: %d", + bw_fixed_to_int(data->display_writes_time_for_data_transfer)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_dram_bandwidth: %d", + bw_fixed_to_int(data->dmif_required_dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_dram_bandwidth: %d", + bw_fixed_to_int(data->mcifwr_required_dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dmifmc_urgent_latency_for_page_close_open: %d", + bw_fixed_to_int(data->required_dmifmc_urgent_latency_for_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_mcifmcwr_urgent_latency: %d", + bw_fixed_to_int(data->required_mcifmcwr_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dram_bandwidth_gbyte_per_second: %d", + bw_fixed_to_int(data->required_dram_bandwidth_gbyte_per_second)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_bandwidth: %d", bw_fixed_to_int(data->dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
dmif_required_sclk: %d", bw_fixed_to_int(data->dmif_required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_sclk: %d", bw_fixed_to_int(data->mcifwr_required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_sclk: %d", bw_fixed_to_int(data->required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] downspread_factor: %d", bw_fixed_to_int(data->downspread_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scaler_efficiency: %d", bw_fixed_to_int(data->v_scaler_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scaler_limits_factor: %d", bw_fixed_to_int(data->scaler_limits_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_pixel_throughput: %d", + bw_fixed_to_int(data->display_pipe_pixel_throughput)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping: %d", + bw_fixed_to_int(data->total_dispclk_required_with_ramping)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping: %d", + bw_fixed_to_int(data->total_dispclk_required_without_ramping)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_read_request_bandwidth: %d", + bw_fixed_to_int(data->total_read_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_write_request_bandwidth: %d", + bw_fixed_to_int(data->total_write_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_total_read_request_bandwidth: %d", + bw_fixed_to_int(data->dispclk_required_for_total_read_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping_with_request_bandwidth: %d", + bw_fixed_to_int(data->total_dispclk_required_with_ramping_with_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping_with_request_bandwidth: %d", + bw_fixed_to_int(data->total_dispclk_required_without_ramping_with_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk: %d", bw_fixed_to_int(data->dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_recovery_time: %d", bw_fixed_to_int(data->blackout_recovery_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_pixels_per_data_fifo_entry: %d", + bw_fixed_to_int(data->min_pixels_per_data_fifo_entry)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] sclk_deep_sleep: %d", bw_fixed_to_int(data->sclk_deep_sleep)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] chunk_request_time: %d", bw_fixed_to_int(data->chunk_request_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_request_time: %d", bw_fixed_to_int(data->cursor_request_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] line_source_pixels_transfer_time: %d", + bw_fixed_to_int(data->line_source_pixels_transfer_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifdram_access_efficiency: %d", + bw_fixed_to_int(data->dmifdram_access_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrdram_access_efficiency: %d", + bw_fixed_to_int(data->mcifwrdram_access_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth_no_compression: %d", + bw_fixed_to_int(data->total_average_bandwidth_no_compression)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth: %d", + bw_fixed_to_int(data->total_average_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_stutter_cycle_duration: %d", + bw_fixed_to_int(data->total_stutter_cycle_duration)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_burst_time: %d", bw_fixed_to_int(data->stutter_burst_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] time_in_self_refresh: %d", bw_fixed_to_int(data->time_in_self_refresh)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_efficiency: 
%d", bw_fixed_to_int(data->stutter_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] worst_number_of_trips_to_memory: %d", + bw_fixed_to_int(data->worst_number_of_trips_to_memory)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] immediate_flip_time: %d", bw_fixed_to_int(data->immediate_flip_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_dmif_clients: %d", + bw_fixed_to_int(data->latency_for_non_dmif_clients)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_mcifwr_clients: %d", + bw_fixed_to_int(data->latency_for_non_mcifwr_clients)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency_supported_in_high_sclk_and_yclk: %d", + bw_fixed_to_int(data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_margin: %d", + bw_fixed_to_int(data->nbp_state_dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer_and_urgent_latency: %d", + bw_fixed_to_int(data->display_reads_time_for_data_transfer_and_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_margin: %d", + bw_fixed_to_int(data->dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_vblank_dram_speed_change_margin: %d", + bw_fixed_to_int(data->min_vblank_dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_stutter_refresh_duration: %d", + bw_fixed_to_int(data->min_stutter_refresh_duration)); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_stutter_dmif_buffer_size: %d", data->total_stutter_dmif_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_bytes_requested: %d", data->total_bytes_requested); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] min_stutter_dmif_buffer_size: %d", data->min_stutter_dmif_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] num_stutter_bursts: %d", data->num_stutter_bursts); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_nbp_state_dram_speed_change_latency_supported: %d", + bw_fixed_to_int(data->v_blank_nbp_state_dram_speed_change_latency_supported)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_latency_supported: %d", + bw_fixed_to_int(data->nbp_state_dram_speed_change_latency_supported)); + + for (i = 0; i < maximum_number_of_surfaces; i++) { + DC_LOG_BANDWIDTH_CALCS(" [bool] fbc_en[%d]:%d\n", i, data->fbc_en[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] lpt_en[%d]:%d", i, data->lpt_en[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] displays_match_flag[%d]:%d", i, data->displays_match_flag[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] use_alpha[%d]:%d", i, data->use_alpha[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] orthogonal_rotation[%d]:%d", i, data->orthogonal_rotation[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] enable[%d]:%d", i, data->enable[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] access_one_channel_only[%d]:%d", i, data->access_one_channel_only[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable_for_pipe[%d]:%d", + i, data->scatter_gather_enable_for_pipe[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] interlace_mode[%d]:%d", + i, data->interlace_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] display_pstate_change_enable[%d]:%d", + i, data->display_pstate_change_enable[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] line_buffer_prefetch[%d]:%d", i, data->line_buffer_prefetch[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] bytes_per_pixel[%d]:%d", i, data->bytes_per_pixel[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_chunks_non_fbc_mode[%d]:%d", + i, data->max_chunks_non_fbc_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lb_bpc[%d]:%d", i, data->lb_bpc[i]); 
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bpphdmi[%d]:%d", i, data->output_bpphdmi[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr[%d]:%d", i, data->output_bppdp4_lane_hbr[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr2[%d]:%d", + i, data->output_bppdp4_lane_hbr2[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr3[%d]:%d", + i, data->output_bppdp4_lane_hbr3[i]); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines stereo_mode[%d]:%d", i, data->stereo_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_buffer_transfer_time[%d]:%d", + i, bw_fixed_to_int(data->dmif_buffer_transfer_time[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] displays_with_same_mode[%d]:%d", + i, bw_fixed_to_int(data->displays_with_same_mode[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_dmif_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->stutter_dmif_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_refresh_duration[%d]:%d", + i, bw_fixed_to_int(data->stutter_refresh_duration[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_exit_watermark[%d]:%d", + i, bw_fixed_to_int(data->stutter_exit_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_entry_watermark[%d]:%d", + i, bw_fixed_to_int(data->stutter_entry_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_total[%d]:%d", i, bw_fixed_to_int(data->h_total[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_total[%d]:%d", i, bw_fixed_to_int(data->v_total[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixel_rate[%d]:%d", i, bw_fixed_to_int(data->pixel_rate[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width[%d]:%d", i, bw_fixed_to_int(data->src_width[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels[%d]:%d", + i, bw_fixed_to_int(data->pitch_in_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels_after_surface_type[%d]:%d", + i, bw_fixed_to_int(data->pitch_in_pixels_after_surface_type[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height[%d]:%d", i, bw_fixed_to_int(data->src_height[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scale_ratio[%d]:%d", i, bw_fixed_to_int(data->scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_taps[%d]:%d", i, bw_fixed_to_int(data->h_taps[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_taps[%d]:%d", i, bw_fixed_to_int(data->v_taps[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->h_scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->v_scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] rotation_angle[%d]:%d", + i, bw_fixed_to_int(data->rotation_angle[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] compression_rate[%d]:%d", + i, bw_fixed_to_int(data->compression_rate[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr[%d]:%d", i, bw_fixed_to_int(data->vsr[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_rounded_up_to_chunks[%d]:%d", + i, bw_fixed_to_int(data->source_width_rounded_up_to_chunks[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_pixels[%d]:%d", + i, bw_fixed_to_int(data->source_width_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_rounded_up_to_chunks[%d]:%d", + i, bw_fixed_to_int(data->source_height_rounded_up_to_chunks[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_bandwidth[%d]:%d", + i, bw_fixed_to_int(data->display_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_bandwidth[%d]:%d", + i, 
bw_fixed_to_int(data->request_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_request[%d]:%d", + i, bw_fixed_to_int(data->bytes_per_request[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_bytes_per_request[%d]:%d", + i, bw_fixed_to_int(data->useful_bytes_per_request[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lines_interleaved_in_mem_access[%d]:%d", + i, bw_fixed_to_int(data->lines_interleaved_in_mem_access[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_hiding_lines[%d]:%d", + i, bw_fixed_to_int(data->latency_hiding_lines[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions[%d]:%d", + i, bw_fixed_to_int(data->lb_partitions[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions_max[%d]:%d", + i, bw_fixed_to_int(data->lb_partitions_max[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_with_ramping[%d]:%d", + i, bw_fixed_to_int(data->dispclk_required_with_ramping[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_without_ramping[%d]:%d", + i, bw_fixed_to_int(data->dispclk_required_without_ramping[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->data_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] outstanding_chunk_request_limit[%d]:%d", + i, bw_fixed_to_int(data->outstanding_chunk_request_limit[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] urgent_watermark[%d]:%d", + i, bw_fixed_to_int(data->urgent_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_watermark[%d]:%d", + i, bw_fixed_to_int(data->nbp_state_change_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_filter_init[%d]:%d", i, bw_fixed_to_int(data->v_filter_init[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_cycle_duration[%d]:%d", + i, bw_fixed_to_int(data->stutter_cycle_duration[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth[%d]:%d", + i, bw_fixed_to_int(data->average_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth_no_compression[%d]:%d", + i, bw_fixed_to_int(data->average_bandwidth_no_compression[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_limit[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_pte_request_limit[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component[%d]:%d", + i, bw_fixed_to_int(data->lb_size_per_component[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] memory_chunk_size_in_bytes[%d]:%d", + i, bw_fixed_to_int(data->memory_chunk_size_in_bytes[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pipe_chunk_size_in_bytes[%d]:%d", + i, bw_fixed_to_int(data->pipe_chunk_size_in_bytes[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] number_of_trips_to_memory_for_getting_apte_row[%d]:%d", + i, bw_fixed_to_int(data->number_of_trips_to_memory_for_getting_apte_row[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->adjusted_data_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size_in_memory[%d]:%d", + i, bw_fixed_to_int(data->adjusted_data_buffer_size_in_memory[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixels_per_data_fifo_entry[%d]:%d", + i, bw_fixed_to_int(data->pixels_per_data_fifo_entry[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_row[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_pte_requests_in_row[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pte_request_per_chunk[%d]:%d", + i, bw_fixed_to_int(data->pte_request_per_chunk[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_width[%d]:%d", + i, 
bw_fixed_to_int(data->scatter_gather_page_width[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_height[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_page_height[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_beginning_of_frame[%d]:%d", + i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_beginning_of_frame[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_middle_of_frame[%d]:%d", + i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_middle_of_frame[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_width_pixels[%d]:%d", + i, bw_fixed_to_int(data->cursor_width_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->minimum_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->maximum_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding_with_cursor[%d]:%d", + i, bw_fixed_to_int(data->minimum_latency_hiding_with_cursor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding_with_cursor[%d]:%d", + i, bw_fixed_to_int(data->maximum_latency_hiding_with_cursor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_first_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_pixels_for_first_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_last_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_pixels_for_last_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_first_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_data_for_first_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_last_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_data_for_last_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] active_time[%d]:%d", i, bw_fixed_to_int(data->active_time[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] horizontal_blank_and_chunk_granularity_factor[%d]:%d", + i, bw_fixed_to_int(data->horizontal_blank_and_chunk_granularity_factor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->cursor_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_dram_speed_change_margin[%d]:%d", + i, bw_fixed_to_int(data->v_blank_dram_speed_change_margin[i])); + } + + for (i = 0; i < maximum_number_of_surfaces; i++) { + for (j = 0; j < 3; j++) { + for (k = 0; k < 8; k++) { + + DC_LOG_BANDWIDTH_CALCS("\n [bw_fixed] line_source_transfer_time[%d][%d][%d]:%d", + i, j, k, bw_fixed_to_int(data->line_source_transfer_time[i][j][k])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_line_source_transfer_time[%d][%d][%d]:%d", + i, j, k, + bw_fixed_to_int(data->dram_speed_change_line_source_transfer_time[i][j][k])); + } + } + } + + for (i = 0; i < 3; i++) { + for (j = 0; j < 8; j++) { + + DC_LOG_BANDWIDTH_CALCS("\n [uint32_t] num_displays_with_margin[%d][%d]:%d", + i, j, data->num_displays_with_margin[i][j]); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_burst_time[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dmif_burst_time[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_burst_time[%d][%d]:%d", + i, j, bw_fixed_to_int(data->mcifwr_burst_time[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dram_speed_change_margin[%d][%d]:%d", + i, j, bw_fixed_to_int(data->min_dram_speed_change_margin[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_dram_speed_change[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_dram_speed_change[i][j])); + 
DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration_margin[%d][%d]:%d", + i, j, bw_fixed_to_int(data->blackout_duration_margin[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_duration[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_duration[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_recovery[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_recovery[i][j])); + } + } + + for (i = 0; i < 6; i++) { + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk_for_urgent_latency[%d]:%d", + i, bw_fixed_to_int(data->dmif_required_sclk_for_urgent_latency[i])); + } +} +; + +#endif /* _CALCS_CALCS_LOGGER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 821502b1acba..59acb0885039 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -28,6 +28,7 @@ #include "dc.h" #include "core_types.h" #include "dal_asic_id.h" +#include "calcs_logger.h" /* * NOTE: @@ -2990,6 +2991,11 @@ bool bw_calcs(struct dc_context *ctx, struct bw_fixed mid_yclk = vbios->mid_yclk; struct bw_fixed low_yclk = vbios->low_yclk; + if (ctx->dc->debug.bandwidth_calcs_trace) { + print_bw_calcs_dceip(ctx->logger, dceip); + print_bw_calcs_vbios(ctx->logger, vbios); + print_bw_calcs_data(ctx->logger, data); + } calculate_bandwidth(dceip, vbios, data); yclk_lvl = data->y_clk_level; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 4d9da9d9c731..bdc3cef002d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -202,6 +202,7 @@ struct dc_debug { bool timing_trace; bool clock_trace; bool validation_trace; + bool bandwidth_calcs_trace; /* stutter efficiency related */ bool disable_stutter; -- cgit v1.2.1 From c1f8d3fa4627ec7ec31cc538b471488980738631 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Wed, 7 Mar 2018 16:59:43 -0500 Subject: drm/amd/display: Don't call amdgpu_dm_display_resume as it doesn't exist amdgpu_dm_display_resume was merged into dm_resume. No need to call these functions separately. 
Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e42a28e3adc5..bad9f09c588b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1629,7 +1629,6 @@ static ssize_t s3_debug_store(struct device *device, if (ret == 0) { if (s3_state) { dm_resume(adev); - amdgpu_dm_display_resume(adev); drm_kms_helper_hotplug_event(adev->ddev); } else dm_suspend(adev); -- cgit v1.2.1 From b361521f59764139067ada4ea9d6c213d583678f Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Mon, 12 Mar 2018 15:53:47 -0400 Subject: drm/amd/display: Adding stutter entry wm to dce bw struct Adding the stutter_entry_wm object to dce_bw_output struct and populating it with bw calculations data Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 111 ++++++++++++++++++++++- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 1 + 2 files changed, 110 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 59acb0885039..4b719328afd6 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -3091,7 +3091,33 @@ bool bw_calcs(struct dc_context *ctx, bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); - + calcs_output->stutter_entry_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); calcs_output->urgent_wm_ns[0].a_mark = bw_fixed_to_int(bw_mul(data-> @@ -3186,7 +3212,33 @@ bool bw_calcs(struct dc_context *ctx, bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); - + calcs_output->stutter_entry_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + 
calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); calcs_output->urgent_wm_ns[0].b_mark = bw_fixed_to_int(bw_mul(data-> @@ -3279,6 +3331,34 @@ bool bw_calcs(struct dc_context *ctx, bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[0].c_mark = bw_fixed_to_int(bw_mul(data-> urgent_watermark[4], bw_int_to_fixed(1000))); @@ -3383,6 +3463,33 @@ bool bw_calcs(struct dc_context *ctx, bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); calcs_output->urgent_wm_ns[0].d_mark = bw_fixed_to_int(bw_mul(data-> diff --git 
a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 8c51ad70cace..55f56bf7d5b6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -250,6 +250,7 @@ struct dce_bw_output { bool all_displays_in_sync; struct dce_watermarks urgent_wm_ns[MAX_PIPES]; struct dce_watermarks stutter_exit_wm_ns[MAX_PIPES]; + struct dce_watermarks stutter_entry_wm_ns[MAX_PIPES]; struct dce_watermarks nbp_state_change_wm_ns[MAX_PIPES]; int sclk_khz; int sclk_deep_sleep_khz; -- cgit v1.2.1 From f8931ea730ffa6c84e98c970c173935cfd38c0aa Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Mon, 12 Mar 2018 17:07:24 -0400 Subject: drm/amd/display: Change wb_h/vratio to double Signed-off-by: Eric Bernstein Reviewed-by: Dmytro Laktyushkin Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index 09affa16cc43..e296de6ca502 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -215,8 +215,8 @@ struct writeback_st { int wb_vtaps_luma; int wb_htaps_chroma; int wb_vtaps_chroma; - int wb_hratio; - int wb_vratio; + double wb_hratio; + double wb_vratio; }; struct _vcs_dpi_display_output_params_st { -- cgit v1.2.1 From 6133470c8e2ffdc6a5d67a1d79a9a0c1c0a94a10 Mon Sep 17 00:00:00 2001 From: Julian Parkin Date: Tue, 13 Mar 2018 15:53:13 -0400 Subject: drm/amd/display: drop dc_validate_guaranteed Block FP16 scaling in validate_resources codepath. 
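The functional piece of this change is the early-out added to dpp_get_optimal_number_of_taps() in dcn10_dpp.c (hunk further below); condensed, with all names taken from that hunk, the guard looks like this:

	/* Viewport differs from the active region in both dimensions (scaling
	 * is required), the DSCL block only does fixed-point processing, and
	 * the surface is FP16: fail validation so the mode is rejected.
	 */
	if (scl_data->viewport.width != scl_data->h_active &&
	    scl_data->viewport.height != scl_data->v_active &&
	    dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
	    scl_data->format == PIXEL_FORMAT_FP16)
		return false;
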
Signed-off-by: Julian Parkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 32 -------------- drivers/gpu/drm/amd/display/dc/dc_stream.h | 8 ---- .../drm/amd/display/dc/dce100/dce100_resource.c | 33 --------------- .../drm/amd/display/dc/dce110/dce110_resource.c | 33 --------------- .../drm/amd/display/dc/dce112/dce112_resource.c | 33 --------------- .../drm/amd/display/dc/dce112/dce112_resource.h | 5 --- .../drm/amd/display/dc/dce120/dce120_resource.c | 1 - .../gpu/drm/amd/display/dc/dce80/dce80_resource.c | 49 ---------------------- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 7 ++++ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 31 -------------- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 5 --- drivers/gpu/drm/amd/display/dc/inc/resource.h | 4 -- 12 files changed, 7 insertions(+), 234 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index ba3487e97361..cae78ee9a6fc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1599,18 +1599,6 @@ enum dc_status dc_remove_stream_from_ctx( return DC_OK; } -static void copy_pipe_ctx( - const struct pipe_ctx *from_pipe_ctx, struct pipe_ctx *to_pipe_ctx) -{ - struct dc_plane_state *plane_state = to_pipe_ctx->plane_state; - struct dc_stream_state *stream = to_pipe_ctx->stream; - - *to_pipe_ctx = *from_pipe_ctx; - to_pipe_ctx->stream = stream; - if (plane_state != NULL) - to_pipe_ctx->plane_state = plane_state; -} - static struct dc_stream_state *find_pll_sharable_stream( struct dc_stream_state *stream_needs_pll, struct dc_state *context) @@ -1752,26 +1740,6 @@ enum dc_status resource_map_pool_resources( return DC_ERROR_UNEXPECTED; } -/* first stream in the context is used to populate the rest */ -void validate_guaranteed_copy_streams( - struct dc_state *context, - int max_streams) -{ - int i; - - for (i = 1; i < max_streams; i++) { - context->streams[i] = context->streams[0]; - - copy_pipe_ctx(&context->res_ctx.pipe_ctx[0], - &context->res_ctx.pipe_ctx[i]); - context->res_ctx.pipe_ctx[i].stream = - context->res_ctx.pipe_ctx[0].stream; - - dc_stream_retain(context->streams[i]); - context->stream_count++; - } -} - void dc_resource_state_copy_construct_current( const struct dc *dc, struct dc_state *dst_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index d017df56b2ba..3a7093ede569 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -208,14 +208,6 @@ bool dc_add_all_planes_for_stream( enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream); -/* - * This function takes a stream and checks if it is guaranteed to be supported. - * Guaranteed means that MAX_COFUNC similar streams are supported. - * - * After this call: - * No hardware is programmed for call. Only validation is done. - */ - /* * Set up streams and links associated to drive sinks * The streams parameter is an absolute set of all active streams. 
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 3092f76bdb75..38ec0d609297 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -733,38 +733,6 @@ enum dc_status dce100_add_stream_to_ctx( return result; } -enum dc_status dce100_validate_guaranteed( - struct dc *dc, - struct dc_stream_state *dc_stream, - struct dc_state *context) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - context->streams[0] = dc_stream; - dc_stream_retain(context->streams[0]); - context->stream_count++; - - result = resource_map_pool_resources(dc, context, dc_stream); - - if (result == DC_OK) - result = resource_map_clock_resources(dc, context, dc_stream); - - if (result == DC_OK) - result = build_mapped_resource(dc, context, dc_stream); - - if (result == DC_OK) { - validate_guaranteed_copy_streams( - context, dc->caps.max_streams); - result = resource_build_scaling_params_for_context(dc, context); - } - - if (result == DC_OK) - if (!dce100_validate_bandwidth(dc, context)) - result = DC_FAIL_BANDWIDTH_VALIDATE; - - return result; -} - static void dce100_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); @@ -786,7 +754,6 @@ enum dc_status dce100_validate_plane(const struct dc_plane_state *plane_state, s static const struct resource_funcs dce100_res_pool_funcs = { .destroy = dce100_destroy_resource_pool, .link_enc_create = dce100_link_encoder_create, - .validate_guaranteed = dce100_validate_guaranteed, .validate_bandwidth = dce100_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index b1f14be20fdf..ee33786bdef6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -930,38 +930,6 @@ static enum dc_status dce110_add_stream_to_ctx( return result; } -static enum dc_status dce110_validate_guaranteed( - struct dc *dc, - struct dc_stream_state *dc_stream, - struct dc_state *context) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - context->streams[0] = dc_stream; - dc_stream_retain(context->streams[0]); - context->stream_count++; - - result = resource_map_pool_resources(dc, context, dc_stream); - - if (result == DC_OK) - result = resource_map_clock_resources(dc, context, dc_stream); - - if (result == DC_OK) - result = build_mapped_resource(dc, context, dc_stream); - - if (result == DC_OK) { - validate_guaranteed_copy_streams( - context, dc->caps.max_streams); - result = resource_build_scaling_params_for_context(dc, context); - } - - if (result == DC_OK) - if (!dce110_validate_bandwidth(dc, context)) - result = DC_FAIL_BANDWIDTH_VALIDATE; - - return result; -} - static struct pipe_ctx *dce110_acquire_underlay( struct dc_state *context, const struct resource_pool *pool, @@ -1036,7 +1004,6 @@ static void dce110_destroy_resource_pool(struct resource_pool **pool) static const struct resource_funcs dce110_res_pool_funcs = { .destroy = dce110_destroy_resource_pool, .link_enc_create = dce110_link_encoder_create, - .validate_guaranteed = dce110_validate_guaranteed, .validate_bandwidth = dce110_validate_bandwidth, .validate_plane = dce110_validate_plane, .acquire_idle_pipe_for_layer = dce110_acquire_underlay, diff --git 
a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index cd1e3f72c44e..0a476636c5c7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -867,38 +867,6 @@ enum dc_status dce112_add_stream_to_ctx( return result; } -enum dc_status dce112_validate_guaranteed( - struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - context->streams[0] = stream; - dc_stream_retain(context->streams[0]); - context->stream_count++; - - result = resource_map_pool_resources(dc, context, stream); - - if (result == DC_OK) - result = resource_map_phy_clock_resources(dc, context, stream); - - if (result == DC_OK) - result = build_mapped_resource(dc, context, stream); - - if (result == DC_OK) { - validate_guaranteed_copy_streams( - context, dc->caps.max_streams); - result = resource_build_scaling_params_for_context(dc, context); - } - - if (result == DC_OK) - if (!dce112_validate_bandwidth(dc, context)) - result = DC_FAIL_BANDWIDTH_VALIDATE; - - return result; -} - enum dc_status dce112_validate_global( struct dc *dc, struct dc_state *context) @@ -921,7 +889,6 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool) static const struct resource_funcs dce112_res_pool_funcs = { .destroy = dce112_destroy_resource_pool, .link_enc_create = dce112_link_encoder_create, - .validate_guaranteed = dce112_validate_guaranteed, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h index d5c19d34eb0a..95a403396219 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h @@ -42,11 +42,6 @@ enum dc_status dce112_validate_with_context( struct dc_state *context, struct dc_state *old_context); -enum dc_status dce112_validate_guaranteed( - struct dc *dc, - struct dc_stream_state *dc_stream, - struct dc_state *context); - bool dce112_validate_bandwidth( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 4659a4bfabaa..567e6b487877 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -684,7 +684,6 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool) static const struct resource_funcs dce120_res_pool_funcs = { .destroy = dce120_destroy_resource_pool, .link_enc_create = dce120_link_encoder_create, - .validate_guaranteed = dce112_validate_guaranteed, .validate_bandwidth = dce112_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce112_add_stream_to_ctx diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 5d854a37a978..48a068964722 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -691,23 +691,6 @@ static void destruct(struct dce110_resource_pool *pool) } } -static enum dc_status build_mapped_resource( - const struct dc *dc, - struct dc_state *context, - struct dc_stream_state *stream) -{ - struct pipe_ctx *pipe_ctx = 
resource_get_head_pipe_for_stream(&context->res_ctx, stream); - - if (!pipe_ctx) - return DC_ERROR_UNEXPECTED; - - dce110_resource_build_pipe_hw_param(pipe_ctx); - - resource_build_info_frame(pipe_ctx); - - return DC_OK; -} - bool dce80_validate_bandwidth( struct dc *dc, struct dc_state *context) @@ -749,37 +732,6 @@ enum dc_status dce80_validate_global( return DC_OK; } -enum dc_status dce80_validate_guaranteed( - struct dc *dc, - struct dc_stream_state *dc_stream, - struct dc_state *context) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - context->streams[0] = dc_stream; - dc_stream_retain(context->streams[0]); - context->stream_count++; - - result = resource_map_pool_resources(dc, context, dc_stream); - - if (result == DC_OK) - result = resource_map_clock_resources(dc, context, dc_stream); - - if (result == DC_OK) - result = build_mapped_resource(dc, context, dc_stream); - - if (result == DC_OK) { - validate_guaranteed_copy_streams( - context, dc->caps.max_streams); - result = resource_build_scaling_params_for_context(dc, context); - } - - if (result == DC_OK) - result = dce80_validate_bandwidth(dc, context); - - return result; -} - static void dce80_destroy_resource_pool(struct resource_pool **pool) { struct dce110_resource_pool *dce110_pool = TO_DCE110_RES_POOL(*pool); @@ -792,7 +744,6 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool) static const struct resource_funcs dce80_res_pool_funcs = { .destroy = dce80_destroy_resource_pool, .link_enc_create = dce80_link_encoder_create, - .validate_guaranteed = dce80_validate_guaranteed, .validate_bandwidth = dce80_validate_bandwidth, .validate_plane = dce100_validate_plane, .add_stream_to_ctx = dce100_add_stream_to_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 3356125a6117..5f40a7374c02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -121,6 +121,13 @@ bool dpp_get_optimal_number_of_taps( else pixel_width = scl_data->viewport.width; + /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ + if (scl_data->viewport.width != scl_data->h_active && + scl_data->viewport.height != scl_data->v_active && + dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && + scl_data->format == PIXEL_FORMAT_FP16) + return false; + /* TODO: add lb check */ /* No support for programming ratio of 4, drop to 3.99999.. 
*/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 02bd664aed3e..a3fe343b4a85 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -918,36 +918,6 @@ enum dc_status dcn10_add_stream_to_ctx( return result; } -enum dc_status dcn10_validate_guaranteed( - struct dc *dc, - struct dc_stream_state *dc_stream, - struct dc_state *context) -{ - enum dc_status result = DC_ERROR_UNEXPECTED; - - context->streams[0] = dc_stream; - dc_stream_retain(context->streams[0]); - context->stream_count++; - - result = resource_map_pool_resources(dc, context, dc_stream); - - if (result == DC_OK) - result = resource_map_phy_clock_resources(dc, context, dc_stream); - - if (result == DC_OK) - result = build_mapped_resource(dc, context, dc_stream); - - if (result == DC_OK) { - validate_guaranteed_copy_streams( - context, dc->caps.max_streams); - result = resource_build_scaling_params_for_context(dc, context); - } - if (result == DC_OK && !dcn_validate_bandwidth(dc, context)) - return DC_FAIL_BANDWIDTH_VALIDATE; - - return result; -} - static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( struct dc_state *context, const struct resource_pool *pool, @@ -1233,7 +1203,6 @@ static struct dc_cap_funcs cap_funcs = { static struct resource_funcs dcn10_res_pool_funcs = { .destroy = dcn10_destroy_resource_pool, .link_enc_create = dcn10_link_encoder_create, - .validate_guaranteed = dcn10_validate_guaranteed, .validate_bandwidth = dcn_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 55f56bf7d5b6..a94942d4e66b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -95,11 +95,6 @@ struct resource_funcs { struct link_encoder *(*link_enc_create)( const struct encoder_init_data *init); - enum dc_status (*validate_guaranteed)( - struct dc *dc, - struct dc_stream_state *stream, - struct dc_state *context); - bool (*validate_bandwidth)( struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 5467332faf7b..640a647f4611 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -139,10 +139,6 @@ bool resource_validate_attach_surfaces( struct dc_state *context, const struct resource_pool *pool); -void validate_guaranteed_copy_streams( - struct dc_state *context, - int max_streams); - void resource_validate_ctx_update_pointer_after_copy( const struct dc_state *src_ctx, struct dc_state *dst_ctx); -- cgit v1.2.1 From 3722c794641f91e0b960dd901d6c5d2f3cc24080 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Wed, 21 Feb 2018 16:57:10 -0500 Subject: drm/amd/display: Implementing new bandwidth registers for DCE120 Registers are added and defined. Programmed to default values. Stutter level watermark register is being set to calculated value. Urgent level registers are programmed to the same as urgency. The programming of the registers is not expected to have any functional difference in performance. 
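As context for the patch that follows, here is a small self-contained C sketch of the programming order the message describes: for each of the four watermark sets, select the set, program the urgency low/high pair, mirror that same pair into the new urgent-level register, and program both the stutter exit and the new stutter entry watermark. All register and field names in the sketch are placeholders, not the driver's REG_* macros or real DCE120 registers.

/*
 * Illustrative sketch only -- not part of the patch. It mimics the
 * sequence the commit message describes; every name is a stand-in.
 */
#include <stdio.h>

struct wm_set {
	unsigned int urgency_low;
	unsigned int urgency_high;
	unsigned int stutter_exit;
	unsigned int stutter_enter;
};

static void program_one_set(unsigned int wm_select, const struct wm_set *wm)
{
	/* Stand-ins for register updates on the real hardware. */
	printf("WM_MASK_CONTROL      <- select set %u\n", wm_select);
	printf("URGENCY_CONTROL      <- low %u, high %u\n",
	       wm->urgency_low, wm->urgency_high);
	/* New on DCE120: urgent level mirrors the urgency watermarks. */
	printf("URGENT_LEVEL_CONTROL <- low %u, high %u\n",
	       wm->urgency_low, wm->urgency_high);
	/* Stutter control now takes an entry mark as well as an exit mark. */
	printf("STUTTER_CONTROL      <- exit %u, enter %u\n",
	       wm->stutter_exit, wm->stutter_enter);
}

int main(void)
{
	const struct wm_set sets[4] = {
		{ 40, 80, 100, 10 }, { 40, 80, 100, 10 },
		{ 40, 80, 100, 10 }, { 40, 80, 100, 10 },
	};
	unsigned int i;

	for (i = 0; i < 4; i++)	/* sets A through D */
		program_one_set(i, &sets[i]);
	return 0;
}
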
Signed-off-by: Mikita Lipski Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 58 +++++++++++++++------- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 9 ++++ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 3 ++ .../drm/amd/display/dc/dce110/dce110_mem_input_v.c | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h | 1 + 5 files changed, 55 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 0790f25c7b3b..04fc86bb95a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -174,6 +174,25 @@ static void program_urgency_watermark( URGENCY_HIGH_WATERMARK, urgency_high_wm); } +static void dce120_program_urgency_watermark( + struct dce_mem_input *dce_mi, + uint32_t wm_select, + uint32_t urgency_low_wm, + uint32_t urgency_high_wm) +{ + REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, + URGENCY_WATERMARK_MASK, wm_select); + + REG_SET_2(DPG_PIPE_URGENCY_CONTROL, 0, + URGENCY_LOW_WATERMARK, urgency_low_wm, + URGENCY_HIGH_WATERMARK, urgency_high_wm); + + REG_SET_2(DPG_PIPE_URGENT_LEVEL_CONTROL, 0, + URGENT_LEVEL_LOW_WATERMARK, urgency_low_wm, + URGENT_LEVEL_HIGH_WATERMARK, urgency_high_wm); + +} + static void program_nbp_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, @@ -209,23 +228,27 @@ static void program_nbp_watermark( static void program_stutter_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, - uint32_t stutter_mark) + uint32_t stutter_mark, + uint32_t stutter_entry) { REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select); if (REG(DPG_PIPE_STUTTER_CONTROL2)) - REG_UPDATE(DPG_PIPE_STUTTER_CONTROL2, - STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark); + REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL2, + STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark, + STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry); else - REG_UPDATE(DPG_PIPE_STUTTER_CONTROL, - STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark); + REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, + STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark, + STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry); } static void dce_mi_program_display_marks( struct mem_input *mi, struct dce_watermarks nbp, - struct dce_watermarks stutter, + struct dce_watermarks stutter_exit, + struct dce_watermarks stutter_enter, struct dce_watermarks urgent, uint32_t total_dest_line_time_ns) { @@ -243,26 +266,27 @@ static void dce_mi_program_display_marks( program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */ program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */ - program_stutter_watermark(dce_mi, 2, stutter.a_mark); /* set a */ - program_stutter_watermark(dce_mi, 1, stutter.d_mark); /* set d */ + program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark, stutter_enter.a_mark); /* set a */ + program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark, stutter_enter.d_mark); /* set d */ } static void dce120_mi_program_display_marks(struct mem_input *mi, struct dce_watermarks nbp, - struct dce_watermarks stutter, + struct dce_watermarks stutter_exit, + struct dce_watermarks stutter_entry, struct dce_watermarks urgent, uint32_t total_dest_line_time_ns) { struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 
0 : 1; - program_urgency_watermark(dce_mi, 0, /* set a */ + dce120_program_urgency_watermark(dce_mi, 0, /* set a */ urgent.a_mark, total_dest_line_time_ns); - program_urgency_watermark(dce_mi, 1, /* set b */ + dce120_program_urgency_watermark(dce_mi, 1, /* set b */ urgent.b_mark, total_dest_line_time_ns); - program_urgency_watermark(dce_mi, 2, /* set c */ + dce120_program_urgency_watermark(dce_mi, 2, /* set c */ urgent.c_mark, total_dest_line_time_ns); - program_urgency_watermark(dce_mi, 3, /* set d */ + dce120_program_urgency_watermark(dce_mi, 3, /* set d */ urgent.d_mark, total_dest_line_time_ns); REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, @@ -273,10 +297,10 @@ static void dce120_mi_program_display_marks(struct mem_input *mi, program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */ program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */ - program_stutter_watermark(dce_mi, 0, stutter.a_mark); /* set a */ - program_stutter_watermark(dce_mi, 1, stutter.b_mark); /* set b */ - program_stutter_watermark(dce_mi, 2, stutter.c_mark); /* set c */ - program_stutter_watermark(dce_mi, 3, stutter.d_mark); /* set d */ + program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */ + program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */ + program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */ + program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */ } static void program_tiling( diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h index 05d39c0cbe87..e877e7329e8f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h @@ -106,6 +106,7 @@ struct dce_mem_input_registers { uint32_t DPG_PIPE_ARBITRATION_CONTROL1; uint32_t DPG_WATERMARK_MASK_CONTROL; uint32_t DPG_PIPE_URGENCY_CONTROL; + uint32_t DPG_PIPE_URGENT_LEVEL_CONTROL; uint32_t DPG_PIPE_NB_PSTATE_CHANGE_CONTROL; uint32_t DPG_PIPE_LOW_POWER_CONTROL; uint32_t DPG_PIPE_STUTTER_CONTROL; @@ -213,6 +214,11 @@ struct dce_mem_input_registers { #define MI_DCE12_DMIF_PG_MASK_SH_LIST(mask_sh, blk)\ SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_EXIT_SELF_REFRESH_WATERMARK, mask_sh),\ + SFB(blk, DPG_PIPE_STUTTER_CONTROL2, STUTTER_ENTER_SELF_REFRESH_WATERMARK, mask_sh),\ + SFB(blk, DPG_PIPE_URGENT_LEVEL_CONTROL, URGENT_LEVEL_LOW_WATERMARK, mask_sh),\ + SFB(blk, DPG_PIPE_URGENT_LEVEL_CONTROL, URGENT_LEVEL_HIGH_WATERMARK, mask_sh),\ + SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, mask_sh),\ + SFB(blk, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, mask_sh),\ SFB(blk, DPG_WATERMARK_MASK_CONTROL, PSTATE_CHANGE_WATERMARK_MASK, mask_sh),\ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_ENABLE, mask_sh),\ SFB(blk, DPG_PIPE_LOW_POWER_CONTROL, PSTATE_CHANGE_URGENT_DURING_REQUEST, mask_sh),\ @@ -286,6 +292,8 @@ struct dce_mem_input_registers { type STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK; \ type URGENCY_LOW_WATERMARK; \ type URGENCY_HIGH_WATERMARK; \ + type URGENT_LEVEL_LOW_WATERMARK;\ + type URGENT_LEVEL_HIGH_WATERMARK;\ type NB_PSTATE_CHANGE_ENABLE; \ type NB_PSTATE_CHANGE_URGENT_DURING_REQUEST; \ type NB_PSTATE_CHANGE_NOT_SELF_REFRESH_DURING_REQUEST; \ @@ -297,6 +305,7 @@ struct dce_mem_input_registers { type STUTTER_ENABLE; \ type STUTTER_IGNORE_FBC; \ type STUTTER_EXIT_SELF_REFRESH_WATERMARK; \ + type STUTTER_ENTER_SELF_REFRESH_WATERMARK; \ type DMIF_BUFFERS_ALLOCATED; \ type 
DMIF_BUFFERS_ALLOCATION_COMPLETED; \ type ENABLE; /* MC_HUB_RDREQ_DMIF_LIMIT */\ diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 30dd62f0f5fa..daa4673675f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1539,6 +1539,7 @@ static void dce110_set_displaymarks( pipe_ctx->plane_res.mi, context->bw.dce.nbp_state_change_wm_ns[num_pipes], context->bw.dce.stutter_exit_wm_ns[num_pipes], + context->bw.dce.stutter_entry_wm_ns[num_pipes], context->bw.dce.urgent_wm_ns[num_pipes], total_dest_line_time_ns); if (i == underlay_idx) { @@ -1564,6 +1565,7 @@ static void set_safe_displaymarks( MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK, MAX_WATERMARK }; struct dce_watermarks nbp_marks = { SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK, SAFE_NBP_MARK }; + struct dce_watermarks min_marks = { 0, 0, 0, 0}; for (i = 0; i < MAX_PIPES; i++) { if (res_ctx->pipe_ctx[i].stream == NULL || res_ctx->pipe_ctx[i].plane_res.mi == NULL) @@ -1573,6 +1575,7 @@ static void set_safe_displaymarks( res_ctx->pipe_ctx[i].plane_res.mi, nbp_marks, max_marks, + min_marks, max_marks, MAX_WATERMARK); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c index 7bab8c6d2a73..0564c8e31252 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c @@ -923,6 +923,7 @@ void dce_mem_input_v_program_display_marks( struct mem_input *mem_input, struct dce_watermarks nbp, struct dce_watermarks stutter, + struct dce_watermarks stutter_enter, struct dce_watermarks urgent, uint32_t total_dest_line_time_ns) { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index 3e1e7e6a8792..47f1dc5a43b7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -104,6 +104,7 @@ struct mem_input_funcs { struct mem_input *mem_input, struct dce_watermarks nbp, struct dce_watermarks stutter, + struct dce_watermarks stutter_enter, struct dce_watermarks urgent, uint32_t total_dest_line_time_ns); -- cgit v1.2.1 From 8a6095e08e4ae279de570b175e5aee525ae1251e Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Wed, 21 Feb 2018 16:42:05 -0500 Subject: drm/amd/display: dal 3.1.40 Signed-off-by: Tony Cheng Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index bdc3cef002d6..63817ed56c11 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.39" +#define DC_VER "3.1.40" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- cgit v1.2.1 From a052a516de4c3e46f2e442ec118c391dbf9932e3 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 13 Mar 2018 15:00:20 -0400 Subject: drm/amd/display: align dtn logs and add mpc idle bit print Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 53 ++++++++++------------ 
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 4 +- drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h | 2 + 3 files changed, 28 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 999190aa8a08..675a81a87099 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -56,16 +56,17 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name +/*print is 17 wide, first two characters are spaces*/ #define DTN_INFO_MICRO_SEC(ref_cycle) \ print_microsec(dc_ctx, ref_cycle) void print_microsec(struct dc_context *dc_ctx, uint32_t ref_cycle) { - static const uint32_t ref_clk_mhz = 48; - static const unsigned int frac = 10; + const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000; + static const unsigned int frac = 1000; uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz; - DTN_INFO("%d.%d \t ", + DTN_INFO(" %11d.%03d", us_x10 / frac, us_x10 % frac); } @@ -92,14 +93,14 @@ void dcn10_log_hubbub_state(struct dc *dc) hubbub1_wm_read_state(dc->res_pool->hubbub, &wm); - DTN_INFO("HUBBUB WM: \t data_urgent \t pte_meta_urgent \t " - "sr_enter \t sr_exit \t dram_clk_change \n"); + DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent" + " sr_enter sr_exit dram_clk_change\n"); for (i = 0; i < 4; i++) { struct dcn_hubbub_wm_set *s; s = &wm.sets[i]; - DTN_INFO("WM_Set[%d]:\t ", s->wm_set); + DTN_INFO("WM_Set[%d]:", s->wm_set); DTN_INFO_MICRO_SEC(s->data_urgent); DTN_INFO_MICRO_SEC(s->pte_meta_urgent); DTN_INFO_MICRO_SEC(s->sr_enter); @@ -121,19 +122,17 @@ void dcn10_log_hw_state(struct dc *dc) dcn10_log_hubbub_state(dc); - DTN_INFO("HUBP: format addr_hi width height " - "rotation mirror sw_mode " - "dcc_en blank_en ttu_dis underflow " - "min_ttu_vblank qos_low_wm qos_high_wm\n"); + DTN_INFO("HUBP: format addr_hi width height" + " rot mir sw_mode dcc_en blank_en ttu_dis underflow" + " min_ttu_vblank qos_low_wm qos_high_wm\n"); for (i = 0; i < pool->pipe_count; i++) { struct hubp *hubp = pool->hubps[i]; struct dcn_hubp_state s; hubp1_read_state(TO_DCN10_HUBP(hubp), &s); - DTN_INFO("[%-2d]: %5xh %6xh %5d %6d " - "%7xh %5xh %6xh " - "%6d %8d %7d %8xh \t", + DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh" + " %6d %8d %7d %8xh", hubp->inst, s.pixel_format, s.inuse_addr_hi, @@ -152,25 +151,21 @@ void dcn10_log_hw_state(struct dc *dc) DTN_INFO("\n"); } DTN_INFO("\n"); + + DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n"); for (i = 0; i < pool->pipe_count; i++) { - struct output_pixel_processor *opp = pool->opps[i]; - struct mpcc *mpcc = opp->mpc_tree_params.opp_list; struct mpcc_state s = {0}; - while (mpcc) { - ASSERT(opp->mpc_tree_params.opp_id == opp->inst); - pool->mpc->funcs->read_mpcc_state(pool->mpc, mpcc->mpcc_id, &s); - DTN_INFO("[OPP%d - MPCC%d]: DPP%d MPCCBOT%x MODE:%d ALPHA_MODE:%d PREMULT:%d OVERLAP_ONLY:%d\n", - s.opp_id, mpcc->mpcc_id, s.dpp_id, s.bot_mpcc_id, - s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only); - mpcc = mpcc->mpcc_bot; - ASSERT(!mpcc || mpcc->mpcc_id == s.bot_mpcc_id); - } + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n", + i, s.opp_id, s.dpp_id, s.bot_mpcc_id, + s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, + s.idle); } DTN_INFO("\n"); - DTN_INFO("OTG:\t v_bs \t v_be \t v_ss \t v_se \t vpol \t vmax \t 
vmin \t " - "h_bs \t h_be \t h_ss \t h_se \t hpol \t htot \t vtot \t underflow\n"); + DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin" + " h_bs h_be h_ss h_se hpol htot vtot underflow\n"); for (i = 0; i < pool->timing_generator_count; i++) { struct timing_generator *tg = pool->timing_generators[i]; @@ -182,9 +177,8 @@ void dcn10_log_hw_state(struct dc *dc) if ((s.otg_enabled & 1) == 0) continue; - DTN_INFO("[%d]:\t %d \t %d \t %d \t %d \t " - "%d \t %d \t %d \t %d \t %d \t %d \t " - "%d \t %d \t %d \t %d \t %d \t ", + DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %5d %5d %5d" + " %5d %5d %5d %5d %9d\n", tg->inst, s.v_blank_start, s.v_blank_end, @@ -201,7 +195,6 @@ void dcn10_log_hw_state(struct dc *dc) s.h_total, s.v_total, s.underflow_occurred_status); - DTN_INFO("\n"); } DTN_INFO("\n"); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 29e15a93a7d0..6f7016a2a11e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -422,7 +422,9 @@ void mpc1_read_mpcc_state( REG_GET_4(MPCC_CONTROL[mpcc_inst], MPCC_MODE, &s->mode, MPCC_ALPHA_BLND_MODE, &s->alpha_mode, MPCC_ALPHA_MULTIPLIED_MODE, &s->pre_multiplied_alpha, - MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->pre_multiplied_alpha); + MPCC_BLND_ACTIVE_OVERLAP_ONLY, &s->overlap_only); + REG_GET_2(MPCC_STATUS[mpcc_inst], MPCC_IDLE, &s->idle, + MPCC_BUSY, &s->busy); } const struct mpc_funcs dcn10_mpc_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 5caacab216b5..caf74e3c836f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -113,6 +113,8 @@ struct mpcc_state { uint32_t alpha_mode; uint32_t pre_multiplied_alpha; uint32_t overlap_only; + uint32_t idle; + uint32_t busy; }; struct mpc_funcs { -- cgit v1.2.1 From 7a84077304e84d0254d505a76cc40971cb74c2de Mon Sep 17 00:00:00 2001 From: Roman Li Date: Wed, 14 Mar 2018 18:02:07 -0400 Subject: drm/amd/display: add assert in enable FBC Adding assert to prevent possible null deref warning. Only can happen under abnormal circumstances. Signed-off-by: Roman Li Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index daa4673675f1..075ab291cdc7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1801,6 +1801,9 @@ static bool should_enable_fbc(struct dc *dc, } } + /* Pipe context should be found */ + ASSERT(pipe_ctx); + /* Only supports eDP */ if (pipe_ctx->stream->sink->link->connector_signal != SIGNAL_TYPE_EDP) return false; -- cgit v1.2.1 From d72eb20379022a948c219e1fc451b6b0200cc7c5 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Wed, 14 Mar 2018 14:42:25 -0400 Subject: drm/amd/display: Separate mem input constuctors for dce 112 and 120 Override the memory input functions for dce120 not to program new registers on dce112. This will fix warnings thrown on Polaris asics. 
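As a reading aid for the patch that follows, here is a minimal C sketch of the constructor-override pattern it relies on: a shared constructor fills in a table of function pointers, and each ASIC-specific constructor replaces only the display-mark hook whose register layout differs, so the DCE112 path never writes registers that exist only on DCE120. All names below are illustrative placeholders, not the driver's own symbols.

/* Illustrative sketch only -- not driver code. */
#include <stdio.h>

struct mem_input_funcs {
	void (*program_display_marks)(void);
};

struct mem_input {
	struct mem_input_funcs funcs;
};

static void base_program_display_marks(void)
{
	puts("program watermarks common to all DCE generations");
}

static void dce120_program_display_marks(void)
{
	puts("program common watermarks plus DCE120-only urgent-level regs");
}

static void base_mem_input_construct(struct mem_input *mi)
{
	mi->funcs.program_display_marks = base_program_display_marks;
}

static void dce120_mem_input_construct(struct mem_input *mi)
{
	base_mem_input_construct(mi);
	/* Override: only the DCE120 variant touches the new registers. */
	mi->funcs.program_display_marks = dce120_program_display_marks;
}

int main(void)
{
	struct mem_input dce112, dce120;

	base_mem_input_construct(&dce112);   /* DCE112 keeps the base hook */
	dce120_mem_input_construct(&dce120); /* DCE120 gets the override   */

	dce112.funcs.program_display_marks();
	dce120.funcs.program_display_marks();
	return 0;
}
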
Signed-off-by: Mikita Lipski Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 75 ++++++++++++++++++++-- drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h | 8 +++ .../drm/amd/display/dc/dce120/dce120_resource.c | 2 +- 3 files changed, 77 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 04fc86bb95a1..b235a75355b8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -225,7 +225,7 @@ static void program_nbp_watermark( } } -static void program_stutter_watermark( +static void dce120_program_stutter_watermark( struct dce_mem_input *dce_mi, uint32_t wm_select, uint32_t stutter_mark, @@ -244,6 +244,22 @@ static void program_stutter_watermark( STUTTER_ENTER_SELF_REFRESH_WATERMARK, stutter_entry); } +static void program_stutter_watermark( + struct dce_mem_input *dce_mi, + uint32_t wm_select, + uint32_t stutter_mark) +{ + REG_UPDATE(DPG_WATERMARK_MASK_CONTROL, + STUTTER_EXIT_SELF_REFRESH_WATERMARK_MASK, wm_select); + + if (REG(DPG_PIPE_STUTTER_CONTROL2)) + REG_UPDATE(DPG_PIPE_STUTTER_CONTROL2, + STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark); + else + REG_UPDATE(DPG_PIPE_STUTTER_CONTROL, + STUTTER_EXIT_SELF_REFRESH_WATERMARK, stutter_mark); +} + static void dce_mi_program_display_marks( struct mem_input *mi, struct dce_watermarks nbp, @@ -266,8 +282,41 @@ static void dce_mi_program_display_marks( program_nbp_watermark(dce_mi, 2, nbp.a_mark); /* set a */ program_nbp_watermark(dce_mi, 1, nbp.d_mark); /* set d */ - program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark, stutter_enter.a_mark); /* set a */ - program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark, stutter_enter.d_mark); /* set d */ + program_stutter_watermark(dce_mi, 2, stutter_exit.a_mark); /* set a */ + program_stutter_watermark(dce_mi, 1, stutter_exit.d_mark); /* set d */ +} + +static void dce112_mi_program_display_marks(struct mem_input *mi, + struct dce_watermarks nbp, + struct dce_watermarks stutter_exit, + struct dce_watermarks stutter_entry, + struct dce_watermarks urgent, + uint32_t total_dest_line_time_ns) +{ + struct dce_mem_input *dce_mi = TO_DCE_MEM_INPUT(mi); + uint32_t stutter_en = mi->ctx->dc->debug.disable_stutter ? 
0 : 1; + + program_urgency_watermark(dce_mi, 0, /* set a */ + urgent.a_mark, total_dest_line_time_ns); + program_urgency_watermark(dce_mi, 1, /* set b */ + urgent.b_mark, total_dest_line_time_ns); + program_urgency_watermark(dce_mi, 2, /* set c */ + urgent.c_mark, total_dest_line_time_ns); + program_urgency_watermark(dce_mi, 3, /* set d */ + urgent.d_mark, total_dest_line_time_ns); + + REG_UPDATE_2(DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE, stutter_en, + STUTTER_IGNORE_FBC, 1); + program_nbp_watermark(dce_mi, 0, nbp.a_mark); /* set a */ + program_nbp_watermark(dce_mi, 1, nbp.b_mark); /* set b */ + program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */ + program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */ + + program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark); /* set a */ + program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark); /* set b */ + program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark); /* set c */ + program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark); /* set d */ } static void dce120_mi_program_display_marks(struct mem_input *mi, @@ -297,10 +346,10 @@ static void dce120_mi_program_display_marks(struct mem_input *mi, program_nbp_watermark(dce_mi, 2, nbp.c_mark); /* set c */ program_nbp_watermark(dce_mi, 3, nbp.d_mark); /* set d */ - program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */ - program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */ - program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */ - program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */ + dce120_program_stutter_watermark(dce_mi, 0, stutter_exit.a_mark, stutter_entry.a_mark); /* set a */ + dce120_program_stutter_watermark(dce_mi, 1, stutter_exit.b_mark, stutter_entry.b_mark); /* set b */ + dce120_program_stutter_watermark(dce_mi, 2, stutter_exit.c_mark, stutter_entry.c_mark); /* set c */ + dce120_program_stutter_watermark(dce_mi, 3, stutter_exit.d_mark, stutter_entry.d_mark); /* set d */ } static void program_tiling( @@ -718,6 +767,18 @@ void dce112_mem_input_construct( const struct dce_mem_input_registers *regs, const struct dce_mem_input_shift *mi_shift, const struct dce_mem_input_mask *mi_mask) +{ + dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); + dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks; +} + +void dce120_mem_input_construct( + struct dce_mem_input *dce_mi, + struct dc_context *ctx, + int inst, + const struct dce_mem_input_registers *regs, + const struct dce_mem_input_shift *mi_shift, + const struct dce_mem_input_mask *mi_mask) { dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask); dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h index e877e7329e8f..d15b0d7f47fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h @@ -353,4 +353,12 @@ void dce112_mem_input_construct( const struct dce_mem_input_shift *mi_shift, const struct dce_mem_input_mask *mi_mask); +void dce120_mem_input_construct( + struct dce_mem_input *dce_mi, + struct dc_context *ctx, + int inst, + const struct dce_mem_input_registers *regs, + const struct dce_mem_input_shift *mi_shift, + const struct dce_mem_input_mask *mi_mask); + #endif /*__DCE_MEM_INPUT_H__*/ diff --git 
a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 567e6b487877..fda01574d1ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -652,7 +652,7 @@ static struct mem_input *dce120_mem_input_create( return NULL; } - dce112_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); + dce120_mem_input_construct(dce_mi, ctx, inst, &mi_regs[inst], &mi_shifts, &mi_masks); return &dce_mi->base; } -- cgit v1.2.1 From 4173c0bdd7b79ef46161037f8845654416dbaca9 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 14 Mar 2018 17:41:57 -0400 Subject: drm/amd/display: Only update mpc blend config if not full update The current mpcc insert/remove logic does not support updating only a single mpcc. So when pixel alpha changed but no full update we can mistakenly shuffle the mpcc layering order. With this change we will only insert/remove mpcc if there is full update. Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 9 +++++++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c | 5 +++-- 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 675a81a87099..27ae88e3a373 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1631,6 +1631,8 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) struct mpc *mpc = dc->res_pool->mpc; struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params); + + /* TODO: proper fix once fpga works */ if (dc->debug.surface_visual_confirm) @@ -1657,6 +1659,7 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) pipe_ctx->stream->output_color_space) && per_pixel_alpha; + /* * TODO: remove hack * Note: currently there is a bug in init_hw such that @@ -1667,6 +1670,12 @@ static void update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) */ mpcc_id = hubp->inst; + /* If there is no full update, don't need to touch MPC tree*/ + if (!pipe_ctx->plane_state->update_flags.bits.full_update) { + mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id); + return; + } + /* check if this MPCC is already being used */ new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id); /* remove MPCC if being used */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c index 6f7016a2a11e..9ca51ae46de7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c @@ -65,6 +65,7 @@ static void mpc1_update_blending( int mpcc_id) { struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); + struct mpcc *mpcc = mpc1_get_mpcc(mpc, mpcc_id); REG_UPDATE_5(MPCC_CONTROL[mpcc_id], MPCC_ALPHA_BLND_MODE, blnd_cfg->alpha_mode, @@ -74,6 +75,7 @@ static void mpc1_update_blending( MPCC_GLOBAL_GAIN, blnd_cfg->global_gain); mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id); + mpcc->blnd_cfg = *blnd_cfg; } void mpc1_update_stereo_mix( @@ -235,8 +237,7 @@ struct mpcc *mpc1_insert_plane( } /* update the blending configuration */ - new_mpcc->blnd_cfg = *blnd_cfg; - mpc->funcs->update_blending(mpc, &new_mpcc->blnd_cfg, mpcc_id); + 
mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id); /* update the stereo mix settings, if provided */ if (sm_cfg != NULL) { -- cgit v1.2.1 From 1ba2faf207b47e23b1d756e7be25e980724214a7 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 15 Mar 2018 10:25:43 -0400 Subject: drm/amd/display: hide inconsistent mpcc programming from dtn log Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 27ae88e3a373..e21458169d15 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -157,10 +157,11 @@ void dcn10_log_hw_state(struct dc *dc) struct mpcc_state s = {0}; pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); - DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n", - i, s.opp_id, s.dpp_id, s.bot_mpcc_id, - s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, - s.idle); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n", + i, s.opp_id, s.dpp_id, s.bot_mpcc_id, + s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only, + s.idle); } DTN_INFO("\n"); -- cgit v1.2.1 From 24238ee6549bcbebca2f6fc49c225c4f377df8c0 Mon Sep 17 00:00:00 2001 From: Vitaly Prosyak Date: Tue, 13 Mar 2018 15:18:34 -0500 Subject: drm/amd/display: Add dc_lut_mode enum Signed-off-by: Vitaly Prosyak Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index b22158190262..015e209e58bc 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -191,4 +191,9 @@ enum controller_dp_test_pattern { CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA }; +enum dc_lut_mode { + LUT_BYPASS, + LUT_RAM_A, + LUT_RAM_B +}; #endif /* __DAL_HW_SHARED_H__ */ -- cgit v1.2.1 From 3032deb52a6bf706657c39d6335c81ce3265974d Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Wed, 14 Mar 2018 11:19:15 -0400 Subject: drm/amd/display: Correct print types in DC_LOGS Correct the types used for printing in logs. This is needed for adding dynamic printing (LINUX), otherwise we get warnings. 
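To make the class of warnings concrete, here is a short standalone example of the format-specifier mismatches this patch corrects (a double printed with %d, a 64-bit address with %X, a pointer with %x, and a bare % where %% is required); it is illustrative only and uses plain printf rather than the driver's logging macros.

/* Illustrative example only -- not driver code. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	double latency_us = 9.0;          /* doubles need %f, not %d       */
	uint64_t addr = 0x1234567890ULL;  /* 64-bit values need %llX       */
	void *stream_enc = &latency_us;   /* pointers need %p, not 0x%x    */

	/* Old style that triggers -Wformat warnings (kept as a comment):
	 * printf("lat: %d addr: 0x%X enc: 0x%x 100%\n", ...);
	 */

	/* Corrected specifiers, matching the intent of the patch: */
	printf("lat: %f us, addr: 0x%llX, enc: %p, 100%%\n",
	       latency_us, (unsigned long long)addr, stream_enc);
	return 0;
}
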
Signed-off-by: Bhawanpreet Lakha Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 74 +++++++++++----------- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 14 ++-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 8 +-- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 4 +- .../dc/i2caux/dce110/i2c_hw_engine_dce110.c | 2 +- 5 files changed, 52 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 4bb43a371292..a102c192328d 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -1459,39 +1459,39 @@ void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) void dcn_bw_sync_calcs_and_dml(struct dc *dc) { kernel_fpu_begin(); - DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %d ns\n" - "sr_enter_plus_exit_time: %d ns\n" - "urgent_latency: %d ns\n" - "write_back_latency: %d ns\n" - "percent_of_ideal_drambw_received_after_urg_latency: %d %\n" + DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n" + "sr_enter_plus_exit_time: %f ns\n" + "urgent_latency: %f ns\n" + "write_back_latency: %f ns\n" + "percent_of_ideal_drambw_received_after_urg_latency: %f %%\n" "max_request_size: %d bytes\n" - "dcfclkv_max0p9: %d kHz\n" - "dcfclkv_nom0p8: %d kHz\n" - "dcfclkv_mid0p72: %d kHz\n" - "dcfclkv_min0p65: %d kHz\n" - "max_dispclk_vmax0p9: %d kHz\n" - "max_dispclk_vnom0p8: %d kHz\n" - "max_dispclk_vmid0p72: %d kHz\n" - "max_dispclk_vmin0p65: %d kHz\n" - "max_dppclk_vmax0p9: %d kHz\n" - "max_dppclk_vnom0p8: %d kHz\n" - "max_dppclk_vmid0p72: %d kHz\n" - "max_dppclk_vmin0p65: %d kHz\n" - "socclk: %d kHz\n" - "fabric_and_dram_bandwidth_vmax0p9: %d MB/s\n" - "fabric_and_dram_bandwidth_vnom0p8: %d MB/s\n" - "fabric_and_dram_bandwidth_vmid0p72: %d MB/s\n" - "fabric_and_dram_bandwidth_vmin0p65: %d MB/s\n" - "phyclkv_max0p9: %d kHz\n" - "phyclkv_nom0p8: %d kHz\n" - "phyclkv_mid0p72: %d kHz\n" - "phyclkv_min0p65: %d kHz\n" - "downspreading: %d %\n" + "dcfclkv_max0p9: %f kHz\n" + "dcfclkv_nom0p8: %f kHz\n" + "dcfclkv_mid0p72: %f kHz\n" + "dcfclkv_min0p65: %f kHz\n" + "max_dispclk_vmax0p9: %f kHz\n" + "max_dispclk_vnom0p8: %f kHz\n" + "max_dispclk_vmid0p72: %f kHz\n" + "max_dispclk_vmin0p65: %f kHz\n" + "max_dppclk_vmax0p9: %f kHz\n" + "max_dppclk_vnom0p8: %f kHz\n" + "max_dppclk_vmid0p72: %f kHz\n" + "max_dppclk_vmin0p65: %f kHz\n" + "socclk: %f kHz\n" + "fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n" + "fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n" + "fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n" + "fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n" + "phyclkv_max0p9: %f kHz\n" + "phyclkv_nom0p8: %f kHz\n" + "phyclkv_mid0p72: %f kHz\n" + "phyclkv_min0p65: %f kHz\n" + "downspreading: %f %%\n" "round_trip_ping_latency_cycles: %d DCFCLK Cycles\n" "urgent_out_of_order_return_per_channel: %d Bytes\n" "number_of_channels: %d\n" "vmm_page_size: %d Bytes\n" - "dram_clock_change_latency: %d ns\n" + "dram_clock_change_latency: %f ns\n" "return_bus_width: %d Bytes\n", dc->dcn_soc->sr_exit_time * 1000, dc->dcn_soc->sr_enter_plus_exit_time * 1000, @@ -1527,11 +1527,11 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc) dc->dcn_soc->vmm_page_size, dc->dcn_soc->dram_clock_change_latency * 1000, dc->dcn_soc->return_bus_width); - DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %d\n" - "det_buffer_size_in_kbyte: %d\n" - "dpp_output_buffer_pixels: %d\n" - "opp_output_buffer_lines: %d\n" - 
"pixel_chunk_size_in_kbyte: %d\n" + DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n" + "det_buffer_size_in_kbyte: %f\n" + "dpp_output_buffer_pixels: %f\n" + "opp_output_buffer_lines: %f\n" + "pixel_chunk_size_in_kbyte: %f\n" "pte_enable: %d\n" "pte_chunk_size: %d kbytes\n" "meta_chunk_size: %d kbytes\n" @@ -1550,13 +1550,13 @@ void dcn_bw_sync_calcs_and_dml(struct dc *dc) "max_pscl_tolb_throughput: %d pixels/dppclk\n" "max_lb_tovscl_throughput: %d pixels/dppclk\n" "max_vscl_tohscl_throughput: %d pixels/dppclk\n" - "max_hscl_ratio: %d\n" - "max_vscl_ratio: %d\n" + "max_hscl_ratio: %f\n" + "max_vscl_ratio: %f\n" "max_hscl_taps: %d\n" "max_vscl_taps: %d\n" "pte_buffer_size_in_requests: %d\n" - "dispclk_ramping_margin: %d %\n" - "under_scan_factor: %d %\n" + "dispclk_ramping_margin: %f %%\n" + "under_scan_factor: %f %%\n" "max_inter_dcn_tile_repeaters: %d\n" "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n" "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n" diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 5a552cb3f8a7..71cc60fcff5e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -72,8 +72,8 @@ void pre_surface_trace( "plane_state->visible = %d;\n" "plane_state->flip_immediate = %d;\n" "plane_state->address.type = %d;\n" - "plane_state->address.grph.addr.quad_part = 0x%X;\n" - "plane_state->address.grph.meta_addr.quad_part = 0x%X;\n" + "plane_state->address.grph.addr.quad_part = 0x%llX;\n" + "plane_state->address.grph.meta_addr.quad_part = 0x%llX;\n" "plane_state->scaling_quality.h_taps = %d;\n" "plane_state->scaling_quality.v_taps = %d;\n" "plane_state->scaling_quality.h_taps_c = %d;\n" @@ -192,8 +192,8 @@ void update_surface_trace( SURFACE_TRACE("Update %d\n", i); if (update->flip_addr) { SURFACE_TRACE("flip_addr->address.type = %d;\n" - "flip_addr->address.grph.addr.quad_part = 0x%X;\n" - "flip_addr->address.grph.meta_addr.quad_part = 0x%X;\n" + "flip_addr->address.grph.addr.quad_part = 0x%llX;\n" + "flip_addr->address.grph.meta_addr.quad_part = 0x%llX;\n" "flip_addr->flip_immediate = %d;\n", update->flip_addr->address.type, update->flip_addr->address.grph.addr.quad_part, @@ -211,7 +211,8 @@ void update_surface_trace( "plane_info->plane_size.grph.surface_size.width = %d;\n" "plane_info->plane_size.grph.surface_size.x = %d;\n" "plane_info->plane_size.grph.surface_size.y = %d;\n" - "plane_info->rotation = %d;\n", + "plane_info->rotation = %d;\n" + "plane_info->stereo_format = %d;\n", update->plane_info->color_space, update->plane_info->input_tf, update->plane_info->format, @@ -371,6 +372,7 @@ void context_clock_trace( context->bw.dcn.calc_clk.dppclk_khz, context->bw.dcn.calc_clk.dcfclk_khz, context->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, - context->bw.dcn.calc_clk.fclk_khz); + context->bw.dcn.calc_clk.fclk_khz, + context->bw.dcn.calc_clk.socclk_khz); #endif } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index eeb04471b2f5..82ee9de23115 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2165,11 +2165,11 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { - DC_LOG_MST("stream_enc[%d]: 0x%x " + DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, - 
link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, @@ -2255,11 +2255,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { - DC_LOG_MST("stream_enc[%d]: 0x%x " + DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].vcp_id: %d " "stream[%d].slot_count: %d\n", i, - link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, i, link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 075ab291cdc7..c6212301712b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2777,13 +2777,13 @@ static void dce110_program_front_end_for_pipe( dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); DC_LOG_SURFACE( - "Pipe:%d 0x%x: addr hi:0x%x, " + "Pipe:%d %p: addr hi:0x%x, " "addr low:0x%x, " "src: %d, %d, %d," " %d; dst: %d, %d, %d, %d;" "clip: %d, %d, %d, %d\n", pipe_ctx->pipe_idx, - pipe_ctx->plane_state, + (void *) pipe_ctx->plane_state, pipe_ctx->plane_state->address.grph.addr.high_part, pipe_ctx->plane_state->address.grph.addr.low_part, pipe_ctx->plane_state->src_rect.x, diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c index abd0095ced30..b7256f595052 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/dce110/i2c_hw_engine_dce110.c @@ -527,7 +527,7 @@ static void construct( REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); if (xtal_ref_div == 0) { - DC_LOG_WARNING("Invalid base timer divider\n", + DC_LOG_WARNING("Invalid base timer divider [%s]\n", __func__); xtal_ref_div = 2; } -- cgit v1.2.1 From 8d815b4635382dddd58bf03bbcfac9f4e5201151 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Tue, 13 Mar 2018 16:40:51 -0400 Subject: drm/amd/display: Add num_active_wb to DML Signed-off-by: Eric Bernstein Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index e296de6ca502..ce750edc1e5f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -224,6 +224,7 @@ struct _vcs_dpi_display_output_params_st { int output_bpp; int dsc_enable; int wb_enable; + int num_active_wb; int opp_input_bpc; int output_type; int output_format; -- cgit v1.2.1 From 7608f8569d8fee1372d4a3409aea5cca0b13b194 Mon Sep 17 00:00:00 2001 From: Xingyue Tao Date: Wed, 14 Mar 2018 17:57:42 -0400 Subject: drm/amd/display: Add double buffer machanism to ICSC - Video playback shows tearing when adjusting brightness through radeon custom settings. 
- Now added double buffer mechanism to switch input CSC from register buffer ICSC and COMA - Improved tab alignment Signed-off-by: Xingyue Tao Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 11 +++++- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 44 ++++++++++++++-------- .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 31 ++++++++++++++- 3 files changed, 67 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index 17b062a8f88a..b81b2aa3c49f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -108,6 +108,8 @@ SRI(CM_DGAM_LUT_DATA, CM, id), \ SRI(CM_CONTROL, CM, id), \ SRI(CM_DGAM_CONTROL, CM, id), \ + SRI(CM_TEST_DEBUG_INDEX, CM, id), \ + SRI(CM_TEST_DEBUG_DATA, CM, id), \ SRI(FORMAT_CONTROL, CNVC_CFG, id), \ SRI(CNVC_SURFACE_PIXEL_FORMAT, CNVC_CFG, id), \ SRI(CURSOR0_CONTROL, CNVC_CUR, id), \ @@ -300,6 +302,7 @@ TF_SF(CM0_CM_DGAM_LUT_INDEX, CM_DGAM_LUT_INDEX, mask_sh), \ TF_SF(CM0_CM_DGAM_LUT_DATA, CM_DGAM_LUT_DATA, mask_sh), \ TF_SF(CM0_CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_TEST_DEBUG_INDEX, CM_TEST_DEBUG_INDEX, mask_sh), \ TF_SF(CNVC_CFG0_FORMAT_CONTROL, CNVC_BYPASS, mask_sh), \ TF2_SF(CNVC_CFG0, FORMAT_CONTROL__ALPHA_EN, mask_sh), \ TF_SF(CNVC_CFG0_FORMAT_CONTROL, FORMAT_EXPANSION_MODE, mask_sh), \ @@ -1010,6 +1013,8 @@ type CUR0_EXPANSION_MODE; \ type CUR0_ENABLE; \ type CM_BYPASS; \ + type CM_TEST_DEBUG_INDEX; \ + type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \ type FORMAT_CONTROL__ALPHA_EN; \ type CUR0_COLOR0; \ type CUR0_COLOR1; \ @@ -1255,6 +1260,8 @@ struct dcn_dpp_mask { uint32_t CM_IGAM_LUT_RW_CONTROL; \ uint32_t CM_IGAM_LUT_RW_INDEX; \ uint32_t CM_IGAM_LUT_SEQ_COLOR; \ + uint32_t CM_TEST_DEBUG_INDEX; \ + uint32_t CM_TEST_DEBUG_DATA; \ uint32_t FORMAT_CONTROL; \ uint32_t CNVC_SURFACE_PIXEL_FORMAT; \ uint32_t CURSOR_CONTROL; \ @@ -1289,8 +1296,8 @@ struct dcn10_dpp { enum dcn10_input_csc_select { INPUT_CSC_SELECT_BYPASS = 0, - INPUT_CSC_SELECT_ICSC, - INPUT_CSC_SELECT_COMA + INPUT_CSC_SELECT_ICSC = 1, + INPUT_CSC_SELECT_COMA = 2 }; void dpp1_set_cursor_attributes( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index fb32975e4b67..cc511415caee 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -267,6 +267,7 @@ void dpp1_cm_set_output_csc_default( BREAK_TO_DEBUGGER(); return; } + dpp1_cm_program_color_matrix(dpp, regval); REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); } @@ -330,6 +331,7 @@ void dpp1_cm_set_output_csc_adjustment( { struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); uint32_t ocsc_mode = 4; + dpp1_cm_program_color_matrix(dpp, regval); REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); } @@ -437,17 +439,18 @@ void dpp1_cm_program_regamma_lutb_settings( void dpp1_program_input_csc( struct dpp *dpp_base, enum dc_color_space color_space, - enum dcn10_input_csc_select select, + enum dcn10_input_csc_select input_select, const struct out_csc_color_matrix *tbl_entry) { struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); int i; int arr_size = sizeof(dcn10_input_csc_matrix)/sizeof(struct dcn10_input_csc_matrix); const uint16_t *regval = NULL; - uint32_t selection = 1; + uint32_t cur_select = 0; + enum dcn10_input_csc_select select; 
struct color_matrices_reg gam_regs; - if (select == INPUT_CSC_SELECT_BYPASS) { + if (input_select == INPUT_CSC_SELECT_BYPASS) { REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0); return; } @@ -467,36 +470,45 @@ void dpp1_program_input_csc( regval = tbl_entry->regval; } - if (select == INPUT_CSC_SELECT_COMA) - selection = 2; - REG_SET(CM_ICSC_CONTROL, 0, - CM_ICSC_MODE, selection); + /* determine which CSC matrix (icsc or coma) we are using + * currently. select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + REG_SET(CM_TEST_DEBUG_INDEX, 0, + CM_TEST_DEBUG_INDEX, 9); + + REG_GET(CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select); + + if (cur_select != INPUT_CSC_SELECT_ICSC) + select = INPUT_CSC_SELECT_ICSC; + else + select = INPUT_CSC_SELECT_COMA; gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11; gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11; gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12; gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12; - if (select == INPUT_CSC_SELECT_ICSC) { gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12); gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34); - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); } else { gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12); gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34); - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET(CM_ICSC_CONTROL, 0, + CM_ICSC_MODE, select); } //keep here for now, decide multi dce support later diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index a3fe343b4a85..d321da97217c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -319,12 +319,41 @@ static const struct dcn_dpp_registers tf_regs[] = { tf_regs(3), }; +/* + * + DCN1 CM debug status register definition + + register :ID9_CM_STATUS do + implement_ref :cm + map to: :cmdebugind, at: j + width 32 + disclosure NEVER + + field :ID9_VUPDATE_CFG, [0], R + field :ID9_IGAM_LUT_MODE, [2..1], R + field :ID9_BNS_BYPASS, [3], R + field :ID9_ICSC_MODE, [5..4], R + field :ID9_DGAM_LUT_MODE, [8..6], R + field :ID9_HDR_BYPASS, [9], R + field :ID9_GAMUT_REMAP_MODE, [11..10], R + field :ID9_RGAM_LUT_MODE, [14..12], R + #1 free bit + field :ID9_OCSC_MODE, [18..16], R + field :ID9_DENORM_MODE, [21..19], R + field :ID9_ROUND_TRUNC_MODE, [25..22], R + field :ID9_DITHER_EN, [26], R + field :ID9_DITHER_MODE, [28..27], R + end +*/ + static const struct dcn_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN10(__SHIFT) + TF_REG_LIST_SH_MASK_DCN10(__SHIFT), + .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x4 }; static const struct dcn_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN10(_MASK), + .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30 }; static const struct dcn_mpc_registers mpc_regs = { -- cgit v1.2.1 From f412e8307d0ac6cbffd1240fb655557c126a0f2c Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 15 Mar 2018 13:31:14 -0400 Subject: drm/amd/display: Couple bug fixes in stats module Signed-off-by: Harry Wentland Reviewed-by: Harry Wentland Reviewed-by: Tony Cheng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/stats/stats.c | 39 ++++++++++++++++------- 1 file changed, 28 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c 
b/drivers/gpu/drm/amd/display/modules/stats/stats.c index 041f87b73d5f..ed5f6809a64e 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -187,7 +187,7 @@ void mod_stats_dump(struct mod_stats *mod_stats) for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) { dm_logger_write(logger, LOG_PROFILING, - "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u\n", + "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", time[i].render_time_in_us, time[i].avg_render_time_in_us_last_ten, time[i].min_window, @@ -227,7 +227,7 @@ void mod_stats_reset_data(struct mod_stats *mod_stats) memset(core_stats->time, 0, sizeof(struct stats_time_cache) * core_stats->entries); - core_stats->index = 0; + core_stats->index = 1; } void mod_stats_update_flip(struct mod_stats *mod_stats, @@ -250,7 +250,7 @@ void mod_stats_update_flip(struct mod_stats *mod_stats, time[index].flip_timestamp_in_ns = timestamp_in_ns; time[index].render_time_in_us = - timestamp_in_ns - time[index - 1].flip_timestamp_in_ns; + (timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000; if (index >= 10) { for (unsigned int i = 0; i < 10; i++) @@ -261,10 +261,12 @@ void mod_stats_update_flip(struct mod_stats *mod_stats, if (time[index].num_vsync_between_flips > 0) time[index].vsync_to_flip_time_in_us = - timestamp_in_ns - time[index].vupdate_timestamp_in_ns; + (timestamp_in_ns - + time[index].vupdate_timestamp_in_ns) / 1000; else time[index].vsync_to_flip_time_in_us = - timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns; + (timestamp_in_ns - + time[index - 1].vupdate_timestamp_in_ns) / 1000; core_stats->index++; } @@ -275,6 +277,8 @@ void mod_stats_update_vupdate(struct mod_stats *mod_stats, struct core_stats *core_stats = NULL; struct stats_time_cache *time = NULL; unsigned int index = 0; + unsigned int num_vsyncs = 0; + unsigned int prev_vsync_in_ns = 0; if (mod_stats == NULL) return; @@ -286,14 +290,27 @@ void mod_stats_update_vupdate(struct mod_stats *mod_stats, time = core_stats->time; index = core_stats->index; + num_vsyncs = time[index].num_vsync_between_flips; + + if (num_vsyncs < MOD_STATS_NUM_VSYNCS) { + if (num_vsyncs == 0) { + prev_vsync_in_ns = + time[index - 1].vupdate_timestamp_in_ns; + + time[index].flip_to_vsync_time_in_us = + (timestamp_in_ns - + time[index - 1].flip_timestamp_in_ns) / + 1000; + } else { + prev_vsync_in_ns = + time[index].vupdate_timestamp_in_ns; + } - time[index].vupdate_timestamp_in_ns = timestamp_in_ns; - if (time[index].num_vsync_between_flips < MOD_STATS_NUM_VSYNCS) - time[index].v_sync_time_in_us[time[index].num_vsync_between_flips] = - timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns; - time[index].flip_to_vsync_time_in_us = - timestamp_in_ns - time[index - 1].flip_timestamp_in_ns; + time[index].v_sync_time_in_us[num_vsyncs] = + (timestamp_in_ns - prev_vsync_in_ns) / 1000; + } + time[index].vupdate_timestamp_in_ns = timestamp_in_ns; time[index].num_vsync_between_flips++; } -- cgit v1.2.1 From e09b6473c605119a5f7c451a93a9e812e216a824 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 15 Mar 2018 14:18:18 -0400 Subject: drm/amd/display: Rename encoder_info_packet to dc_info_packet Move this out of the HW includes to dc_types.h Signed-off-by: Harry Wentland Reviewed-by: Harry Wentland Reviewed-by: Tony Cheng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 99 +++++++++++----------- drivers/gpu/drm/amd/display/dc/dc_types.h | 9 ++ 
.../drm/amd/display/dc/dce/dce_stream_encoder.c | 4 +- .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 21 ++--- 4 files changed, 65 insertions(+), 68 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index cae78ee9a6fc..379b05536321 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1811,7 +1811,7 @@ enum dc_status dc_validate_global_state( } static void patch_gamut_packet_checksum( - struct encoder_info_packet *gamut_packet) + struct dc_info_packet *gamut_packet) { /* For gamut we recalc checksum */ if (gamut_packet->valid) { @@ -1830,12 +1830,11 @@ static void patch_gamut_packet_checksum( } static void set_avi_info_frame( - struct encoder_info_packet *info_packet, + struct dc_info_packet *info_packet, struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; - struct info_frame info_frame = { {0} }; uint32_t pixel_encoding = 0; enum scanning_type scan_type = SCANNING_TYPE_NODATA; enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA; @@ -1845,7 +1844,7 @@ static void set_avi_info_frame( unsigned int cn0_cn1_value = 0; uint8_t *check_sum = NULL; uint8_t byte_index = 0; - union hdmi_info_packet *hdmi_info = &info_frame.avi_info_packet.info_packet_hdmi; + union hdmi_info_packet hdmi_info = {0}; union display_content_support support = {0}; unsigned int vic = pipe_ctx->stream->timing.vic; enum dc_timing_3d_format format; @@ -1856,11 +1855,11 @@ static void set_avi_info_frame( COLOR_SPACE_SRGB:COLOR_SPACE_YCBCR709; /* Initialize header */ - hdmi_info->bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI; + hdmi_info.bits.header.info_frame_type = HDMI_INFOFRAME_TYPE_AVI; /* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall * not be used in HDMI 2.0 (Section 10.1) */ - hdmi_info->bits.header.version = 2; - hdmi_info->bits.header.length = HDMI_AVI_INFOFRAME_SIZE; + hdmi_info.bits.header.version = 2; + hdmi_info.bits.header.length = HDMI_AVI_INFOFRAME_SIZE; /* * IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built @@ -1886,39 +1885,39 @@ static void set_avi_info_frame( /* Y0_Y1_Y2 : The pixel encoding */ /* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */ - hdmi_info->bits.Y0_Y1_Y2 = pixel_encoding; + hdmi_info.bits.Y0_Y1_Y2 = pixel_encoding; /* A0 = 1 Active Format Information valid */ - hdmi_info->bits.A0 = ACTIVE_FORMAT_VALID; + hdmi_info.bits.A0 = ACTIVE_FORMAT_VALID; /* B0, B1 = 3; Bar info data is valid */ - hdmi_info->bits.B0_B1 = BAR_INFO_BOTH_VALID; + hdmi_info.bits.B0_B1 = BAR_INFO_BOTH_VALID; - hdmi_info->bits.SC0_SC1 = PICTURE_SCALING_UNIFORM; + hdmi_info.bits.SC0_SC1 = PICTURE_SCALING_UNIFORM; /* S0, S1 : Underscan / Overscan */ /* TODO: un-hardcode scan type */ scan_type = SCANNING_TYPE_UNDERSCAN; - hdmi_info->bits.S0_S1 = scan_type; + hdmi_info.bits.S0_S1 = scan_type; /* C0, C1 : Colorimetry */ if (color_space == COLOR_SPACE_YCBCR709 || color_space == COLOR_SPACE_YCBCR709_LIMITED) - hdmi_info->bits.C0_C1 = COLORIMETRY_ITU709; + hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709; else if (color_space == COLOR_SPACE_YCBCR601 || color_space == COLOR_SPACE_YCBCR601_LIMITED) - hdmi_info->bits.C0_C1 = COLORIMETRY_ITU601; + hdmi_info.bits.C0_C1 = COLORIMETRY_ITU601; else { - hdmi_info->bits.C0_C1 = COLORIMETRY_NO_DATA; + hdmi_info.bits.C0_C1 = COLORIMETRY_NO_DATA; } if (color_space == 
COLOR_SPACE_2020_RGB_FULLRANGE || color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE || color_space == COLOR_SPACE_2020_YCBCR) { - hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR; - hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED; + hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_BT2020RGBYCBCR; + hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; } else if (color_space == COLOR_SPACE_ADOBERGB) { - hdmi_info->bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB; - hdmi_info->bits.C0_C1 = COLORIMETRY_EXTENDED; + hdmi_info.bits.EC0_EC2 = COLORIMETRYEX_ADOBERGB; + hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED; } /* TODO: un-hardcode aspect ratio */ @@ -1927,18 +1926,18 @@ static void set_avi_info_frame( switch (aspect) { case ASPECT_RATIO_4_3: case ASPECT_RATIO_16_9: - hdmi_info->bits.M0_M1 = aspect; + hdmi_info.bits.M0_M1 = aspect; break; case ASPECT_RATIO_NO_DATA: case ASPECT_RATIO_64_27: case ASPECT_RATIO_256_135: default: - hdmi_info->bits.M0_M1 = 0; + hdmi_info.bits.M0_M1 = 0; } /* Active Format Aspect ratio - same as Picture Aspect Ratio. */ - hdmi_info->bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE; + hdmi_info.bits.R0_R3 = ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE; /* TODO: un-hardcode cn0_cn1 and itc */ @@ -1981,8 +1980,8 @@ static void set_avi_info_frame( } } } - hdmi_info->bits.CN0_CN1 = cn0_cn1_value; - hdmi_info->bits.ITC = itc_value; + hdmi_info.bits.CN0_CN1 = cn0_cn1_value; + hdmi_info.bits.ITC = itc_value; } /* TODO : We should handle YCC quantization */ @@ -1991,19 +1990,19 @@ static void set_avi_info_frame( stream->sink->edid_caps.qy_bit == 1) { if (color_space == COLOR_SPACE_SRGB || color_space == COLOR_SPACE_2020_RGB_FULLRANGE) { - hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; - hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE; + hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE; } else if (color_space == COLOR_SPACE_SRGB_LIMITED || color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) { - hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE; - hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; + hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE; + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; } else { - hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; - hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; + hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; } } else { - hdmi_info->bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; - hdmi_info->bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; + hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_DEFAULT_RANGE; + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; } ///VIC @@ -2028,51 +2027,49 @@ static void set_avi_info_frame( break; } } - hdmi_info->bits.VIC0_VIC7 = vic; + hdmi_info.bits.VIC0_VIC7 = vic; /* pixel repetition * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel * repetition start from 1 */ - hdmi_info->bits.PR0_PR3 = 0; + hdmi_info.bits.PR0_PR3 = 0; /* Bar Info * barTop: Line Number of End of Top Bar. * barBottom: Line Number of Start of Bottom Bar. * barLeft: Pixel Number of End of Left Bar. * barRight: Pixel Number of Start of Right Bar. 
*/ - hdmi_info->bits.bar_top = stream->timing.v_border_top; - hdmi_info->bits.bar_bottom = (stream->timing.v_total + hdmi_info.bits.bar_top = stream->timing.v_border_top; + hdmi_info.bits.bar_bottom = (stream->timing.v_total - stream->timing.v_border_bottom + 1); - hdmi_info->bits.bar_left = stream->timing.h_border_left; - hdmi_info->bits.bar_right = (stream->timing.h_total + hdmi_info.bits.bar_left = stream->timing.h_border_left; + hdmi_info.bits.bar_right = (stream->timing.h_total - stream->timing.h_border_right + 1); /* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */ - check_sum = &info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0]; + check_sum = &hdmi_info.packet_raw_data.sb[0]; *check_sum = HDMI_INFOFRAME_TYPE_AVI + HDMI_AVI_INFOFRAME_SIZE + 2; for (byte_index = 1; byte_index <= HDMI_AVI_INFOFRAME_SIZE; byte_index++) - *check_sum += hdmi_info->packet_raw_data.sb[byte_index]; + *check_sum += hdmi_info.packet_raw_data.sb[byte_index]; /* one byte complement */ *check_sum = (uint8_t) (0x100 - *check_sum); /* Store in hw_path_mode */ - info_packet->hb0 = hdmi_info->packet_raw_data.hb0; - info_packet->hb1 = hdmi_info->packet_raw_data.hb1; - info_packet->hb2 = hdmi_info->packet_raw_data.hb2; + info_packet->hb0 = hdmi_info.packet_raw_data.hb0; + info_packet->hb1 = hdmi_info.packet_raw_data.hb1; + info_packet->hb2 = hdmi_info.packet_raw_data.hb2; - for (byte_index = 0; byte_index < sizeof(info_frame.avi_info_packet. - info_packet_hdmi.packet_raw_data.sb); byte_index++) - info_packet->sb[byte_index] = info_frame.avi_info_packet. - info_packet_hdmi.packet_raw_data.sb[byte_index]; + for (byte_index = 0; byte_index < sizeof(hdmi_info.packet_raw_data.sb); byte_index++) + info_packet->sb[byte_index] = hdmi_info.packet_raw_data.sb[byte_index]; info_packet->valid = true; } static void set_vendor_info_packet( - struct encoder_info_packet *info_packet, + struct dc_info_packet *info_packet, struct dc_stream_state *stream) { uint32_t length = 0; @@ -2185,7 +2182,7 @@ static void set_vendor_info_packet( } static void set_spd_info_packet( - struct encoder_info_packet *info_packet, + struct dc_info_packet *info_packet, struct dc_stream_state *stream) { /* SPD info packet for FreeSync */ @@ -2306,7 +2303,7 @@ static void set_spd_info_packet( } static void set_hdr_static_info_packet( - struct encoder_info_packet *info_packet, + struct dc_info_packet *info_packet, struct dc_stream_state *stream) { uint16_t i = 0; @@ -2403,7 +2400,7 @@ static void set_hdr_static_info_packet( } static void set_vsc_info_packet( - struct encoder_info_packet *info_packet, + struct dc_info_packet *info_packet, struct dc_stream_state *stream) { unsigned int vscPacketRevision = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 9441305d3ab5..cd324bcc45e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -530,6 +530,15 @@ struct vrr_params { uint32_t frame_counter; }; +struct dc_info_packet { + bool valid; + uint8_t hb0; + uint8_t hb1; + uint8_t hb2; + uint8_t hb3; + uint8_t sb[32]; +}; + #define DC_PLANE_UPDATE_TIMES_MAX 10 struct dc_plane_flip_time { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 444558ca6533..b85fda5f38e8 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -80,7 +80,7 @@ enum { static void 
dce110_update_generic_info_packet( struct dce110_stream_encoder *enc110, uint32_t packet_index, - const struct encoder_info_packet *info_packet) + const struct dc_info_packet *info_packet) { uint32_t regval; /* TODOFPGA Figure out a proper number for max_retries polling for lock @@ -196,7 +196,7 @@ static void dce110_update_generic_info_packet( static void dce110_update_hdmi_info_packet( struct dce110_stream_encoder *enc110, uint32_t packet_index, - const struct encoder_info_packet *info_packet) + const struct dc_info_packet *info_packet) { uint32_t cont, send, line; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index b5db1692393c..5c21336cae4c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -34,26 +34,17 @@ struct dc_bios; struct dc_context; struct dc_crtc_timing; -struct encoder_info_packet { - bool valid; - uint8_t hb0; - uint8_t hb1; - uint8_t hb2; - uint8_t hb3; - uint8_t sb[32]; -}; - struct encoder_info_frame { /* auxiliary video information */ - struct encoder_info_packet avi; - struct encoder_info_packet gamut; - struct encoder_info_packet vendor; + struct dc_info_packet avi; + struct dc_info_packet gamut; + struct dc_info_packet vendor; /* source product description */ - struct encoder_info_packet spd; + struct dc_info_packet spd; /* video stream configuration */ - struct encoder_info_packet vsc; + struct dc_info_packet vsc; /* HDR Static MetaData */ - struct encoder_info_packet hdrsmd; + struct dc_info_packet hdrsmd; }; struct encoder_unblank_param { -- cgit v1.2.1 From 6e5b3587dbf6aaf7f9eef4956a3fce12bf7e9ffa Mon Sep 17 00:00:00 2001 From: SivapiriyanKumarasamy Date: Wed, 14 Mar 2018 09:15:24 -0400 Subject: drm/amd/display: Add vline IRQ programming for DCN Signed-off-by: SivapiriyanKumarasamy Reviewed-by: Anthony Koo Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 6 ++ drivers/gpu/drm/amd/display/dc/dc_stream.h | 4 ++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 76 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 14 ++++ .../drm/amd/display/dc/inc/hw/timing_generator.h | 3 + drivers/gpu/drm/amd/display/dc/irq_types.h | 7 ++ 6 files changed, 110 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 63a3d468939a..554cf975be05 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1375,6 +1375,12 @@ static void commit_planes_for_stream(struct dc *dc, pipe_ctx->stream_res.abm->funcs->set_abm_level( pipe_ctx->stream_res.abm, stream->abm_level); } + + if (stream_update && stream_update->periodic_fn_vsync_delta && + pipe_ctx->stream_res.tg->funcs->program_vline_interrupt) + pipe_ctx->stream_res.tg->funcs->program_vline_interrupt( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, + pipe_ctx->stream->periodic_fn_vsync_delta); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 3a7093ede569..8d5161060b60 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -70,6 +70,9 @@ struct dc_stream_state { enum color_transfer_func output_tf; bool ignore_msa_timing_param; + + unsigned long long periodic_fn_vsync_delta; + /* TODO: custom INFO packets */ /* TODO: ABM info (DMCU) */ /* PSR info */ 
@@ -113,6 +116,7 @@ struct dc_stream_update { struct dc_hdr_static_metadata *hdr_static_metadata; enum color_transfer_func color_output_tf; unsigned int *abm_level; + unsigned long long *periodic_fn_vsync_delta; }; bool dc_is_stream_unchanged( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 4bf64d1b2c60..f56eac0e4dd2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -93,6 +93,81 @@ static void optc1_disable_stereo(struct timing_generator *optc) OTG_3D_STRUCTURE_STEREO_SEL_OVR, 0); } +static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing) +{ + struct dc_crtc_timing patched_crtc_timing; + uint32_t vesa_sync_start; + uint32_t asic_blank_end; + uint32_t interlace_factor; + uint32_t vertical_line_start; + + patched_crtc_timing = *dc_crtc_timing; + optc1_apply_front_porch_workaround(optc, &patched_crtc_timing); + + vesa_sync_start = patched_crtc_timing.h_addressable + + patched_crtc_timing.h_border_right + + patched_crtc_timing.h_front_porch; + + asic_blank_end = patched_crtc_timing.h_total - + vesa_sync_start - + patched_crtc_timing.h_border_left; + + interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1; + + vesa_sync_start = patched_crtc_timing.v_addressable + + patched_crtc_timing.v_border_bottom + + patched_crtc_timing.v_front_porch; + + asic_blank_end = (patched_crtc_timing.v_total - + vesa_sync_start - + patched_crtc_timing.v_border_top) + * interlace_factor; + + vertical_line_start = asic_blank_end - optc->dlg_otg_param.vstartup_start + 1; + if (vertical_line_start < 0) { + ASSERT(0); + vertical_line_start = 0; + } + + return vertical_line_start; +} + +void optc1_program_vline_interrupt( + struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing, + unsigned long long vsync_delta) +{ + + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + unsigned long long req_delta_tens_of_usec = div64_u64((vsync_delta + 9999), 10000); + unsigned long long pix_clk_hundreds_khz = div64_u64((dc_crtc_timing->pix_clk_khz + 99), 100); + uint32_t req_delta_lines = (uint32_t) div64_u64( + (req_delta_tens_of_usec * pix_clk_hundreds_khz + dc_crtc_timing->h_total - 1), + dc_crtc_timing->h_total); + + uint32_t vsync_line = get_start_vline(optc, dc_crtc_timing); + uint32_t start_line = 0; + uint32_t endLine = 0; + + if (req_delta_lines != 0) + req_delta_lines--; + + if (req_delta_lines > vsync_line) + start_line = dc_crtc_timing->v_total - (req_delta_lines - vsync_line) - 1; + else + start_line = vsync_line - req_delta_lines; + + endLine = start_line + 2; + + if (endLine >= dc_crtc_timing->v_total) + endLine = 2; + + REG_SET_2(OTG_VERTICAL_INTERRUPT0_POSITION, 0, + OTG_VERTICAL_INTERRUPT0_LINE_START, start_line, + OTG_VERTICAL_INTERRUPT0_LINE_END, endLine); +} + /** * program_timing_generator used by mode timing set * Program CRTC Timing Registers - OTG_H_*, OTG_V_*, Pixel repetition. 
@@ -1215,6 +1290,7 @@ static bool optc1_is_optc_underflow_occurred(struct timing_generator *optc) static const struct timing_generator_funcs dcn10_tg_funcs = { .validate_timing = optc1_validate_timing, .program_timing = optc1_program_timing, + .program_vline_interrupt = optc1_program_vline_interrupt, .program_global_sync = optc1_program_global_sync, .enable_crtc = optc1_enable_crtc, .disable_crtc = optc1_disable_crtc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index d25e7bf0d0d7..5a9a73d69fd6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -65,6 +65,8 @@ SRI(OTG_NOM_VERT_POSITION, OTG, inst),\ SRI(OTG_BLACK_COLOR, OTG, inst),\ SRI(OTG_CLOCK_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_CONTROL, OTG, inst),\ + SRI(OTG_VERTICAL_INTERRUPT0_POSITION, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT2_CONTROL, OTG, inst),\ SRI(OTG_VERTICAL_INTERRUPT2_POSITION, OTG, inst),\ SRI(OPTC_INPUT_CLOCK_CONTROL, ODM, inst),\ @@ -124,6 +126,8 @@ struct dcn_optc_registers { uint32_t OTG_TEST_PATTERN_CONTROL; uint32_t OTG_TEST_PATTERN_COLOR; uint32_t OTG_CLOCK_CONTROL; + uint32_t OTG_VERTICAL_INTERRUPT0_CONTROL; + uint32_t OTG_VERTICAL_INTERRUPT0_POSITION; uint32_t OTG_VERTICAL_INTERRUPT2_CONTROL; uint32_t OTG_VERTICAL_INTERRUPT2_POSITION; uint32_t OPTC_INPUT_CLOCK_CONTROL; @@ -206,6 +210,9 @@ struct dcn_optc_registers { SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_EN, mask_sh),\ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_ON, mask_sh),\ SF(OTG0_OTG_CLOCK_CONTROL, OTG_CLOCK_GATE_DIS, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_START, mask_sh),\ + SF(OTG0_OTG_VERTICAL_INTERRUPT0_POSITION, OTG_VERTICAL_INTERRUPT0_LINE_END, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL, OTG_VERTICAL_INTERRUPT2_INT_ENABLE, mask_sh),\ SF(OTG0_OTG_VERTICAL_INTERRUPT2_POSITION, OTG_VERTICAL_INTERRUPT2_LINE_START, mask_sh),\ SF(ODM0_OPTC_INPUT_CLOCK_CONTROL, OPTC_INPUT_CLK_EN, mask_sh),\ @@ -323,6 +330,9 @@ struct dcn_optc_registers { type OTG_CLOCK_EN;\ type OTG_CLOCK_ON;\ type OTG_CLOCK_GATE_DIS;\ + type OTG_VERTICAL_INTERRUPT0_INT_ENABLE;\ + type OTG_VERTICAL_INTERRUPT0_LINE_START;\ + type OTG_VERTICAL_INTERRUPT0_LINE_END;\ type OTG_VERTICAL_INTERRUPT2_INT_ENABLE;\ type OTG_VERTICAL_INTERRUPT2_LINE_START;\ type OPTC_INPUT_CLK_EN;\ @@ -420,6 +430,10 @@ void optc1_program_timing( const struct dc_crtc_timing *dc_crtc_timing, bool use_vbios); +void optc1_program_vline_interrupt(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing, + unsigned long long vsync_delta); + void optc1_program_global_sync( struct timing_generator *optc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 3217b5bf6c7a..69cb0a105300 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -140,6 +140,9 @@ struct timing_generator_funcs { void (*program_timing)(struct timing_generator *tg, const struct dc_crtc_timing *timing, bool use_vbios); + void (*program_vline_interrupt)(struct timing_generator *optc, + const struct dc_crtc_timing *dc_crtc_timing, + unsigned long long vsync_delta); bool (*enable_crtc)(struct timing_generator *tg); bool (*disable_crtc)(struct timing_generator *tg); bool (*is_counter_moving)(struct 
timing_generator *tg); diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index a506c2e939f5..cc3b1bc6cedd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -135,6 +135,13 @@ enum dc_irq_source { DC_IRQ_SOURCE_VBLANK5, DC_IRQ_SOURCE_VBLANK6, + DC_IRQ_SOURCE_DC1_VLINE0, + DC_IRQ_SOURCE_DC2_VLINE0, + DC_IRQ_SOURCE_DC3_VLINE0, + DC_IRQ_SOURCE_DC4_VLINE0, + DC_IRQ_SOURCE_DC5_VLINE0, + DC_IRQ_SOURCE_DC6_VLINE0, + DAL_IRQ_SOURCES_NUMBER }; -- cgit v1.2.1 From 5813dd1c0c4e06b3321142cd2da99909a1f41707 Mon Sep 17 00:00:00 2001 From: Xingyue Tao Date: Fri, 16 Mar 2018 15:20:48 -0400 Subject: drm/amd/display: Add double buffer mechanism to OCSC - Added double buffer mechanism to output CSC so that there's no tearing when adjusting brightness from Radeon settings Signed-off-by: Xingyue Tao Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 36 +++++++++++++++++ .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 46 +++++++++++++--------- .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 32 ++------------- 3 files changed, 67 insertions(+), 47 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index b81b2aa3c49f..9b5ff76a8027 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -420,6 +420,41 @@ TF_SF(CURSOR0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \ TF_SF(DPP_TOP0_DPP_CONTROL, DPPCLK_RATE_CONTROL, mask_sh) +/* + * + DCN1 CM debug status register definition + + register :ID9_CM_STATUS do + implement_ref :cm + map to: :cmdebugind, at: j + width 32 + disclosure NEVER + + field :ID9_VUPDATE_CFG, [0], R + field :ID9_IGAM_LUT_MODE, [2..1], R + field :ID9_BNS_BYPASS, [3], R + field :ID9_ICSC_MODE, [5..4], R + field :ID9_DGAM_LUT_MODE, [8..6], R + field :ID9_HDR_BYPASS, [9], R + field :ID9_GAMUT_REMAP_MODE, [11..10], R + field :ID9_RGAM_LUT_MODE, [14..12], R + #1 free bit + field :ID9_OCSC_MODE, [18..16], R + field :ID9_DENORM_MODE, [21..19], R + field :ID9_ROUND_TRUNC_MODE, [25..22], R + field :ID9_DITHER_EN, [26], R + field :ID9_DITHER_MODE, [28..27], R + end +*/ + +#define TF_DEBUG_REG_LIST_SH_DCN10 \ + .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 4, \ + .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 16 + +#define TF_DEBUG_REG_LIST_MASK_DCN10 \ + .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30, \ + .CM_TEST_DEBUG_DATA_ID9_OCSC_MODE = 0x70000 + #define TF_REG_FIELD_LIST(type) \ type EXT_OVERSCAN_LEFT; \ type EXT_OVERSCAN_RIGHT; \ @@ -1015,6 +1050,7 @@ type CM_BYPASS; \ type CM_TEST_DEBUG_INDEX; \ type CM_TEST_DEBUG_DATA_ID9_ICSC_MODE; \ + type CM_TEST_DEBUG_DATA_ID9_OCSC_MODE;\ type FORMAT_CONTROL__ALPHA_EN; \ type CUR0_COLOR0; \ type CUR0_COLOR1; \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index cc511415caee..4f373c97804f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -216,41 +216,55 @@ static void dpp1_cm_program_color_matrix( struct dcn10_dpp *dpp, const uint16_t *regval) { - uint32_t mode; + uint32_t ocsc_mode; + uint32_t cur_mode; struct color_matrices_reg gam_regs; - REG_GET(CM_OCSC_CONTROL, CM_OCSC_MODE, &mode); - if (regval == NULL) { BREAK_TO_DEBUGGER(); return; } - mode = 4; + + /* determine which CSC matrix
(ocsc or comb) we are using + * currently. select the alternate set to double buffer + * the CSC update so CSC is updated on frame boundary + */ + REG_SET(CM_TEST_DEBUG_INDEX, 0, + CM_TEST_DEBUG_INDEX, 9); + + REG_GET(CM_TEST_DEBUG_DATA, + CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode); + + if (cur_mode != 4) + ocsc_mode = 4; + else + ocsc_mode = 5; + + gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11; gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11; gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12; gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12; - if (mode == 4) { + if (ocsc_mode == 4) { gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12); gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34); - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); - } else { gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12); gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34); - cm_helper_program_color_matrices( - dpp->base.ctx, - regval, - &gam_regs); } + + cm_helper_program_color_matrices( + dpp->base.ctx, + regval, + &gam_regs); + + REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); + } void dpp1_cm_set_output_csc_default( @@ -260,7 +274,6 @@ void dpp1_cm_set_output_csc_default( struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); const uint16_t *regval = NULL; int arr_size; - uint32_t ocsc_mode = 4; regval = find_color_matrix(colorspace, &arr_size); if (regval == NULL) { @@ -269,7 +282,6 @@ void dpp1_cm_set_output_csc_default( } dpp1_cm_program_color_matrix(dpp, regval); - REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); } static void dpp1_cm_get_reg_field( @@ -330,10 +342,8 @@ void dpp1_cm_set_output_csc_adjustment( const uint16_t *regval) { struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); - uint32_t ocsc_mode = 4; dpp1_cm_program_color_matrix(dpp, regval); - REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode); } void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index d321da97217c..7ad290cbc730 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -319,41 +319,15 @@ static const struct dcn_dpp_registers tf_regs[] = { tf_regs(3), }; -/* - * - DCN1 CM debug status register definition - - register :ID9_CM_STATUS do - implement_ref :cm - map to: :cmdebugind, at: j - width 32 - disclosure NEVER - - field :ID9_VUPDATE_CFG, [0], R - field :ID9_IGAM_LUT_MODE, [2..1], R - field :ID9_BNS_BYPASS, [3], R - field :ID9_ICSC_MODE, [5..4], R - field :ID9_DGAM_LUT_MODE, [8..6], R - field :ID9_HDR_BYPASS, [9], R - field :ID9_GAMUT_REMAP_MODE, [11..10], R - field :ID9_RGAM_LUT_MODE, [14..12], R - #1 free bit - field :ID9_OCSC_MODE, [18..16], R - field :ID9_DENORM_MODE, [21..19], R - field :ID9_ROUND_TRUNC_MODE, [25..22], R - field :ID9_DITHER_EN, [26], R - field :ID9_DITHER_MODE, [28..27], R - end -*/ - static const struct dcn_dpp_shift tf_shift = { TF_REG_LIST_SH_MASK_DCN10(__SHIFT), - .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x4 + TF_DEBUG_REG_LIST_SH_DCN10 + }; static const struct dcn_dpp_mask tf_mask = { TF_REG_LIST_SH_MASK_DCN10(_MASK), - .CM_TEST_DEBUG_DATA_ID9_ICSC_MODE = 0x30 + TF_DEBUG_REG_LIST_MASK_DCN10 }; static const struct dcn_mpc_registers mpc_regs = { -- cgit v1.2.1 From 87943159f4093d2dae22abccbe046dac0fbdad4f Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 15 Mar 2018 14:54:30 -0400 Subject: drm/amd/display: Only program MSA_TIMING_PARAM if it changed Signed-off-by: Harry Wentland Reviewed-by: 
Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 82ee9de23115..c18f24afa698 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1183,16 +1183,21 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; - union down_spread_ctrl downspread; + union down_spread_ctrl old_downspread; + union down_spread_ctrl new_downspread; core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); + &old_downspread.raw, sizeof(old_downspread)); - downspread.bits.IGNORE_MSA_TIMING_PARAM = + new_downspread.raw = old_downspread.raw; + + new_downspread.bits.IGNORE_MSA_TIMING_PARAM = (stream->ignore_msa_timing_param) ? 1 : 0; - core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, - &downspread.raw, sizeof(downspread)); + if (new_downspread.raw != old_downspread.raw) { + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &new_downspread.raw, sizeof(new_downspread)); + } } static enum dc_status enable_link_dp( -- cgit v1.2.1 From 44d09c6a577c8ed4e0ef50257487c071ae5e0fa2 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 15 Mar 2018 14:29:24 -0400 Subject: drm/amd/display: Move commit_planes_to_stream to amdgpu_dm Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 108 +++++++++++++++++++++- drivers/gpu/drm/amd/display/dc/core/dc.c | 89 ------------------ drivers/gpu/drm/amd/display/dc/dc_stream.h | 7 -- 3 files changed, 103 insertions(+), 101 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index bad9f09c588b..3ff3905eee9a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3977,6 +3977,97 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, spin_unlock_irqrestore(&crtc->dev->event_lock, flags); } +/* + * TODO this whole function needs to go + * + * dc_surface_update is needlessly complex. See if we can just replace this + * with a dc_plane_state and follow the atomic model a bit more closely here. + */ +static bool commit_planes_to_stream( + struct dc *dc, + struct dc_plane_state **plane_states, + uint8_t new_plane_count, + struct dm_crtc_state *dm_new_crtc_state, + struct dm_crtc_state *dm_old_crtc_state, + struct dc_state *state) +{ + /* no need to dynamically allocate this. 
it's pretty small */ + struct dc_surface_update updates[MAX_SURFACES]; + struct dc_flip_addrs *flip_addr; + struct dc_plane_info *plane_info; + struct dc_scaling_info *scaling_info; + int i; + struct dc_stream_state *dc_stream = dm_new_crtc_state->stream; + struct dc_stream_update *stream_update = + kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL); + + if (!stream_update) { + BREAK_TO_DEBUGGER(); + return false; + } + + flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs), + GFP_KERNEL); + plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info), + GFP_KERNEL); + scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info), + GFP_KERNEL); + + if (!flip_addr || !plane_info || !scaling_info) { + kfree(flip_addr); + kfree(plane_info); + kfree(scaling_info); + kfree(stream_update); + return false; + } + + memset(updates, 0, sizeof(updates)); + + stream_update->src = dc_stream->src; + stream_update->dst = dc_stream->dst; + stream_update->out_transfer_func = dc_stream->out_transfer_func; + + for (i = 0; i < new_plane_count; i++) { + updates[i].surface = plane_states[i]; + updates[i].gamma = + (struct dc_gamma *)plane_states[i]->gamma_correction; + updates[i].in_transfer_func = plane_states[i]->in_transfer_func; + flip_addr[i].address = plane_states[i]->address; + flip_addr[i].flip_immediate = plane_states[i]->flip_immediate; + plane_info[i].color_space = plane_states[i]->color_space; + plane_info[i].input_tf = plane_states[i]->input_tf; + plane_info[i].format = plane_states[i]->format; + plane_info[i].plane_size = plane_states[i]->plane_size; + plane_info[i].rotation = plane_states[i]->rotation; + plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror; + plane_info[i].stereo_format = plane_states[i]->stereo_format; + plane_info[i].tiling_info = plane_states[i]->tiling_info; + plane_info[i].visible = plane_states[i]->visible; + plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha; + plane_info[i].dcc = plane_states[i]->dcc; + scaling_info[i].scaling_quality = plane_states[i]->scaling_quality; + scaling_info[i].src_rect = plane_states[i]->src_rect; + scaling_info[i].dst_rect = plane_states[i]->dst_rect; + scaling_info[i].clip_rect = plane_states[i]->clip_rect; + + updates[i].flip_addr = &flip_addr[i]; + updates[i].plane_info = &plane_info[i]; + updates[i].scaling_info = &scaling_info[i]; + } + + dc_commit_updates_for_stream( + dc, + updates, + new_plane_count, + dc_stream, stream_update, plane_states, state); + + kfree(flip_addr); + kfree(plane_info); + kfree(scaling_info); + kfree(stream_update); + return true; +} + static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_device *dev, struct amdgpu_display_manager *dm, @@ -3992,6 +4083,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_crtc_state *new_pcrtc_state = drm_atomic_get_new_crtc_state(state, pcrtc); struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); + struct dm_crtc_state *dm_old_crtc_state = + to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); struct dm_atomic_state *dm_state = to_dm_atomic_state(state); int planes_count = 0; unsigned long flags; @@ -4070,10 +4163,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - if (false == dc_commit_planes_to_stream(dm->dc, + + if (false == commit_planes_to_stream(dm->dc, plane_states_constructed, planes_count, - dc_stream_attach, + acrtc_state, + dm_old_crtc_state, 
dm_state->context)) dm_error("%s: Failed to attach plane!\n", __func__); } else { @@ -4298,8 +4393,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct dc_stream_status *status = NULL; - if (acrtc) + if (acrtc) { new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + } /* Skip any modesets/resets */ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state)) @@ -4322,11 +4419,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) WARN_ON(!status->plane_count); /*TODO How it works with MPO ?*/ - if (!dc_commit_planes_to_stream( + if (!commit_planes_to_stream( dm->dc, status->plane_states, status->plane_count, - dm_new_crtc_state->stream, + dm_new_crtc_state, + to_dm_crtc_state(old_crtc_state), dm_state->context)) dm_error("%s: Failed to update stream scaling!\n", __func__); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 554cf975be05..6f4ad67ffca6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -936,95 +936,6 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) return true; } -/* - * TODO this whole function needs to go - * - * dc_surface_update is needlessly complex. See if we can just replace this - * with a dc_plane_state and follow the atomic model a bit more closely here. - */ -bool dc_commit_planes_to_stream( - struct dc *dc, - struct dc_plane_state **plane_states, - uint8_t new_plane_count, - struct dc_stream_state *dc_stream, - struct dc_state *state) -{ - /* no need to dynamically allocate this. it's pretty small */ - struct dc_surface_update updates[MAX_SURFACES]; - struct dc_flip_addrs *flip_addr; - struct dc_plane_info *plane_info; - struct dc_scaling_info *scaling_info; - int i; - struct dc_stream_update *stream_update = - kzalloc(sizeof(struct dc_stream_update), GFP_KERNEL); - - if (!stream_update) { - BREAK_TO_DEBUGGER(); - return false; - } - - flip_addr = kcalloc(MAX_SURFACES, sizeof(struct dc_flip_addrs), - GFP_KERNEL); - plane_info = kcalloc(MAX_SURFACES, sizeof(struct dc_plane_info), - GFP_KERNEL); - scaling_info = kcalloc(MAX_SURFACES, sizeof(struct dc_scaling_info), - GFP_KERNEL); - - if (!flip_addr || !plane_info || !scaling_info) { - kfree(flip_addr); - kfree(plane_info); - kfree(scaling_info); - kfree(stream_update); - return false; - } - - memset(updates, 0, sizeof(updates)); - - stream_update->src = dc_stream->src; - stream_update->dst = dc_stream->dst; - stream_update->out_transfer_func = dc_stream->out_transfer_func; - - for (i = 0; i < new_plane_count; i++) { - updates[i].surface = plane_states[i]; - updates[i].gamma = - (struct dc_gamma *)plane_states[i]->gamma_correction; - updates[i].in_transfer_func = plane_states[i]->in_transfer_func; - flip_addr[i].address = plane_states[i]->address; - flip_addr[i].flip_immediate = plane_states[i]->flip_immediate; - plane_info[i].color_space = plane_states[i]->color_space; - plane_info[i].input_tf = plane_states[i]->input_tf; - plane_info[i].format = plane_states[i]->format; - plane_info[i].plane_size = plane_states[i]->plane_size; - plane_info[i].rotation = plane_states[i]->rotation; - plane_info[i].horizontal_mirror = plane_states[i]->horizontal_mirror; - plane_info[i].stereo_format = plane_states[i]->stereo_format; - plane_info[i].tiling_info = plane_states[i]->tiling_info; - 
plane_info[i].visible = plane_states[i]->visible; - plane_info[i].per_pixel_alpha = plane_states[i]->per_pixel_alpha; - plane_info[i].dcc = plane_states[i]->dcc; - scaling_info[i].scaling_quality = plane_states[i]->scaling_quality; - scaling_info[i].src_rect = plane_states[i]->src_rect; - scaling_info[i].dst_rect = plane_states[i]->dst_rect; - scaling_info[i].clip_rect = plane_states[i]->clip_rect; - - updates[i].flip_addr = &flip_addr[i]; - updates[i].plane_info = &plane_info[i]; - updates[i].scaling_info = &scaling_info[i]; - } - - dc_commit_updates_for_stream( - dc, - updates, - new_plane_count, - dc_stream, stream_update, plane_states, state); - - kfree(flip_addr); - kfree(plane_info); - kfree(scaling_info); - kfree(stream_update); - return true; -} - struct dc_state *dc_create_state(void) { struct dc_state *context = kzalloc(sizeof(struct dc_state), diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 8d5161060b60..2971cd07e093 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -135,13 +135,6 @@ bool dc_is_stream_scaling_unchanged( * This does not trigger a flip. No surface address is programmed. */ -bool dc_commit_planes_to_stream( - struct dc *dc, - struct dc_plane_state **plane_states, - uint8_t new_plane_count, - struct dc_stream_state *dc_stream, - struct dc_state *state); - void dc_commit_updates_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, -- cgit v1.2.1 From 844de65e9108a03f2018a6bb827cc53bfa71693b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 23 Mar 2018 10:45:00 +0800 Subject: drm/amd/pp: Remove useless fw load error handler on Polaris Acked-by: Huang Rui Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 997a777dd35b..fe6854eecf7b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -301,19 +301,11 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr) smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); /* Check if SMU is running in protected mode */ - if (smu_data->protected_mode == 0) { + if (smu_data->protected_mode == 0) result = polaris10_start_smu_in_non_protection_mode(hwmgr); - } else { + else result = polaris10_start_smu_in_protection_mode(hwmgr); - /* If failed, try with different security Key. 
*/ - if (result != 0) { - smu_data->smu7_data.security_hard_key ^= 1; - cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); - result = polaris10_start_smu_in_protection_mode(hwmgr); - } - } - if (result != 0) PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result); -- cgit v1.2.1 From ba8ab90e6ac9322f39ab8368941b38b5bb12477c Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 22 Mar 2018 14:52:35 +0800 Subject: drm/amd/pp: Add hwmgr_sw_init/fini functions Clean up pp ip functions Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 368 +++++---------------- .../gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 14 +- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 74 ++++- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 7 +- 4 files changed, 147 insertions(+), 316 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 7e8ad30d98e2..6503bbfdc76e 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -31,24 +31,11 @@ #include "amdgpu.h" #include "hwmgr.h" -#define PP_DPM_DISABLED 0xCCCC - static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, enum amd_pm_state_type *user_state); static const struct amd_pm_funcs pp_dpm_funcs; -static inline int pp_check(struct pp_hwmgr *hwmgr) -{ - if (hwmgr == NULL || hwmgr->smumgr_funcs == NULL) - return -EINVAL; - - if (hwmgr->pm_en == 0 || hwmgr->hwmgr_func == NULL) - return PP_DPM_DISABLED; - - return 0; -} - static int amd_powerplay_create(struct amdgpu_device *adev) { struct pp_hwmgr *hwmgr; @@ -73,7 +60,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev) } -static int amd_powerplay_destroy(struct amdgpu_device *adev) +static void amd_powerplay_destroy(struct amdgpu_device *adev) { struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; @@ -82,8 +69,6 @@ static int amd_powerplay_destroy(struct amdgpu_device *adev) kfree(hwmgr); hwmgr = NULL; - - return 0; } static int pp_early_init(void *handle) @@ -109,18 +94,9 @@ static int pp_sw_init(void *handle) struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret >= 0) { - if (hwmgr->smumgr_funcs->smu_init == NULL) - return -EINVAL; + ret = hwmgr_sw_init(hwmgr); - ret = hwmgr->smumgr_funcs->smu_init(hwmgr); - - phm_register_irq_handlers(hwmgr); - - pr_debug("amdgpu: powerplay sw initialized\n"); - } + pr_debug("powerplay sw init %s\n", ret ?
"failed" : "successfully"); return ret; } @@ -129,13 +105,8 @@ static int pp_sw_fini(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - ret = pp_check(hwmgr); - if (ret >= 0) { - if (hwmgr->smumgr_funcs->smu_fini != NULL) - hwmgr->smumgr_funcs->smu_fini(hwmgr); - } + hwmgr_sw_fini(hwmgr); if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) amdgpu_ucode_fini_bo(adev); @@ -152,40 +123,20 @@ static int pp_hw_init(void *handle) if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) amdgpu_ucode_init_bo(adev); - ret = pp_check(hwmgr); + ret = hwmgr_hw_init(hwmgr); - if (ret >= 0) { - if (hwmgr->smumgr_funcs->start_smu == NULL) - return -EINVAL; + if (ret) + pr_err("powerplay hw init failed\n"); - if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { - pr_err("smc start failed\n"); - hwmgr->smumgr_funcs->smu_fini(hwmgr); - return -EINVAL; - } - if (ret == PP_DPM_DISABLED) - goto exit; - ret = hwmgr_hw_init(hwmgr); - if (ret) - goto exit; - } return ret; -exit: - hwmgr->pm_en = 0; - cgs_notify_dpm_enabled(hwmgr->device, false); - return 0; - } static int pp_hw_fini(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - ret = pp_check(hwmgr); - if (ret == 0) - hwmgr_hw_fini(hwmgr); + hwmgr_hw_fini(hwmgr); return 0; } @@ -194,11 +145,8 @@ static int pp_late_init(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret == 0) + if (hwmgr && hwmgr->pm_en) pp_dpm_dispatch_tasks(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL); @@ -233,12 +181,9 @@ static int pp_set_powergating_state(void *handle, { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return 0; if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -254,38 +199,16 @@ static int pp_suspend(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret = 0; - ret = pp_check(hwmgr); - if (ret == 0) - hwmgr_hw_suspend(hwmgr); - return 0; + return hwmgr_suspend(hwmgr); } static int pp_resume(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - int ret; - - ret = pp_check(hwmgr); - - if (ret < 0) - return ret; - - if (hwmgr->smumgr_funcs->start_smu == NULL) - return -EINVAL; - - if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { - pr_err("smc start failed\n"); - hwmgr->smumgr_funcs->smu_fini(hwmgr); - return -EINVAL; - } - - if (ret == PP_DPM_DISABLED) - return 0; - return hwmgr_hw_resume(hwmgr); + return hwmgr_resume(hwmgr); } static int pp_set_clockgating_state(void *handle, @@ -334,12 +257,9 @@ static int pp_dpm_fw_loading_complete(void *handle) static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->update_clock_gatings == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -389,12 +309,9 @@ static int pp_dpm_force_performance_level(void *handle, enum amd_dpm_forced_level level) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (level == 
hwmgr->dpm_level) return 0; @@ -412,13 +329,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; enum amd_dpm_forced_level level; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); level = hwmgr->dpm_level; @@ -429,13 +343,10 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( static uint32_t pp_dpm_get_sclk(void *handle, bool low) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; uint32_t clk = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return 0; if (hwmgr->hwmgr_func->get_sclk == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -450,13 +361,10 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low) static uint32_t pp_dpm_get_mclk(void *handle, bool low) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; uint32_t clk = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return 0; if (hwmgr->hwmgr_func->get_mclk == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -471,11 +379,8 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low) static void pp_dpm_powergate_vce(void *handle, bool gate) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) + if (!hwmgr || !hwmgr->pm_en) return; if (hwmgr->hwmgr_func->powergate_vce == NULL) { @@ -490,11 +395,8 @@ static void pp_dpm_powergate_vce(void *handle, bool gate) static void pp_dpm_powergate_uvd(void *handle, bool gate) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) + if (!hwmgr || !hwmgr->pm_en) return; if (hwmgr->hwmgr_func->powergate_uvd == NULL) { @@ -512,10 +414,8 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, int ret = 0; struct pp_hwmgr *hwmgr = handle; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); ret = hwmgr_handle_task(hwmgr, task_id, user_state); @@ -528,15 +428,9 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) { struct pp_hwmgr *hwmgr = handle; struct pp_power_state *state; - int ret = 0; enum amd_pm_state_type pm_type; - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (hwmgr->current_ps == NULL) + if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -568,11 +462,8 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) + if (!hwmgr || !hwmgr->pm_en) return; if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { @@ -587,13 +478,10 @@ static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) static uint32_t pp_dpm_get_fan_control_mode(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; uint32_t mode = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return 0; if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -610,10 +498,8 @@ static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) { 
pr_info("%s was not implemented.\n", __func__); @@ -630,10 +516,8 @@ static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -651,10 +535,8 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) return -EINVAL; @@ -670,16 +552,10 @@ static int pp_dpm_get_pp_num_states(void *handle, { struct pp_hwmgr *hwmgr = handle; int i; - int ret = 0; memset(data, 0, sizeof(*data)); - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (hwmgr->ps == NULL) + if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -713,15 +589,9 @@ static int pp_dpm_get_pp_num_states(void *handle, static int pp_dpm_get_pp_table(void *handle, char **table) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; int size = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (!hwmgr->soft_pp_table) + if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -736,10 +606,6 @@ static int amd_powerplay_reset(void *handle) struct pp_hwmgr *hwmgr = handle; int ret; - ret = pp_check(hwmgr); - if (ret) - return ret; - ret = hwmgr_hw_fini(hwmgr); if (ret) return ret; @@ -756,10 +622,8 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); if (!hwmgr->hardcode_pp_table) { @@ -796,10 +660,8 @@ static int pp_dpm_force_clock_level(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->force_clock_level == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -820,10 +682,8 @@ static int pp_dpm_print_clock_levels(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->print_clock_levels == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -840,10 +700,8 @@ static int pp_dpm_get_sclk_od(void *handle) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->get_sclk_od == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -860,10 +718,8 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_sclk_od == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -881,10 +737,8 @@ static int pp_dpm_get_mclk_od(void *handle) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->get_mclk_od == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -901,10 +755,8 @@ static int pp_dpm_set_mclk_od(void *handle, 
uint32_t value) struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_mclk_od == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -922,11 +774,7 @@ static int pp_dpm_read_sensor(void *handle, int idx, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (value == NULL) + if (!hwmgr || !hwmgr->pm_en || !value) return -EINVAL; switch (idx) { @@ -948,14 +796,11 @@ static struct amd_vce_state* pp_dpm_get_vce_clock_state(void *handle, unsigned idx) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) + if (!hwmgr || !hwmgr->pm_en) return NULL; - if (hwmgr && idx < hwmgr->num_vce_state_tables) + if (idx < hwmgr->num_vce_state_tables) return &hwmgr->vce_states[idx]; return NULL; } @@ -964,7 +809,7 @@ static int pp_get_power_profile_mode(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; - if (!buf || pp_check(hwmgr)) + if (!hwmgr || !hwmgr->pm_en || !buf) return -EINVAL; if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) { @@ -980,12 +825,12 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size) struct pp_hwmgr *hwmgr = handle; int ret = -EINVAL; - if (pp_check(hwmgr)) - return -EINVAL; + if (!hwmgr || !hwmgr->pm_en) + return ret; if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { pr_info("%s was not implemented.\n", __func__); - return -EINVAL; + return ret; } mutex_lock(&hwmgr->smu_lock); if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) @@ -998,7 +843,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3 { struct pp_hwmgr *hwmgr = handle; - if (pp_check(hwmgr)) + if (!hwmgr || !hwmgr->pm_en) return -EINVAL; if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) { @@ -1016,7 +861,7 @@ static int pp_dpm_switch_power_profile(void *handle, long workload; uint32_t index; - if (pp_check(hwmgr)) + if (!hwmgr || !hwmgr->pm_en) return -EINVAL; if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) { @@ -1058,10 +903,8 @@ static int pp_dpm_notify_smu_memory_info(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -1082,12 +925,9 @@ static int pp_dpm_notify_smu_memory_info(void *handle, static int pp_set_power_limit(void *handle, uint32_t limit) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_power_limit == NULL) { pr_info("%s was not implemented.\n", __func__); @@ -1104,20 +944,14 @@ static int pp_set_power_limit(void *handle, uint32_t limit) hwmgr->hwmgr_func->set_power_limit(hwmgr, limit); hwmgr->power_limit = limit; mutex_unlock(&hwmgr->smu_lock); - return ret; + return 0; } static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (limit == NULL) + if (!hwmgr || !hwmgr->pm_en ||!limit) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1129,19 +963,16 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit) mutex_unlock(&hwmgr->smu_lock); - return ret; + return 0; } static int pp_display_configuration_change(void 
*handle, const struct amd_pp_display_configuration *display_config) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); phm_store_dal_configuration_data(hwmgr, display_config); @@ -1155,12 +986,7 @@ static int pp_get_display_power_level(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (output == NULL) + if (!hwmgr || !hwmgr->pm_en ||!output) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1177,10 +1003,8 @@ static int pp_get_current_clocks(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1225,10 +1049,8 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (clocks == NULL) return -EINVAL; @@ -1246,11 +1068,7 @@ static int pp_get_clock_by_type_with_latency(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (!clocks) + if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1266,11 +1084,7 @@ static int pp_get_clock_by_type_with_voltage(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (!clocks) + if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1287,11 +1101,7 @@ static int pp_set_watermarks_for_clocks_ranges(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (!wm_with_clock_ranges) + if (!hwmgr || !hwmgr->pm_en ||!wm_with_clock_ranges) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1308,11 +1118,7 @@ static int pp_display_clock_voltage_request(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - if (ret) - return ret; - - if (!clock) + if (!hwmgr || !hwmgr->pm_en ||!clock) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1328,12 +1134,7 @@ static int pp_get_display_mode_validation_clocks(void *handle, struct pp_hwmgr *hwmgr = handle; int ret = 0; - ret = pp_check(hwmgr); - - if (ret) - return ret; - - if (clocks == NULL) + if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -1348,12 +1149,9 @@ static int pp_get_display_mode_validation_clocks(void *handle, static int pp_set_mmhub_powergating_by_smu(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; - - ret = pp_check(hwmgr); - if (ret) - return ret; + if (!hwmgr || !hwmgr->pm_en) + return -EINVAL; if (hwmgr->hwmgr_func->set_mmhub_powergating_by_smu == NULL) { pr_info("%s was not implemented.\n", __func__); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index ae2e9339dd6b..dcceadb2e172 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -75,8 +75,7 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr, int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) { - int ret = 1; - bool enabled; + int ret = -EINVAL;; PHM_FUNC_CHECK(hwmgr); if (smum_is_dpm_running(hwmgr)) { @@ -87,17 +86,12 @@ int 
phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); - enabled = ret == 0; - - cgs_notify_dpm_enabled(hwmgr->device, enabled); - return ret; } int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) { - int ret = -1; - bool enabled; + int ret = -EINVAL; PHM_FUNC_CHECK(hwmgr); @@ -109,10 +103,6 @@ int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) if (hwmgr->hwmgr_func->dynamic_state_management_disable) ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); - enabled = ret == 0 ? false : true; - - cgs_notify_dpm_enabled(hwmgr->device, enabled); - return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 42982055b161..30ff8a9c301b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -76,7 +76,7 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) int hwmgr_early_init(struct pp_hwmgr *hwmgr) { - if (hwmgr == NULL) + if (!hwmgr) return -EINVAL; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; @@ -170,17 +170,51 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) return 0; } +int hwmgr_sw_init(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr|| !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->smu_init) + return -EINVAL; + + phm_register_irq_handlers(hwmgr); + + return hwmgr->smumgr_funcs->smu_init(hwmgr); +} + + +int hwmgr_sw_fini(struct pp_hwmgr *hwmgr) +{ + if (hwmgr && hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->smu_fini) + hwmgr->smumgr_funcs->smu_fini(hwmgr); + + return 0; +} + int hwmgr_hw_init(struct pp_hwmgr *hwmgr) { int ret = 0; - if (hwmgr == NULL) + if (!hwmgr || !hwmgr->smumgr_funcs) return -EINVAL; - if (hwmgr->pptable_func == NULL || - hwmgr->pptable_func->pptable_init == NULL || - hwmgr->hwmgr_func->backend_init == NULL) - return -EINVAL; + if (hwmgr->smumgr_funcs->start_smu) { + ret = hwmgr->smumgr_funcs->start_smu(hwmgr); + if (ret) { + pr_err("smc start failed\n"); + return -EINVAL; + } + } + + if (!hwmgr->pm_en) + return 0; + + if (!hwmgr->pptable_func || + !hwmgr->pptable_func->pptable_init || + !hwmgr->hwmgr_func->backend_init) { + hwmgr->pm_en = false; + ((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = false; + pr_info("dpm not supported \n"); + return 0; + } ret = hwmgr->pptable_func->pptable_init(hwmgr); if (ret) @@ -214,14 +248,13 @@ err1: if (hwmgr->pptable_func->pptable_fini) hwmgr->pptable_func->pptable_fini(hwmgr); err: - pr_err("amdgpu: powerplay initialization failed\n"); return ret; } int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) { - if (hwmgr == NULL) - return -EINVAL; + if (!hwmgr || !hwmgr->pm_en) + return 0; phm_stop_thermal_controller(hwmgr); psm_set_boot_states(hwmgr); @@ -236,12 +269,12 @@ int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) return psm_fini_power_state_table(hwmgr); } -int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr) +int hwmgr_suspend(struct pp_hwmgr *hwmgr) { int ret = 0; - if (hwmgr == NULL) - return -EINVAL; + if (!hwmgr || !hwmgr->pm_en) + return 0; phm_disable_smc_firmware_ctf(hwmgr); ret = psm_set_boot_states(hwmgr); @@ -255,13 +288,23 @@ int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr) return ret; } -int hwmgr_hw_resume(struct pp_hwmgr *hwmgr) +int hwmgr_resume(struct pp_hwmgr *hwmgr) { int ret = 0; - if (hwmgr == NULL) + if (!hwmgr) return -EINVAL; + if (hwmgr->smumgr_funcs && hwmgr->smumgr_funcs->start_smu) { + if (hwmgr->smumgr_funcs->start_smu(hwmgr)) { + 
pr_err("smc start failed\n"); + return -EINVAL; + } + } + + if (!hwmgr->pm_en) + return 0; + ret = phm_setup_asic(hwmgr); if (ret) return ret; @@ -270,9 +313,6 @@ int hwmgr_hw_resume(struct pp_hwmgr *hwmgr) if (ret) return ret; ret = phm_start_thermal_controller(hwmgr); - if (ret) - return ret; - ret |= psm_set_performance_states(hwmgr); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 17f811d181c8..d6c9a3bac0a9 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -782,10 +782,13 @@ struct pp_hwmgr { }; int hwmgr_early_init(struct pp_hwmgr *hwmgr); +int hwmgr_sw_init(struct pp_hwmgr *hwmgr); +int hwmgr_sw_fini(struct pp_hwmgr *hwmgr); int hwmgr_hw_init(struct pp_hwmgr *hwmgr); int hwmgr_hw_fini(struct pp_hwmgr *hwmgr); -int hwmgr_hw_suspend(struct pp_hwmgr *hwmgr); -int hwmgr_hw_resume(struct pp_hwmgr *hwmgr); +int hwmgr_suspend(struct pp_hwmgr *hwmgr); +int hwmgr_resume(struct pp_hwmgr *hwmgr); + int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, enum amd_pm_state_type *user_state); -- cgit v1.2.1 From b61e54cb1881c7cb74787da6a5d39d8d48dcc075 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 22 Mar 2018 15:12:59 +0800 Subject: drm/amd/pp: Lock pm_funcs when set pp table unlock mutex until set pp table completely to avoid conflict if other pp functions were called simultaneously. use hwmgr_handle_task instand of pp_dpm_dispatch_tasks. It is not make sense that call pp_functions in ip_functions. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 28 +++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 6503bbfdc76e..9ada102e253c 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -31,8 +31,6 @@ #include "amdgpu.h" #include "hwmgr.h" -static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, - enum amd_pm_state_type *user_state); static const struct amd_pm_funcs pp_dpm_funcs; @@ -146,10 +144,12 @@ static int pp_late_init(void *handle) struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - if (hwmgr && hwmgr->pm_en) - pp_dpm_dispatch_tasks(hwmgr, + if (hwmgr && hwmgr->pm_en) { + mutex_lock(&hwmgr->smu_lock); + hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL); - + mutex_unlock(&hwmgr->smu_lock); + } return 0; } @@ -620,7 +620,7 @@ static int amd_powerplay_reset(void *handle) static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; + int ret = -ENOMEM; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -630,28 +630,28 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!hwmgr->hardcode_pp_table) { - mutex_unlock(&hwmgr->smu_lock); - return -ENOMEM; - } + if (!hwmgr->hardcode_pp_table) + goto err; } memcpy(hwmgr->hardcode_pp_table, buf, size); hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; - mutex_unlock(&hwmgr->smu_lock); ret = amd_powerplay_reset(handle); if (ret) - return ret; + goto err; if (hwmgr->hwmgr_func->avfs_control) { ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false); if (ret) 
- return ret; + goto err; } - + mutex_unlock(&hwmgr->smu_lock); return 0; +err: + mutex_unlock(&hwmgr->smu_lock); + return ret; } static int pp_dpm_force_clock_level(void *handle, -- cgit v1.2.1 From 8bb575a2d83af097980641d864401b303286755c Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 22 Mar 2018 15:46:47 +0800 Subject: drm/amd/pp: Save vf state in pp context Store vf state in pp_context so we can deprecate the cgs interface. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3 ++- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 7 +++---- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 3 +-- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 10 +++++----- drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 3 +-- 6 files changed, 13 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 9ada102e253c..337af789d258 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -46,7 +46,8 @@ static int amd_powerplay_create(struct amdgpu_device *adev) return -ENOMEM; hwmgr->adev = adev; - hwmgr->pm_en = (amdgpu_dpm != 0 && !amdgpu_sriov_vf(adev)) ? true : false; + hwmgr->not_vf = !amdgpu_sriov_vf(adev); + hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? true : false; hwmgr->device = amdgpu_cgs_create_device(adev); mutex_init(&hwmgr->smu_lock); hwmgr->chip_family = adev->family; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index d6c9a3bac0a9..d5cadc61c9b3 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -718,6 +718,7 @@ struct pp_hwmgr { uint32_t chip_family; uint32_t chip_id; uint32_t smu_version; + bool not_vf; bool pm_en; struct mutex smu_lock; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index faef78321446..35b947e5292c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -288,8 +288,7 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr) struct fiji_smumgr *priv = (struct fiji_smumgr *)(hwmgr->smu_backend); /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(hwmgr) - || cgs_is_virtualization_enabled(hwmgr->device))) { + if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { /* Check if SMU is running in protected mode */ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, @@ -335,8 +334,8 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) uint32_t efuse = 0; uint32_t mask = (1 << ((AVFS_EN_MSB - AVFS_EN_LSB) + 1)) - 1; - if (cgs_is_virtualization_enabled(hwmgr->device)) - return 0; + if (!hwmgr->not_vf) + return false; if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB, mask, &efuse)) { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index fe6854eecf7b..05e60e8fee0b 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -295,8 +295,7 @@ static int polaris10_start_smu(struct pp_hwmgr *hwmgr) struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend); /* Only start SMC if SMC 
RAM is not running */ - if (!(smu7_is_smc_ram_running(hwmgr) - || cgs_is_virtualization_enabled(hwmgr->device))) { + if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { smu_data->protected_mode = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); smu_data->smu7_data.security_hard_key = (uint8_t) (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 0399c10d2be0..3684822b75b2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -375,7 +375,7 @@ static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr, entry->meta_data_addr_low = 0; /* digest need be excluded out */ - if (cgs_is_virtualization_enabled(hwmgr->device)) + if (!hwmgr->not_vf) info.image_size -= 20; entry->data_size_byte = info.image_size; entry->num_register_entries = 0; @@ -409,7 +409,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) 0x0); if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */ - if (!cgs_is_virtualization_enabled(hwmgr->device)) { + if (hwmgr->not_vf) { smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SMU_DRAM_ADDR_HI, upper_32_bits(smu_data->smu_buffer.mc_addr)); @@ -467,7 +467,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); - if (cgs_is_virtualization_enabled(hwmgr->device)) + if (!hwmgr->not_vf) PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr, UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]), "Failed to Get Firmware Entry.", return -EINVAL); @@ -608,7 +608,7 @@ int smu7_init(struct pp_hwmgr *hwmgr) smu_data->header = smu_data->header_buffer.kaddr; smu_data->header_buffer.mc_addr = mc_addr; - if (cgs_is_virtualization_enabled(hwmgr->device)) + if (!hwmgr->not_vf) return 0; smu_data->smu_buffer.data_size = 200*4096; @@ -643,7 +643,7 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr) &smu_data->header_buffer.mc_addr, &smu_data->header_buffer.kaddr); - if (!cgs_is_virtualization_enabled(hwmgr->device)) + if (hwmgr->not_vf) amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle, &smu_data->smu_buffer.mc_addr, &smu_data->smu_buffer.kaddr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index b51d7468c3e7..2ba05d2b4302 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -199,8 +199,7 @@ static int tonga_start_smu(struct pp_hwmgr *hwmgr) int result; /* Only start SMC if SMC RAM is not running */ - if (!(smu7_is_smc_ram_running(hwmgr) || - cgs_is_virtualization_enabled(hwmgr->device))) { + if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { /*Check if SMU is running in protected mode*/ if (0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)) { -- cgit v1.2.1 From 64f6db77fcb81493988061587fa478e6612dc45b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 22 Mar 2018 19:32:45 +0800 Subject: drm/amd/pp: Use release_firmware directly in powerplay Use kernel api directly so we can deprecate the cgs interface. 
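For reference, the pairing this switches to is the stock <linux/firmware.h> API; a minimal sketch (the helper name below is made up, only the release_firmware()/adev->pm.fw usage mirrors the patch):

#include <linux/firmware.h>

/* Hypothetical helper: free the firmware image the driver requested
 * earlier, instead of going through the cgs_rel_firmware() wrapper.
 */
static void example_release_smu_fw(struct amdgpu_device *adev)
{
	release_firmware(adev->pm.fw);	/* no-op if the pointer is NULL */
	adev->pm.fw = NULL;		/* avoid a stale pointer across re-init */
}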
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 6 +++++- drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 1 - drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 1 - 3 files changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 337af789d258..b91ef113a490 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "amd_shared.h" #include "amd_powerplay.h" #include "power_state.h" @@ -107,8 +108,11 @@ static int pp_sw_fini(void *handle) hwmgr_sw_fini(hwmgr); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) + if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) { + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; amdgpu_ucode_fini_bo(adev); + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 08d000140eca..e30a2eea1fba 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2784,7 +2784,6 @@ static int ci_smu_fini(struct pp_hwmgr *hwmgr) { kfree(hwmgr->smu_backend); hwmgr->smu_backend = NULL; - cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 3684822b75b2..41fab2df994e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -650,6 +650,5 @@ int smu7_smu_fini(struct pp_hwmgr *hwmgr) kfree(hwmgr->smu_backend); hwmgr->smu_backend = NULL; - cgs_rel_firmware(hwmgr->device, CGS_UCODE_ID_SMU); return 0; } -- cgit v1.2.1 From b13aa1091fb2002a6854e0401df5fc6231fbca58 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 26 Mar 2018 16:18:34 +0800 Subject: drm/amdgpu: Use dpm_enabled as dpm state flag driver will set dpm_enabled to true only when module parameter amdgpu_dpm not equal to 0 and smu hw initialize successfully. 
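In other words, callers now gate on the runtime flag rather than on the module parameter; a minimal sketch (hypothetical function name):

/* adev->pm.dpm_enabled is only true when amdgpu_dpm != 0 *and* the SMU
 * hw init succeeded, so a single check covers both conditions.
 */
static int example_pm_op(struct amdgpu_device *adev)
{
	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	/* ... proceed with the DPM/sensor operation ... */
	return 0;
}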
Reviewed-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 2 +- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 2 +- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2 +- drivers/gpu/drm/amd/amdgpu/si_dpm.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4 ++-- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 3 ++- 7 files changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 448d69fe3756..c98e59721444 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -428,7 +428,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, if (size & 3 || *pos & 0x3) return -EINVAL; - if (amdgpu_dpm == 0) + if (!adev->pm.dpm_enabled) return -EINVAL; /* convert offset to sensor number */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 4b7824d30e73..bd9e723dbb2b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -704,7 +704,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct pp_gpu_power query = {0}; int query_size = sizeof(query); - if (amdgpu_dpm == 0) + if (!adev->pm.dpm_enabled) return -ENOENT; switch (info->sensor_info.type) { diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 47ef3e6e7178..be6b19951e6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -6255,7 +6255,7 @@ static int ci_dpm_late_init(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_dpm) + if (!adev->pm.dpm_enabled) return 0; /* init the sysfs and debugfs files late */ diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 26ba984ab2b7..bc1720ea4959 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2974,7 +2974,7 @@ static int kv_dpm_late_init(void *handle) /* powerdown unused blocks for now */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_dpm) + if (!adev->pm.dpm_enabled) return 0; kv_dpm_powergate_acp(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index 797d505bf9ee..b12d7c9d42a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c @@ -7580,7 +7580,7 @@ static int si_dpm_late_init(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_dpm) + if (!adev->pm.dpm_enabled) return 0; ret = si_set_temperature_range(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 948bb9437757..87cbb142dd0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -688,7 +688,7 @@ static int uvd_v4_2_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); - if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { + if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) { if (!(RREG32_SMC(ixCURRENT_PG_STATUS) & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK)) { WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | @@ -699,7 +699,7 @@ static int uvd_v4_2_set_powergating_state(void 
*handle, } return 0; } else { - if (adev->pg_flags & AMD_PG_SUPPORT_UVD && amdgpu_dpm == 0) { + if (adev->pg_flags & AMD_PG_SUPPORT_UVD && !adev->pm.dpm_enabled) { if (RREG32_SMC(ixCURRENT_PG_STATUS) & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) { WREG32(mmUVD_PGFSM_CONFIG, (UVD_PGFSM_CONFIG__UVD_PGFSM_FSM_ADDR_MASK | diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 30ff8a9c301b..bca67df29c8c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -211,7 +211,6 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr) !hwmgr->pptable_func->pptable_init || !hwmgr->hwmgr_func->backend_init) { hwmgr->pm_en = false; - ((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = false; pr_info("dpm not supported \n"); return 0; } @@ -240,6 +239,8 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr) if (ret) goto err2; + ((struct amdgpu_device *)hwmgr->adev)->pm.dpm_enabled = true; + return 0; err2: if (hwmgr->hwmgr_func->backend_fini) -- cgit v1.2.1 From 986567e4ed81a21a66e841b9e87e708c435328d8 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 27 Mar 2018 09:32:57 +0100 Subject: drm/amd/pp: Fix spelling mistake: "suppported" -> "supported" Trivial fix to spelling mistake in pr_warn warning message text Reviewed-by: Rex Zhu Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 0f2851b5b368..308bff2b5d1d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -46,7 +46,7 @@ int psm_init_power_state_table(struct pp_hwmgr *hwmgr) sizeof(struct pp_power_state); if (table_entries == 0 || size == 0) { - pr_warn("Please check whether power state management is suppported on this asic\n"); + pr_warn("Please check whether power state management is supported on this asic\n"); return 0; } -- cgit v1.2.1 From 62fd51275e4d43e300f95f2148a41e5bf738ac29 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 23 Mar 2018 18:18:23 +0800 Subject: drm/amd/pp: Use gfx rlc funcs directly in powerplay MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to remove cgs interfaces: cgs_enter_safe_mode cgs_lock_grbm_idx Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 13 ++-- .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 71 ++++++++++++---------- 2 files changed, 46 insertions(+), 38 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 03bc7453f3b1..a55ee166ce9f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -740,8 +740,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_CAP(PHM_PlatformCaps_TDRamping) || PP_CAP(PHM_PlatformCaps_TCPRamping)) { - cgs_enter_safe_mode(hwmgr->device, true); - cgs_lock_grbm_idx(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); + mutex_lock(&adev->grbm_idx_mutex); value = 0; value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { @@ -781,8 +781,8 @@ int 
smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == result), "Failed to enable DPM DIDT.", return result); } - cgs_lock_grbm_idx(hwmgr->device, false); - cgs_enter_safe_mode(hwmgr->device, false); + mutex_unlock(&adev->grbm_idx_mutex); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; @@ -791,13 +791,14 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) { int result; + struct amdgpu_device *adev = hwmgr->adev; if (PP_CAP(PHM_PlatformCaps_SQRamping) || PP_CAP(PHM_PlatformCaps_DBRamping) || PP_CAP(PHM_PlatformCaps_TDRamping) || PP_CAP(PHM_PlatformCaps_TCPRamping)) { - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); result = smu7_enable_didt(hwmgr, false); PP_ASSERT_WITH_CODE((result == 0), @@ -809,7 +810,7 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == result), "Failed to disable DPM DIDT.", return result); } - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index ba63faefc61f..203a6918395b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -930,16 +930,16 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; num_se = adev->gfx.config.max_shader_engines; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); - cgs_lock_grbm_idx(hwmgr->device, true); + mutex_lock(&adev->grbm_idx_mutex); reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); @@ -959,38 +959,40 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) break; } cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int vega10_disable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) { - cgs_enter_safe_mode(hwmgr->device, true); + struct amdgpu_device *adev = hwmgr->adev; + + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; num_se = adev->gfx.config.max_shader_engines; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); - cgs_lock_grbm_idx(hwmgr->device, true); + mutex_lock(&adev->grbm_idx_mutex); reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | 
GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); @@ -1004,11 +1006,11 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) break; } cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); vega10_program_gc_didt_config_registers(hwmgr, GCDiDtDroopCtrlConfig_vega10); if (PP_CAP(PHM_PlatformCaps_GCEDC)) @@ -1022,13 +1024,14 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t data; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; @@ -1043,16 +1046,16 @@ static int vega10_disable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; num_se = adev->gfx.config.max_shader_engines; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); - cgs_lock_grbm_idx(hwmgr->device, true); + mutex_lock(&adev->grbm_idx_mutex); reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); @@ -1068,41 +1071,43 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) break; } cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr) { - cgs_enter_safe_mode(hwmgr->device, true); + struct amdgpu_device *adev = hwmgr->adev; + + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0; uint32_t count, data; - struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; num_se = adev->gfx.config.max_shader_engines; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); - cgs_lock_grbm_idx(hwmgr->device, true); + mutex_lock(&adev->grbm_idx_mutex); reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); @@ -1116,11 +1121,11 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) break; } cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + 
mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); vega10_program_gc_didt_config_registers(hwmgr, PSMGCEDCDroopCtrlConfig_vega10); @@ -1137,13 +1142,14 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t data; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); if (PP_CAP(PHM_PlatformCaps_GCEDC)) { data = 0x00000000; @@ -1158,15 +1164,16 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; int result; - cgs_enter_safe_mode(hwmgr->device, true); + adev->gfx.rlc.funcs->enter_safe_mode(adev); - cgs_lock_grbm_idx(hwmgr->device, true); + mutex_lock(&adev->grbm_idx_mutex); reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); cgs_write_register(hwmgr->device, reg, 0xE0000000); - cgs_lock_grbm_idx(hwmgr->device, false); + mutex_unlock(&adev->grbm_idx_mutex); result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEEDCCtrlForceStallConfig_Vega10, VEGA10_CONFIGREG_DIDT); @@ -1175,7 +1182,7 @@ static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) vega10_didt_set_mask(hwmgr, false); - cgs_enter_safe_mode(hwmgr->device, false); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return 0; } -- cgit v1.2.1 From d32d661770a455802afc8d2f9efed617cc8073ed Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 23 Mar 2018 18:36:51 +0800 Subject: drm/amdgpu: Get pci resource directly through adev MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to remove the cgs wrapper function cgs_get_pci_resource Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index a29362f9ef41..03ee36739efe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -290,12 +290,11 @@ static int acp_hw_init(void *handle) else if (r) return r; - r = cgs_get_pci_resource(adev->acp.cgs_device, CGS_RESOURCE_TYPE_MMIO, - 0x5289, 0, &acp_base); - if (r == -ENODEV) - return 0; - else if (r) - return r; + if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) + return -EINVAL; + + acp_base = adev->rmmio_base; + if (adev->asic_type != CHIP_STONEY) { adev->acp.acp_genpd = kzalloc(sizeof(struct acp_pm_domain), GFP_KERNEL); if (adev->acp.acp_genpd == NULL) -- cgit v1.2.1 From e8ee21d2a46d8d5cc85766fb49251c2c21871b30 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 26 Mar 2018 18:13:28 +0800 Subject: drm/amd/dc: Use atombios api directly in DC In order to remove the cgs wrapper functions for atombios api. 
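The direct call pattern is roughly the following sketch (hypothetical wrapper name; amdgpu_atom_execute_table() and amdgpu_atom_parse_cmd_header() are the existing atom.h entry points used in the diff below):

/* Hypothetical helper showing the direct AtomBIOS calls that replace
 * cgs_atom_exec_cmd_table()/cgs_atom_get_cmd_table_revs().
 */
static int example_exec_bios_cmd_table(struct amdgpu_device *adev,
				       int index, void *params)
{
	uint8_t frev, crev;

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context,
					  index, &frev, &crev))
		return -EINVAL;	/* command table revision not available */

	/* returns 0 on success, matching the old cgs wrapper convention */
	return amdgpu_atom_execute_table(adev->mode_info.atom_context,
					 index, (uint32_t *)params);
}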
Reviewed-by: Harry Wentland Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/bios/command_table.c | 22 +++++++++--------- .../gpu/drm/amd/display/dc/bios/command_table2.c | 26 +++++++++++++--------- 2 files changed, 28 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 4b5fdd577848..651e1fd4622f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -24,7 +24,7 @@ */ #include "dm_services.h" - +#include "amdgpu.h" #include "atom.h" #include "include/bios_parser_interface.h" @@ -35,16 +35,16 @@ #include "bios_parser_types_internal.h" #define EXEC_BIOS_CMD_TABLE(command, params)\ - (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \ + (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GetIndexIntoMasterTable(COMMAND, command), \ - ¶ms) == 0) + (uint32_t *)¶ms) == 0) #define BIOS_CMD_TABLE_REVISION(command, frev, crev)\ - cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \ + amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GetIndexIntoMasterTable(COMMAND, command), &frev, &crev) #define BIOS_CMD_TABLE_PARA_REVISION(command)\ - bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \ + bios_cmd_table_para_revision(bp->base.ctx->driver_context, \ GetIndexIntoMasterTable(COMMAND, command)) static void init_dig_encoder_control(struct bios_parser *bp); @@ -82,16 +82,18 @@ void dal_bios_parser_init_cmd_tbl(struct bios_parser *bp) init_set_dce_clock(bp); } -static uint32_t bios_cmd_table_para_revision(void *cgs_device, +static uint32_t bios_cmd_table_para_revision(void *dev, uint32_t index) { + struct amdgpu_device *adev = dev; uint8_t frev, crev; - if (cgs_atom_get_cmd_table_revs(cgs_device, + if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, - &frev, &crev) != 0) + &frev, &crev)) + return crev; + else return 0; - return crev; } /******************************************************************************* @@ -368,7 +370,7 @@ static void init_transmitter_control(struct bios_parser *bp) uint8_t crev; if (BIOS_CMD_TABLE_REVISION(UNIPHYTransmitterControl, - frev, crev) != 0) + frev, crev) == false) BREAK_TO_DEBUGGER(); switch (crev) { case 2: diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 3f63f712c8a4..752b08a42d3e 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -26,14 +26,18 @@ #include "dm_services.h" #include "ObjectID.h" -#include "atomfirmware.h" +#include "atomfirmware.h" +#include "atom.h" #include "include/bios_parser_interface.h" #include "command_table2.h" #include "command_table_helper2.h" #include "bios_parser_helper.h" #include "bios_parser_types_internal2.h" +#include "amdgpu.h" + + #define DC_LOGGER \ bp->base.ctx->logger @@ -43,16 +47,16 @@ ->FieldName)-(char *)0)/sizeof(uint16_t)) #define EXEC_BIOS_CMD_TABLE(fname, params)\ - (cgs_atom_exec_cmd_table(bp->base.ctx->cgs_device, \ + (amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GET_INDEX_INTO_MASTER_TABLE(command, fname), \ - ¶ms) == 0) + (uint32_t *)¶ms) == 0) #define BIOS_CMD_TABLE_REVISION(fname, frev, crev)\ - 
cgs_atom_get_cmd_table_revs(bp->base.ctx->cgs_device, \ + amdgpu_atom_parse_cmd_header(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \ GET_INDEX_INTO_MASTER_TABLE(command, fname), &frev, &crev) #define BIOS_CMD_TABLE_PARA_REVISION(fname)\ - bios_cmd_table_para_revision(bp->base.ctx->cgs_device, \ + bios_cmd_table_para_revision(bp->base.ctx->driver_context, \ GET_INDEX_INTO_MASTER_TABLE(command, fname)) static void init_dig_encoder_control(struct bios_parser *bp); @@ -86,16 +90,18 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp) init_get_smu_clock_info(bp); } -static uint32_t bios_cmd_table_para_revision(void *cgs_device, +static uint32_t bios_cmd_table_para_revision(void *dev, uint32_t index) { + struct amdgpu_device *adev = dev; uint8_t frev, crev; - if (cgs_atom_get_cmd_table_revs(cgs_device, + if (amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, - &frev, &crev) != 0) + &frev, &crev)) + return crev; + else return 0; - return crev; } /****************************************************************************** @@ -201,7 +207,7 @@ static void init_transmitter_control(struct bios_parser *bp) uint8_t frev; uint8_t crev; - if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) != 0) + if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false) BREAK_TO_DEBUGGER(); switch (crev) { case 6: -- cgit v1.2.1 From b3892e2bb519fe18225d0628f0dd255761f16502 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 26 Mar 2018 18:49:35 +0800 Subject: drm/amd/pp: Use atombios api directly in powerplay (v2) In order to remove the cgs wrapper functions for atombios api. v2: squash in whitespace cleanup (Alex) Reviewed-by: Huang Rui Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 145 +++++++++++---------- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 2 +- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 90 ++++++------- .../amd/powerplay/hwmgr/process_pptables_v1_0.c | 2 +- .../gpu/drm/amd/powerplay/hwmgr/processpptables.c | 4 +- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 +- drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 3 +- drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 16 +++ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 3 + .../amd/powerplay/hwmgr/vega10_processpptables.c | 2 +- .../amd/powerplay/hwmgr/vega12_processpptables.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 2 +- 12 files changed, 150 insertions(+), 124 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index c6febbf0bf69..971fb5dfb620 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -23,7 +23,7 @@ #include "pp_debug.h" #include #include - +#include "atom.h" #include "ppatomctrl.h" #include "atombios.h" #include "cgs_common.h" @@ -128,7 +128,6 @@ static int atomctrl_set_mc_reg_address_table( return 0; } - int atomctrl_initialize_mc_reg_table( struct pp_hwmgr *hwmgr, uint8_t module_index, @@ -141,7 +140,7 @@ int atomctrl_initialize_mc_reg_table( u16 size; vram_info = (ATOM_VRAM_INFO_HEADER_V2_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, VRAM_Info), &size, &frev, &crev); if (module_index >= vram_info->ucNumOfVRAMModule) { @@ -174,6 +173,8 @@ int atomctrl_set_engine_dram_timings_rv770( uint32_t engine_clock, 
uint32_t memory_clock) { + struct amdgpu_device *adev = hwmgr->adev; + SET_ENGINE_CLOCK_PS_ALLOCATION engine_clock_parameters; /* They are both in 10KHz Units. */ @@ -184,9 +185,10 @@ int atomctrl_set_engine_dram_timings_rv770( /* in 10 khz units.*/ engine_clock_parameters.sReserved.ulClock = cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); - return cgs_atom_exec_cmd_table(hwmgr->device, + + return amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), - &engine_clock_parameters); + (uint32_t *)&engine_clock_parameters); } /** @@ -203,7 +205,7 @@ static ATOM_VOLTAGE_OBJECT_INFO *get_voltage_info_table(void *device) union voltage_object_info *voltage_info; voltage_info = (union voltage_object_info *) - cgs_atom_get_data_table(device, index, + smu_atom_get_data_table(device, index, &size, &frev, &crev); if (voltage_info != NULL) @@ -247,16 +249,16 @@ int atomctrl_get_memory_pll_dividers_si( pp_atomctrl_memory_clock_param *mpll_param, bool strobe_mode) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; int result; mpll_parameters.ulClock = cpu_to_le32(clock_value); mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), - &mpll_parameters); + (uint32_t *)&mpll_parameters); if (0 == result) { mpll_param->mpll_fb_divider.clk_frac = @@ -295,14 +297,15 @@ int atomctrl_get_memory_pll_dividers_si( int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; int result; mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), - &mpll_parameters); + (uint32_t *)&mpll_parameters); if (!result) mpll_param->mpll_post_divider = @@ -315,15 +318,15 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_kong *dividers) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; int result; pll_parameters.ulClock = cpu_to_le32(clock_value); - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - &pll_parameters); + (uint32_t *)&pll_parameters); if (0 == result) { dividers->pll_post_divider = pll_parameters.ucPostDiv; @@ -338,16 +341,16 @@ int atomctrl_get_engine_pll_dividers_vi( uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - &pll_patameters); + (uint32_t *)&pll_patameters); if (0 == result) { dividers->pll_post_divider = @@ -375,16 +378,16 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr, uint32_t 
clock_value, pp_atomctrl_clock_dividers_ai *dividers) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; int result; pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value); pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - &pll_patameters); + (uint32_t *)&pll_patameters); if (0 == result) { dividers->usSclk_fcw_frac = le16_to_cpu(pll_patameters.usSclk_fcw_frac); @@ -407,6 +410,7 @@ int atomctrl_get_dfs_pll_dividers_vi( uint32_t clock_value, pp_atomctrl_clock_dividers_vi *dividers) { + struct amdgpu_device *adev = hwmgr->adev; COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; int result; @@ -414,10 +418,9 @@ int atomctrl_get_dfs_pll_dividers_vi( pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL), - &pll_patameters); + (uint32_t *)&pll_patameters); if (0 == result) { dividers->pll_post_divider = @@ -452,7 +455,7 @@ uint32_t atomctrl_get_reference_clock(struct pp_hwmgr *hwmgr) uint32_t clock; fw_info = (ATOM_FIRMWARE_INFO *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, FirmwareInfo), &size, &frev, &crev); @@ -476,7 +479,7 @@ bool atomctrl_is_voltage_controlled_by_gpio_v3( uint8_t voltage_mode) { ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = - (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); bool ret; PP_ASSERT_WITH_CODE((NULL != voltage_info), @@ -495,7 +498,7 @@ int atomctrl_get_voltage_table_v3( pp_atomctrl_voltage_table *voltage_table) { ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = - (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; unsigned int i; @@ -572,7 +575,7 @@ static ATOM_GPIO_PIN_LUT *get_gpio_lookup_table(void *device) void *table_address; table_address = (ATOM_GPIO_PIN_LUT *) - cgs_atom_get_data_table(device, + smu_atom_get_data_table(device, GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT), &size, &frev, &crev); @@ -592,7 +595,7 @@ bool atomctrl_get_pp_assign_pin( { bool bRet = false; ATOM_GPIO_PIN_LUT *gpio_lookup_table = - get_gpio_lookup_table(hwmgr->device); + get_gpio_lookup_table(hwmgr->adev); PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), "Could not find GPIO lookup Table in BIOS.", return false); @@ -613,7 +616,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( bool debug) { ATOM_ASIC_PROFILING_INFO_V3_4 *getASICProfilingInfo; - + struct amdgpu_device *adev = hwmgr->adev; EFUSE_LINEAR_FUNC_PARAM sRO_fuse; EFUSE_LINEAR_FUNC_PARAM sCACm_fuse; EFUSE_LINEAR_FUNC_PARAM sCACb_fuse; @@ -640,7 +643,7 @@ int atomctrl_calculate_voltage_evv_on_sclk( int result; getASICProfilingInfo = (ATOM_ASIC_PROFILING_INFO_V3_4 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), NULL, NULL, NULL); @@ -706,9 +709,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = 
amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -727,9 +730,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -747,9 +750,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sInput_FuseValues.ucBitLength = sCACb_fuse.ucEfuseLength; sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -768,9 +771,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -790,9 +793,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -811,9 +814,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sInput_FuseValues.ucBitLength = sKv_b_fuse.ucEfuseLength; sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -842,9 +845,9 @@ int atomctrl_calculate_voltage_evv_on_sclk( sOutput_FuseValues.sEfuse = sInput_FuseValues; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &sOutput_FuseValues); + (uint32_t *)&sOutput_FuseValues); if (result) return result; @@ -1053,8 +1056,9 @@ int atomctrl_get_voltage_evv_on_sclk( uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage) { - int result; + struct amdgpu_device *adev = hwmgr->adev; GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; + int result; get_voltage_info_param_space.ucVoltageType = voltage_type; @@ -1065,9 +1069,9 @@ int atomctrl_get_voltage_evv_on_sclk( get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - &get_voltage_info_param_space); + (uint32_t *)&get_voltage_info_param_space); if (0 != result) return result; @@ -1088,9 +1092,10 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage) { + struct amdgpu_device *adev = hwmgr->adev; + GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space; int result; int entry_id; - GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 
get_voltage_info_param_space; /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) { @@ -1111,9 +1116,9 @@ int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk); - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - &get_voltage_info_param_space); + (uint32_t *)&get_voltage_info_param_space); if (0 != result) return result; @@ -1135,7 +1140,7 @@ uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr) u16 size; fw_info = (ATOM_COMMON_TABLE_HEADER *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, FirmwareInfo), &size, &frev, &crev); @@ -1167,7 +1172,7 @@ static ATOM_ASIC_INTERNAL_SS_INFO *asic_internal_ss_get_ss_table(void *device) u16 size; table = (ATOM_ASIC_INTERNAL_SS_INFO *) - cgs_atom_get_data_table(device, + smu_atom_get_data_table(device, GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info), &size, &frev, &crev); @@ -1188,7 +1193,7 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr, memset(ssEntry, 0x00, sizeof(pp_atomctrl_internal_ss_info)); - table = asic_internal_ss_get_ss_table(hwmgr->device); + table = asic_internal_ss_get_ss_table(hwmgr->adev); if (NULL == table) return -1; @@ -1260,9 +1265,10 @@ int atomctrl_get_engine_clock_spread_spectrum( ASIC_INTERNAL_ENGINE_SS, engine_clock, ssInfo); } -int atomctrl_read_efuse(void *device, uint16_t start_index, +int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, uint16_t end_index, uint32_t mask, uint32_t *efuse) { + struct amdgpu_device *adev = hwmgr->adev; int result; READ_EFUSE_VALUE_PARAMETER efuse_param; @@ -1272,9 +1278,9 @@ int atomctrl_read_efuse(void *device, uint16_t start_index, efuse_param.sEfuse.ucBitLength = (uint8_t) ((end_index - start_index) + 1); - result = cgs_atom_exec_cmd_table(device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), - &efuse_param); + (uint32_t *)&efuse_param); if (!result) *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask; @@ -1284,6 +1290,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index, int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, uint8_t level) { + struct amdgpu_device *adev = hwmgr->adev; DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; int result; @@ -1293,10 +1300,9 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, ADJUST_MC_SETTING_PARAM; memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; - result = cgs_atom_exec_cmd_table - (hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), - &memory_clock_parameters); + (uint32_t *)&memory_clock_parameters); return result; } @@ -1304,7 +1310,7 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint32_t *voltage) { - + struct amdgpu_device *adev = hwmgr->adev; int result; GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_3 get_voltage_info_param_space; @@ -1313,9 +1319,9 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct 
pp_hwmgr *hwmgr, uint8_t voltage_ get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id); get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk); - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), - &get_voltage_info_param_space); + (uint32_t *)&get_voltage_info_param_space); if (0 != result) return result; @@ -1334,7 +1340,7 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr u16 size; ATOM_SMU_INFO_V2_1 *psmu_info = - (ATOM_SMU_INFO_V2_1 *)cgs_atom_get_data_table(hwmgr->device, + (ATOM_SMU_INFO_V2_1 *)smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, SMU_Info), &size, &frev, &crev); @@ -1362,7 +1368,7 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, return -EINVAL; profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), NULL, NULL, NULL); if (!profile) @@ -1402,7 +1408,7 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint16_t *load_line) { ATOM_VOLTAGE_OBJECT_INFO_V3_1 *voltage_info = - (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->device); + (ATOM_VOLTAGE_OBJECT_INFO_V3_1 *)get_voltage_info_table(hwmgr->adev); const ATOM_VOLTAGE_OBJECT_V3 *voltage_object; @@ -1421,16 +1427,17 @@ int atomctrl_get_svi2_info(struct pp_hwmgr *hwmgr, uint8_t voltage_type, int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id) { - int result; + struct amdgpu_device *adev = hwmgr->adev; SET_VOLTAGE_PS_ALLOCATION allocation; SET_VOLTAGE_PARAMETERS_V1_3 *voltage_parameters = (SET_VOLTAGE_PARAMETERS_V1_3 *)&allocation.sASICSetVoltage; + int result; voltage_parameters->ucVoltageMode = ATOM_GET_LEAKAGE_ID; - result = cgs_atom_exec_cmd_table(hwmgr->device, + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, SetVoltage), - voltage_parameters); + (uint32_t *)voltage_parameters); *virtual_voltage_id = voltage_parameters->usVoltageLevel; @@ -1453,7 +1460,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, ix = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo); profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); if (!profile) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index c44a92064cf1..c672a5069840 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -298,7 +298,7 @@ extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_kong *dividers); -extern int atomctrl_read_efuse(void *device, uint16_t start_index, +extern int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, uint16_t end_index, uint32_t mask, uint32_t *efuse); extern int atomctrl_calculate_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage, uint16_t dpm_level, bool debug); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index ad42caac033e..0adaf36b6d68 100644 --- 
a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -23,9 +23,9 @@ #include "ppatomfwctrl.h" #include "atomfirmware.h" +#include "atom.h" #include "pp_debug.h" - static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4( const struct atom_voltage_objects_info_v4_1 *voltage_object_info_table, uint8_t voltage_type, uint8_t voltage_mode) @@ -38,35 +38,34 @@ static const union atom_voltage_object_v4 *pp_atomfwctrl_lookup_voltage_type_v4( while (offset < size) { const union atom_voltage_object_v4 *voltage_object = - (const union atom_voltage_object_v4 *)(start + offset); + (const union atom_voltage_object_v4 *)(start + offset); - if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type && - voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode) - return voltage_object; + if (voltage_type == voltage_object->gpio_voltage_obj.header.voltage_type && + voltage_mode == voltage_object->gpio_voltage_obj.header.voltage_mode) + return voltage_object; - offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size); + offset += le16_to_cpu(voltage_object->gpio_voltage_obj.header.object_size); - } + } - return NULL; + return NULL; } static struct atom_voltage_objects_info_v4_1 *pp_atomfwctrl_get_voltage_info_table( struct pp_hwmgr *hwmgr) { - const void *table_address; - uint16_t idx; + const void *table_address; + uint16_t idx; - idx = GetIndexIntoMasterDataTable(voltageobject_info); - table_address = cgs_atom_get_data_table(hwmgr->device, - idx, NULL, NULL, NULL); + idx = GetIndexIntoMasterDataTable(voltageobject_info); + table_address = smu_atom_get_data_table(hwmgr->adev, + idx, NULL, NULL, NULL); - PP_ASSERT_WITH_CODE( - table_address, - "Error retrieving BIOS Table Address!", - return NULL); + PP_ASSERT_WITH_CODE(table_address, + "Error retrieving BIOS Table Address!", + return NULL); - return (struct atom_voltage_objects_info_v4_1 *)table_address; + return (struct atom_voltage_objects_info_v4_1 *)table_address; } /** @@ -167,7 +166,7 @@ static struct atom_gpio_pin_lut_v2_1 *pp_atomfwctrl_get_gpio_lookup_table( uint16_t idx; idx = GetIndexIntoMasterDataTable(gpio_pin_lut); - table_address = cgs_atom_get_data_table(hwmgr->device, + table_address = smu_atom_get_data_table(hwmgr->adev, idx, NULL, NULL, NULL); PP_ASSERT_WITH_CODE(table_address, "Error retrieving BIOS Table Address!", @@ -248,28 +247,30 @@ int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr, uint32_t clock_type, uint32_t clock_value, struct pp_atomfwctrl_clock_dividers_soc15 *dividers) { + struct amdgpu_device *adev = hwmgr->adev; struct compute_gpu_clock_input_parameter_v1_8 pll_parameters; struct compute_gpu_clock_output_parameter_v1_8 *pll_output; - int result; uint32_t idx; pll_parameters.gpuclock_10khz = (uint32_t)clock_value; pll_parameters.gpu_clock_type = clock_type; idx = GetIndexIntoMasterCmdTable(computegpuclockparam); - result = cgs_atom_exec_cmd_table(hwmgr->device, idx, &pll_parameters); - - if (!result) { - pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *) - &pll_parameters; - dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz); - dividers->ulDid = le32_to_cpu(pll_output->dfs_did); - dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult); - dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult); - dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac); - dividers->ucPll_ss_enable = pll_output->pll_ss_enable; - } - return 
result; + + if (amdgpu_atom_execute_table( + adev->mode_info.atom_context, idx, (uint32_t *)&pll_parameters)) + return -EINVAL; + + pll_output = (struct compute_gpu_clock_output_parameter_v1_8 *) + &pll_parameters; + dividers->ulClock = le32_to_cpu(pll_output->gpuclock_10khz); + dividers->ulDid = le32_to_cpu(pll_output->dfs_did); + dividers->ulPll_fb_mult = le32_to_cpu(pll_output->pll_fb_mult); + dividers->ulPll_ss_fbsmult = le32_to_cpu(pll_output->pll_ss_fbsmult); + dividers->usPll_ss_slew_frac = le16_to_cpu(pll_output->pll_ss_slew_frac); + dividers->ucPll_ss_enable = pll_output->pll_ss_enable; + + return 0; } int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, @@ -283,7 +284,7 @@ int pp_atomfwctrl_get_avfs_information(struct pp_hwmgr *hwmgr, idx = GetIndexIntoMasterDataTable(asic_profiling_info); profile = (struct atom_asic_profiling_info_v4_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, idx, NULL, NULL, NULL); if (!profile) @@ -467,7 +468,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, idx = GetIndexIntoMasterDataTable(smu_info); info = (struct atom_smu_info_v3_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, idx, NULL, NULL, NULL); if (!info) { @@ -489,6 +490,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency) { + struct amdgpu_device *adev = hwmgr->adev; struct atom_get_smu_clock_info_parameters_v3_1 parameters; struct atom_get_smu_clock_info_output_parameters_v3_1 *output; uint32_t ix; @@ -497,13 +499,13 @@ int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLK parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; ix = GetIndexIntoMasterCmdTable(getsmuclockinfo); - if (!cgs_atom_exec_cmd_table(hwmgr->device, ix, ¶meters)) { - output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)¶meters; - *frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000; - } else { - pr_info("Error execute_table getsmuclockinfo!"); - return -1; - } + + if (amdgpu_atom_execute_table( + adev->mode_info.atom_context, ix, (uint32_t *)¶meters)) + return -EINVAL; + + output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)¶meters; + *frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000; return 0; } @@ -517,7 +519,7 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, ix = GetIndexIntoMasterDataTable(firmwareinfo); info = (struct atom_firmware_info_v3_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); if (!info) { @@ -553,7 +555,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, ix = GetIndexIntoMasterDataTable(smc_dpm_info); info = (struct atom_smc_dpm_info_v4_1 *) - cgs_atom_get_data_table(hwmgr->device, + smu_atom_get_data_table(hwmgr->adev, ix, NULL, NULL, NULL); if (!info) { pr_info("Error retrieving BIOS Table Address!"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index c9eecce5683f..8516516eb6cc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -141,7 +141,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) if (!table_address) { table_address = (ATOM_Tonga_POWERPLAYTABLE *) - cgs_atom_get_data_table(hwmgr->device, 
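/*
 * Illustrative sketch, not part of this patch: the conversion applied in
 * these hunks is mechanical.  BIOS data tables are now fetched through the
 * new smu_atom_get_data_table() helper keyed off hwmgr->adev, and command
 * tables are executed with amdgpu_atom_execute_table() against
 * adev->mode_info.atom_context, with the parameter block cast to
 * uint32_t *.  The names below mirror callers converted above; the local
 * adev variable is assumed to be declared as in those callers:
 *
 *	struct amdgpu_device *adev = hwmgr->adev;
 *
 *	fw_info = (ATOM_COMMON_TABLE_HEADER *)
 *		smu_atom_get_data_table(hwmgr->adev,
 *			GetIndexIntoMasterTable(DATA, FirmwareInfo),
 *			&size, &frev, &crev);
 *
 *	result = amdgpu_atom_execute_table(adev->mode_info.atom_context,
 *			GetIndexIntoMasterTable(COMMAND, SetVoltage),
 *			(uint32_t *)voltage_parameters);
 */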
+ smu_atom_get_data_table(hwmgr->adev, index, &size, &frev, &crev); hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ hwmgr->soft_pp_table_size = size; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 36ca7c419c90..ce64dfabd34b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -837,7 +837,7 @@ static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( hwmgr->soft_pp_table = &soft_dummy_pp_table[0]; hwmgr->soft_pp_table_size = sizeof(soft_dummy_pp_table); } else { - table_addr = cgs_atom_get_data_table(hwmgr->device, + table_addr = smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, PowerPlayInfo), &size, &frev, &crev); hwmgr->soft_pp_table = table_addr; @@ -1058,7 +1058,7 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, return 0; /* We assume here that fw_info is unchanged if this call fails.*/ - fw_info = cgs_atom_get_data_table(hwmgr->device, + fw_info = smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, FirmwareInfo), &size, &frev, &crev); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index add90675fd2a..9087ef91b50b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2957,8 +2957,7 @@ static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr, /* First retrieve the Boot clocks and VDDC from the firmware info table. * We assume here that fw_info is unchanged if this call fails. */ - fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)cgs_atom_get_data_table( - hwmgr->device, index, + fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index, &size, &frev, &crev); if (!fw_info) /* During a test, there is no firmware info table. 
*/ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index 7b26607c646a..3ac07fabbe5c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -314,8 +314,7 @@ static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr) uint8_t frev, crev; uint16_t size; - info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table( - hwmgr->device, + info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev, GetIndexIntoMasterTable(DATA, IntegratedSystemInfo), &size, &frev, &crev); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 598122854ab5..529be3cd768a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -24,6 +24,7 @@ #include "pp_debug.h" #include "ppatomctrl.h" #include "ppsmc.h" +#include "atom.h" uint8_t convert_to_vid(uint16_t vddc) { @@ -608,3 +609,18 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr) return 0; } + +void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, + uint8_t *frev, uint8_t *crev) +{ + struct amdgpu_device *adev = dev; + uint16_t data_start; + + if (amdgpu_atom_parse_data_header( + adev->mode_info.atom_context, table, size, + frev, crev, &data_start)) + return (uint8_t *)adev->mode_info.atom_context->bios + + data_start; + + return NULL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h index d37d16e4b613..14ee162ac92a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h @@ -82,6 +82,9 @@ int phm_irq_process(struct amdgpu_device *adev, int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr); +void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, + uint8_t *frev, uint8_t *crev); + #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index c61d0744860d..0768d259c07c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -52,7 +52,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) if (!table_address) { table_address = (ATOM_Vega10_POWERPLAYTABLE *) - cgs_atom_get_data_table(hwmgr->device, index, + smu_atom_get_data_table(hwmgr->adev, index, &size, &frev, &crev); hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index b34113f45904..7fa1ba89ac54 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c @@ -51,7 +51,7 @@ static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) if (!table_address) { table_address = (ATOM_Vega12_POWERPLAYTABLE *) - cgs_atom_get_data_table(hwmgr->device, index, + smu_atom_get_data_table(hwmgr->adev, index, &size, &frev, &crev); hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 35b947e5292c..1eec527add99 100644 --- 
a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -337,7 +337,7 @@ static bool fiji_is_hw_avfs_present(struct pp_hwmgr *hwmgr) if (!hwmgr->not_vf) return false; - if (!atomctrl_read_efuse(hwmgr->device, AVFS_EN_LSB, AVFS_EN_MSB, + if (!atomctrl_read_efuse(hwmgr, AVFS_EN_LSB, AVFS_EN_MSB, mask, &efuse)) { if (efuse) return true; -- cgit v1.2.1 From d91ea4969bc5edbbe3bd723a1b3ae7d947f62a5a Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 26 Mar 2018 22:08:29 +0800 Subject: drm/amdgpu: Set pm_display_cfg in non-dc mode those display informations are needed by powerplay. Reviewed-by: Huang Rui Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 20 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 31 ++++++++++++++++--------------- 3 files changed, 37 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index e997ebbe43ea..def1010ac05e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -115,6 +115,26 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, pr_cont("\n"); } +void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) +{ + struct drm_device *ddev = adev->ddev; + struct drm_crtc *crtc; + struct amdgpu_crtc *amdgpu_crtc; + + adev->pm.dpm.new_active_crtcs = 0; + adev->pm.dpm.new_active_crtc_count = 0; + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + if (amdgpu_crtc->enabled) { + adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); + adev->pm.dpm.new_active_crtc_count++; + } + } + } +} + u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 643d008410c6..b8c5177fa809 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -482,6 +482,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps); u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); +void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); bool amdgpu_is_uvd_state(u32 class, u32 class2); void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, u32 *p, u32 *u); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 361975cf45a9..e6e365852f11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1658,9 +1658,6 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) { - struct drm_device *ddev = adev->ddev; - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; int i = 0; if (!adev->pm.dpm_enabled) @@ -1675,22 +1672,26 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) amdgpu_fence_wait_empty(ring); } + if (!amdgpu_device_has_dc_support(adev)) { + mutex_lock(&adev->pm.mutex); + amdgpu_dpm_get_active_displays(adev); + adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs; + adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); + adev->pm.pm_display_cfg.min_vblank_time = 
amdgpu_dpm_get_vblank_time(adev); + /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ + if (adev->pm.pm_display_cfg.vrefresh > 120) + adev->pm.pm_display_cfg.min_vblank_time = 0; + if (adev->powerplay.pp_funcs->display_configuration_change) + adev->powerplay.pp_funcs->display_configuration_change( + adev->powerplay.pp_handle, + &adev->pm.pm_display_cfg); + mutex_unlock(&adev->pm.mutex); + } + if (adev->powerplay.pp_funcs->dispatch_tasks) { amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); } else { mutex_lock(&adev->pm.mutex); - adev->pm.dpm.new_active_crtcs = 0; - adev->pm.dpm.new_active_crtc_count = 0; - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (amdgpu_crtc->enabled) { - adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); - adev->pm.dpm.new_active_crtc_count++; - } - } - } /* update battery/ac status */ if (power_supply_is_system_supplied() > 0) adev->pm.dpm.ac_power = true; -- cgit v1.2.1 From 555fd70c59bc7f7acd8bc429d92bd59a66a7b83b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 27 Mar 2018 13:32:02 +0800 Subject: drm/amd/pp: Not call cgs interface to get display info DC/Non DC all will update display configuration when the display state changed No need to get display info through cgs interface Reviewed-by: Evan Quan Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 1 + .../gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 8 ++-- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 2 +- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 56 ++++++---------------- drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 14 ++---- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 51 ++++++-------------- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 29 ++++------- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 4 +- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 4 +- .../gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 6 +-- .../drm/amd/powerplay/smumgr/polaris10_smumgr.c | 8 ++-- .../gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 12 ++--- 13 files changed, 61 insertions(+), 136 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index b91ef113a490..1ca6a13be6a3 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -54,6 +54,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; hwmgr->feature_mask = amdgpu_pp_feature_mask; + hwmgr->display_config = &adev->pm.pm_display_cfg; adev->powerplay.pp_handle = hwmgr; adev->powerplay.pp_funcs = &pp_dpm_funcs; return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index dcceadb2e172..e411012b3dcb 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -265,13 +265,11 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, if (display_config == NULL) return -EINVAL; - hwmgr->display_config = *display_config; - if (NULL != hwmgr->hwmgr_func->set_deep_sleep_dcefclk) - hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, 
hwmgr->display_config.min_dcef_deep_sleep_set_clk); + hwmgr->hwmgr_func->set_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk); - for (index = 0; index < hwmgr->display_config.num_path_including_non_display; index++) { - if (hwmgr->display_config.displays[index].controller_id != 0) + for (index = 0; index < display_config->num_path_including_non_display; index++) { + if (display_config->displays[index].controller_id != 0) number_of_active_display++; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 10253b89b3d8..055358b95fdf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -161,7 +161,7 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input) struct PP_Clocks clocks = {0}; struct pp_display_clock_request clock_req; - clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; + clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; clock_req.clock_type = amd_pp_dcf_clock; clock_req.clock_freq_in_khz = clocks.dcefClock * 10; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 9087ef91b50b..14332159227e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -2777,8 +2777,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct PP_Clocks minimum_clocks = {0}; bool disable_mclk_switching; bool disable_mclk_switching_for_frame_lock; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info = {0}; const struct phm_clock_and_voltage_limits *max_limits; uint32_t i; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -2787,7 +2785,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, int32_t count; int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; - info.mode_info = &mode_info; data->battery_state = (PP_StateUILabel_Battery == request_ps->classification.ui_label); @@ -2809,10 +2806,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, } } - cgs_get_active_displays_info(hwmgr->device, &info); - - minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; - minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; + minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { @@ -2843,12 +2838,12 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); - if (info.display_count == 0) + if (hwmgr->display_config->num_display == 0) disable_mclk_switching = false; else - disable_mclk_switching = ((1 < info.display_count) || + disable_mclk_switching = ((1 < hwmgr->display_config->num_display) || disable_mclk_switching_for_frame_lock || - smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us)); + smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time)); sclk = smu7_ps->performance_levels[0].engine_clock; mclk = smu7_ps->performance_levels[0].memory_clock; @@ -3479,7 +3474,6 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons [smu7_ps->performance_level_count - 1].memory_clock; struct PP_Clocks min_clocks = {0}; uint32_t i; - struct cgs_display_info info = {0}; for (i = 0; i < sclk_table->count; 
i++) { if (sclk == sclk_table->dpm_levels[i].value) @@ -3506,9 +3500,8 @@ static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, cons if (i >= mclk_table->count) data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - cgs_get_active_displays_info(hwmgr->device, &info); - if (data->display_timing.num_existing_displays != info.display_count) + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK; return 0; @@ -3907,15 +3900,8 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display) static int smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) { - uint32_t num_active_displays = 0; - struct cgs_display_info info = {0}; - - info.mode_info = NULL; - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_displays = info.display_count; - - if (num_active_displays > 1 && hwmgr->display_config.multi_monitor_in_sync != true) + if (hwmgr->display_config->num_display > 1 && + !hwmgr->display_config->multi_monitor_in_sync) smu7_notify_smc_display_change(hwmgr, false); return 0; @@ -3930,33 +3916,24 @@ smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr) static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - uint32_t num_active_displays = 0; uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL); uint32_t display_gap2; uint32_t pre_vbi_time_in_us; uint32_t frame_time_in_us; - uint32_t ref_clock; - uint32_t refresh_rate = 0; - struct cgs_display_info info = {0}; - struct cgs_mode_info mode_info = {0}; + uint32_t ref_clock, refresh_rate; - info.mode_info = &mode_info; - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_displays = info.display_count; - - display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (num_active_displays > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); + display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? 
DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap); ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); - - refresh_rate = mode_info.refresh_rate; + refresh_rate = hwmgr->display_config->vrefresh; if (0 == refresh_rate) refresh_rate = 60; frame_time_in_us = 1000000 / refresh_rate; - pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; + pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time; data->frame_time_x2 = frame_time_in_us * 2 / 100; @@ -4036,17 +4013,14 @@ smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); bool is_update_required = false; - struct cgs_display_info info = {0, 0, NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - if (data->display_timing.num_existing_displays != info.display_count) + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) is_update_required = true; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr && + if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr && (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK || - hwmgr->display_config.min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) + hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK)) is_update_required = true; } return is_update_required; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index 3ac07fabbe5c..c2f93aa1d2e8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -693,7 +693,7 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr) else data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk; - clock = hwmgr->display_config.min_core_set_clock; + clock = hwmgr->display_config->min_core_set_clock; if (clock == 0) pr_debug("min_core_set_clock not set\n"); @@ -748,7 +748,7 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { - uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr; + uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr; if (clks == 0) clks = SMU8_MIN_DEEP_SLEEP_SCLK; @@ -1040,25 +1040,21 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct smu8_hwmgr *data = hwmgr->backend; struct PP_Clocks clocks = {0, 0, 0, 0}; bool force_high; - uint32_t num_of_active_displays = 0; - struct cgs_display_info info = {0}; smu8_ps->need_dfs_bypass = true; data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label); - clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ? - hwmgr->display_config.min_mem_set_clock : + clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ? 
+ hwmgr->display_config->min_mem_set_clock : data->sys_info.nbp_memory_clock[1]; - cgs_get_active_displays_info(hwmgr->device, &info); - num_of_active_displays = info.display_count; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk; force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1]) - || (num_of_active_displays >= 3); + || (hwmgr->display_config->num_display >= 3); smu8_ps->action = smu8_current_ps->action; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 7cbb56ba6fab..c9fb4b2cf5c6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3028,7 +3028,6 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, bool disable_mclk_switching_for_frame_lock; bool disable_mclk_switching_for_vr; bool force_mclk_high; - struct cgs_display_info info = {0}; const struct phm_clock_and_voltage_limits *max_limits; uint32_t i; struct vega10_hwmgr *data = hwmgr->backend; @@ -3063,11 +3062,9 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, } } - cgs_get_active_displays_info(hwmgr->device, &info); - /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ - minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; - minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock; + minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; if (PP_CAP(PHM_PlatformCaps_StablePState)) { stable_pstate_sclk_dpm_percentage = @@ -3107,10 +3104,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR); force_mclk_high = PP_CAP(PHM_PlatformCaps_ForceMclkHigh); - if (info.display_count == 0) + if (hwmgr->display_config->num_display == 0) disable_mclk_switching = false; else - disable_mclk_switching = (info.display_count > 1) || + disable_mclk_switching = (hwmgr->display_config->num_display > 1) || disable_mclk_switching_for_frame_lock || disable_mclk_switching_for_vr || force_mclk_high; @@ -3186,7 +3183,6 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co [vega10_ps->performance_level_count - 1].mem_clock; struct PP_Clocks min_clocks = {0}; uint32_t i; - struct cgs_display_info info = {0}; data->need_update_dpm_table = 0; @@ -3211,10 +3207,8 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; } - cgs_get_active_displays_info(hwmgr->device, &info); - if (data->display_timing.num_existing_displays != - info.display_count) + hwmgr->display_config->num_display) data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; } else { for (i = 0; i < sclk_table->count; i++) { @@ -3242,13 +3236,11 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co break; } - cgs_get_active_displays_info(hwmgr->device, &info); - if (i >= mclk_table->count) data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; if (data->display_timing.num_existing_displays != - info.display_count || + hwmgr->display_config->num_display || i >= mclk_table->count) data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; } @@ -3956,26 +3948,18 @@ static int vega10_notify_smc_display_config_after_ps_adjustment( (struct 
phm_ppt_v2_information *)hwmgr->pptable; struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk; uint32_t idx; - uint32_t num_active_disps = 0; - struct cgs_display_info info = {0}; struct PP_Clocks min_clocks = {0}; uint32_t i; struct pp_display_clock_request clock_req; - info.mode_info = NULL; - - cgs_get_active_displays_info(hwmgr->device, &info); - - num_active_disps = info.display_count; - - if (num_active_disps > 1) + if (hwmgr->display_config->num_display > 1) vega10_notify_smc_display_change(hwmgr, false); else vega10_notify_smc_display_change(hwmgr, true); - min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; - min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; - min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; + min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; + min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; for (i = 0; i < dpm_table->count; i++) { if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock) @@ -4501,10 +4485,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = hwmgr->backend; - int result = 0; - uint32_t num_turned_on_displays = 1; Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); - struct cgs_display_info info = {0}; + int result = 0; if ((data->water_marks_bitmap & WaterMarksExist) && !(data->water_marks_bitmap & WaterMarksLoaded)) { @@ -4514,10 +4496,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) } if (data->water_marks_bitmap & WaterMarksLoaded) { - cgs_get_active_displays_info(hwmgr->device, &info); - num_turned_on_displays = info.display_count; smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, num_turned_on_displays); + PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display); } return result; @@ -4603,15 +4583,12 @@ vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg { struct vega10_hwmgr *data = hwmgr->backend; bool is_update_required = false; - struct cgs_display_info info = {0, 0, NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - if (data->display_timing.num_existing_displays != info.display_count) + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) is_update_required = true; if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) { - if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr) + if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr) is_update_required = true; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 200de46bd06b..6a85238ae20f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -1260,23 +1260,18 @@ static int vega12_notify_smc_display_config_after_ps_adjustment( { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); - uint32_t num_active_disps = 0; - struct cgs_display_info info = {0}; struct PP_Clocks min_clocks = {0}; struct pp_display_clock_request clock_req; uint32_t clk_request; - info.mode_info = NULL; - cgs_get_active_displays_info(hwmgr->device, &info); - num_active_disps = info.display_count; - if (num_active_disps > 1) + if 
(hwmgr->display_config->num_display > 1) vega12_notify_smc_display_change(hwmgr, false); else vega12_notify_smc_display_change(hwmgr, true); - min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; - min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; - min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; + min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk; + min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk; + min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock; if (data->smu_features[GNLD_DPM_DCEFCLK].supported) { clock_req.clock_type = amd_pp_dcef_clock; @@ -1832,9 +1827,7 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr) { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); int result = 0; - uint32_t num_turned_on_displays = 1; Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); - struct cgs_display_info info = {0}; if ((data->water_marks_bitmap & WaterMarksExist) && !(data->water_marks_bitmap & WaterMarksLoaded)) { @@ -1846,12 +1839,9 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr) if ((data->water_marks_bitmap & WaterMarksExist) && data->smu_features[GNLD_DPM_DCEFCLK].supported && - data->smu_features[GNLD_DPM_SOCCLK].supported) { - cgs_get_active_displays_info(hwmgr->device, &info); - num_turned_on_displays = info.display_count; + data->smu_features[GNLD_DPM_SOCCLK].supported) smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_NumOfDisplays, num_turned_on_displays); - } + PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display); return result; } @@ -1894,15 +1884,12 @@ vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmg { struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend); bool is_update_required = false; - struct cgs_display_info info = {0, 0, NULL}; - - cgs_get_active_displays_info(hwmgr->device, &info); - if (data->display_timing.num_existing_displays != info.display_count) + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) is_update_required = true; if (data->registry_data.gfx_clk_deep_sleep_support) { - if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr) + if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr) is_update_required = true; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index d5cadc61c9b3..e450ec74d6ed 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -765,7 +765,7 @@ struct pp_hwmgr { struct pp_power_state *request_ps; struct pp_power_state *boot_ps; struct pp_power_state *uvd_ps; - struct amd_pp_display_configuration display_config; + const struct amd_pp_display_configuration *display_config; uint32_t feature_mask; bool avfs_supported; /* UMD Pstate */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index e30a2eea1fba..c28b95fd1c85 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -1182,7 +1182,6 @@ static int ci_populate_single_memory_level( struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; bool dll_state_on; - struct cgs_display_info info = {0}; uint32_t mclk_edc_wr_enable_threshold = 40000; uint32_t mclk_edc_enable_threshold = 
40000; uint32_t mclk_strobe_mode_threshold = 40000; @@ -1236,8 +1235,7 @@ static int ci_populate_single_memory_level( /* default set to low watermark. Highest level will be set to high later.*/ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; /* stutter mode not support on ci */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 1eec527add99..d023494c3eae 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -988,11 +988,11 @@ static int fiji_populate_single_graphic_level(struct pp_hwmgr *hwmgr, threshold = clock * data->fast_watermark_threshold / 100; - data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; + data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, - hwmgr->display_config.min_core_set_clock_in_sr); + hwmgr->display_config->min_core_set_clock_in_sr); /* Default to slow, highest DPM level will be diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index d4bb934e7334..bc05e355012d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -932,7 +932,7 @@ static int iceland_populate_single_graphic_level(struct pp_hwmgr *hwmgr, graphic_level->PowerThrottle = 0; data->display_timing.min_clock_in_sr = - hwmgr->display_config.min_core_set_clock_in_sr; + hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) @@ -1236,7 +1236,6 @@ static int iceland_populate_single_memory_level( struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); int result = 0; bool dll_state_on; - struct cgs_display_info info = {0}; uint32_t mclk_edc_wr_enable_threshold = 40000; uint32_t mclk_edc_enable_threshold = 40000; uint32_t mclk_strobe_mode_threshold = 40000; @@ -1283,8 +1282,7 @@ static int iceland_populate_single_memory_level( /* default set to low watermark. 
Highest level will be set to high later.*/ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; /* stutter mode not support on iceland */ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index 05e60e8fee0b..d9192286099d 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -942,11 +942,11 @@ static int polaris10_populate_single_graphic_level(struct pp_hwmgr *hwmgr, level->DownHyst = data->current_profile_setting.sclk_down_hyst; level->VoltageDownHyst = 0; level->PowerThrottle = 0; - data->display_timing.min_clock_in_sr = hwmgr->display_config.min_core_set_clock_in_sr; + data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) level->DeepSleepDivId = smu7_get_sleep_divider_id_from_clock(clock, - hwmgr->display_config.min_core_set_clock_in_sr); + hwmgr->display_config->min_core_set_clock_in_sr); /* Default to slow, highest DPM level will be * set to PPSMC_DISPLAY_WATERMARK_LOW later. @@ -1076,11 +1076,9 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); int result = 0; - struct cgs_display_info info = {0, 0, NULL}; uint32_t mclk_stutter_mode_threshold = 40000; phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL; - cgs_get_active_displays_info(hwmgr->device, &info); if (hwmgr->od_enabled) vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk; @@ -1106,7 +1104,7 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, mem_level->StutterEnable = false; mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - data->display_timing.num_existing_displays = info.display_count; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; if (mclk_stutter_mode_threshold && (clock <= mclk_stutter_mode_threshold) && diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 2ba05d2b4302..94ba304ff52e 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -650,7 +650,7 @@ static int tonga_populate_single_graphic_level(struct pp_hwmgr *hwmgr, graphic_level->PowerThrottle = 0; data->display_timing.min_clock_in_sr = - hwmgr->display_config.min_core_set_clock_in_sr; + hwmgr->display_config->min_core_set_clock_in_sr; if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) @@ -956,18 +956,17 @@ static int tonga_populate_single_memory_level( SMU72_Discrete_MemoryLevel *memory_level ) { - uint32_t mvdd = 0; struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - int result = 0; - bool dll_state_on; - struct cgs_display_info info = {0}; uint32_t mclk_edc_wr_enable_threshold = 40000; uint32_t mclk_stutter_mode_threshold = 30000; uint32_t mclk_edc_enable_threshold = 40000; uint32_t mclk_strobe_mode_threshold = 40000; 
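/*
 * Illustrative sketch, not part of this patch: after these hunks, hwmgr
 * code reads display state from the cached configuration instead of
 * calling cgs_get_active_displays_info().  hwmgr->display_config points at
 * adev->pm.pm_display_cfg, which the non-DC path fills in
 * amdgpu_pm_compute_clocks() (see the pm_display_cfg patch above) and DC
 * updates through its own display-configuration callback.  Typical
 * accesses, mirroring the surrounding hunks:
 *
 *	data->display_timing.num_existing_displays =
 *			hwmgr->display_config->num_display;
 *
 *	if (hwmgr->display_config->num_display > 1 &&
 *	    !hwmgr->display_config->multi_monitor_in_sync)
 *		smu7_notify_smc_display_change(hwmgr, false);
 */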
phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table = NULL; + int result = 0; + bool dll_state_on; + uint32_t mvdd = 0; if (hwmgr->od_enabled) vdd_dep_table = (phm_ppt_v1_clock_voltage_dependency_table *)&data->odn_dpm_table.vdd_dependency_on_mclk; @@ -1008,8 +1007,7 @@ static int tonga_populate_single_memory_level( /* default set to low watermark. Highest level will be set to high later.*/ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; - cgs_get_active_displays_info(hwmgr->device, &info); - data->display_timing.num_existing_displays = info.display_count; + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; if ((mclk_stutter_mode_threshold != 0) && (memory_clock <= mclk_stutter_mode_threshold) && -- cgit v1.2.1 From 10b3f45c4a189ffa0e0b9566fce7a0b65b289322 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 22 Mar 2018 19:32:45 +0800 Subject: drm/amdgpu: Delete some cgs functions Drop cgs wrappers that are no longer used. 1. cgs_rel_firmwar 2. cgs_is_virtualization_enabled 3. cgs_notify_dpm_enabled 4. cgs_atom_get_data_table 5. cgs_atom_get_cmd_table_revs 6. cgs_atom_exec_cmd_table 7. cgs_get_active_displays_info Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 197 ------------------------------- drivers/gpu/drm/amd/include/cgs_common.h | 139 ---------------------- 2 files changed, 336 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 71a57b2f7f04..dc28fa63bf51 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -23,7 +23,6 @@ */ #include #include -#include #include #include #include @@ -109,78 +108,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, WARN(1, "Invalid indirect register space"); } -static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device, - enum cgs_resource_type resource_type, - uint64_t size, - uint64_t offset, - uint64_t *resource_base) -{ - CGS_FUNC_ADEV; - - if (resource_base == NULL) - return -EINVAL; - - switch (resource_type) { - case CGS_RESOURCE_TYPE_MMIO: - if (adev->rmmio_size == 0) - return -ENOENT; - if ((offset + size) > adev->rmmio_size) - return -EINVAL; - *resource_base = adev->rmmio_base; - return 0; - case CGS_RESOURCE_TYPE_DOORBELL: - if (adev->doorbell.size == 0) - return -ENOENT; - if ((offset + size) > adev->doorbell.size) - return -EINVAL; - *resource_base = adev->doorbell.base; - return 0; - case CGS_RESOURCE_TYPE_FB: - case CGS_RESOURCE_TYPE_IO: - case CGS_RESOURCE_TYPE_ROM: - default: - return -EINVAL; - } -} - -static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device, - unsigned table, uint16_t *size, - uint8_t *frev, uint8_t *crev) -{ - CGS_FUNC_ADEV; - uint16_t data_start; - - if (amdgpu_atom_parse_data_header( - adev->mode_info.atom_context, table, size, - frev, crev, &data_start)) - return (uint8_t*)adev->mode_info.atom_context->bios + - data_start; - - return NULL; -} - -static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table, - uint8_t *frev, uint8_t *crev) -{ - CGS_FUNC_ADEV; - - if (amdgpu_atom_parse_cmd_header( - adev->mode_info.atom_context, table, - frev, crev)) - return 0; - - return -EINVAL; -} - -static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table, - void *args) -{ - CGS_FUNC_ADEV; - - return amdgpu_atom_execute_table( - 
adev->mode_info.atom_context, table, args); -} - static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_clockgating_state state) @@ -223,7 +150,6 @@ static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device, return r; } - static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) { CGS_FUNC_ADEV; @@ -271,18 +197,6 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) return result; } -static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) -{ - CGS_FUNC_ADEV; - if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - return 0; - } - /* cannot release other firmware because they are not created by cgs */ - return -EINVAL; -} - static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device, enum cgs_ucode_id type) { @@ -326,34 +240,6 @@ static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device, return fw_version; } -static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device, - bool en) -{ - CGS_FUNC_ADEV; - - if (adev->gfx.rlc.funcs->enter_safe_mode == NULL || - adev->gfx.rlc.funcs->exit_safe_mode == NULL) - return 0; - - if (en) - adev->gfx.rlc.funcs->enter_safe_mode(adev); - else - adev->gfx.rlc.funcs->exit_safe_mode(adev); - - return 0; -} - -static void amdgpu_cgs_lock_grbm_idx(struct cgs_device *cgs_device, - bool lock) -{ - CGS_FUNC_ADEV; - - if (lock) - mutex_lock(&adev->grbm_idx_mutex); - else - mutex_unlock(&adev->grbm_idx_mutex); -} - static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info) @@ -598,97 +484,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, return 0; } -static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device) -{ - CGS_FUNC_ADEV; - return amdgpu_sriov_vf(adev); -} - -static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device, - struct cgs_display_info *info) -{ - CGS_FUNC_ADEV; - struct cgs_mode_info *mode_info; - - if (info == NULL) - return -EINVAL; - - mode_info = info->mode_info; - if (mode_info) - /* if the displays are off, vblank time is max */ - mode_info->vblank_time_us = 0xffffffff; - - if (!amdgpu_device_has_dc_support(adev)) { - struct amdgpu_crtc *amdgpu_crtc; - struct drm_device *ddev = adev->ddev; - struct drm_crtc *crtc; - uint32_t line_time_us, vblank_lines; - - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled) { - info->active_display_mask |= (1 << amdgpu_crtc->crtc_id); - info->display_count++; - } - if (mode_info != NULL && - crtc->enabled && amdgpu_crtc->enabled && - amdgpu_crtc->hw_mode.clock) { - line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / - amdgpu_crtc->hw_mode.clock; - vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - - amdgpu_crtc->hw_mode.crtc_vdisplay + - (amdgpu_crtc->v_border * 2); - mode_info->vblank_time_us = vblank_lines * line_time_us; - mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - /* we have issues with mclk switching with refresh rates - * over 120 hz on the non-DC code. 
- */ - if (mode_info->refresh_rate > 120) - mode_info->vblank_time_us = 0; - mode_info = NULL; - } - } - } - } else { - info->display_count = adev->pm.pm_display_cfg.num_display; - if (mode_info != NULL) { - mode_info->vblank_time_us = adev->pm.pm_display_cfg.min_vblank_time; - mode_info->refresh_rate = adev->pm.pm_display_cfg.vrefresh; - } - } - return 0; -} - - -static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled) -{ - CGS_FUNC_ADEV; - - adev->pm.dpm_enabled = enabled; - - return 0; -} - static const struct cgs_ops amdgpu_cgs_ops = { .read_register = amdgpu_cgs_read_register, .write_register = amdgpu_cgs_write_register, .read_ind_register = amdgpu_cgs_read_ind_register, .write_ind_register = amdgpu_cgs_write_ind_register, - .get_pci_resource = amdgpu_cgs_get_pci_resource, - .atom_get_data_table = amdgpu_cgs_atom_get_data_table, - .atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs, - .atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table, .get_firmware_info = amdgpu_cgs_get_firmware_info, - .rel_firmware = amdgpu_cgs_rel_firmware, .set_powergating_state = amdgpu_cgs_set_powergating_state, .set_clockgating_state = amdgpu_cgs_set_clockgating_state, - .get_active_displays_info = amdgpu_cgs_get_active_displays_info, - .notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled, - .is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled, - .enter_safe_mode = amdgpu_cgs_enter_safe_mode, - .lock_grbm_idx = amdgpu_cgs_lock_grbm_idx, }; struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index f2814ae7ecdd..cab34a4b65cc 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -76,17 +76,6 @@ enum cgs_ucode_id { CGS_UCODE_ID_MAXIMUM, }; -/* - * enum cgs_resource_type - GPU resource type - */ -enum cgs_resource_type { - CGS_RESOURCE_TYPE_MMIO = 0, - CGS_RESOURCE_TYPE_FB, - CGS_RESOURCE_TYPE_IO, - CGS_RESOURCE_TYPE_DOORBELL, - CGS_RESOURCE_TYPE_ROM, -}; - /** * struct cgs_firmware_info - Firmware information */ @@ -104,17 +93,6 @@ struct cgs_firmware_info { bool is_kicker; }; -struct cgs_mode_info { - uint32_t refresh_rate; - uint32_t vblank_time_us; -}; - -struct cgs_display_info { - uint32_t display_count; - uint32_t active_display_mask; - struct cgs_mode_info *mode_info; -}; - typedef unsigned long cgs_handle_t; /** @@ -170,73 +148,10 @@ typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs #define CGS_WREG32_FIELD_IND(device, space, reg, field, val) \ cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field)) -/** - * cgs_get_pci_resource() - provide access to a device resource (PCI BAR) - * @cgs_device: opaque device handle - * @resource_type: Type of Resource (MMIO, IO, ROM, FB, DOORBELL) - * @size: size of the region - * @offset: offset from the start of the region - * @resource_base: base address (not including offset) returned - * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_get_pci_resource_t)(struct cgs_device *cgs_device, - enum cgs_resource_type resource_type, - uint64_t size, - uint64_t offset, - uint64_t *resource_base); - -/** - * cgs_atom_get_data_table() - Get a pointer to an ATOM BIOS data table - * @cgs_device: opaque device handle - * @table: data table index - * @size: size of the table (output, may be NULL) - * 
@frev: table format revision (output, may be NULL) - * @crev: table content revision (output, may be NULL) - * - * Return: Pointer to start of the table, or NULL on failure - */ -typedef const void *(*cgs_atom_get_data_table_t)( - struct cgs_device *cgs_device, unsigned table, - uint16_t *size, uint8_t *frev, uint8_t *crev); - -/** - * cgs_atom_get_cmd_table_revs() - Get ATOM BIOS command table revisions - * @cgs_device: opaque device handle - * @table: data table index - * @frev: table format revision (output, may be NULL) - * @crev: table content revision (output, may be NULL) - * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_atom_get_cmd_table_revs_t)(struct cgs_device *cgs_device, unsigned table, - uint8_t *frev, uint8_t *crev); - -/** - * cgs_atom_exec_cmd_table() - Execute an ATOM BIOS command table - * @cgs_device: opaque device handle - * @table: command table index - * @args: arguments - * - * Return: 0 on success, -errno otherwise - */ -typedef int (*cgs_atom_exec_cmd_table_t)(struct cgs_device *cgs_device, - unsigned table, void *args); - -/** - * cgs_get_firmware_info - Get the firmware information from core driver - * @cgs_device: opaque device handle - * @type: the firmware type - * @info: returend firmware information - * - * Return: 0 on success, -errno otherwise - */ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info); -typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device, - enum cgs_ucode_id type); typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, @@ -246,43 +161,17 @@ typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_clockgating_state state); -typedef int(*cgs_get_active_displays_info)( - struct cgs_device *cgs_device, - struct cgs_display_info *info); - -typedef int (*cgs_notify_dpm_enabled)(struct cgs_device *cgs_device, bool enabled); - -typedef int (*cgs_is_virtualization_enabled_t)(void *cgs_device); - -typedef int (*cgs_enter_safe_mode)(struct cgs_device *cgs_device, bool en); - -typedef void (*cgs_lock_grbm_idx)(struct cgs_device *cgs_device, bool lock); - struct cgs_ops { /* MMIO access */ cgs_read_register_t read_register; cgs_write_register_t write_register; cgs_read_ind_register_t read_ind_register; cgs_write_ind_register_t write_ind_register; - /* PCI resources */ - cgs_get_pci_resource_t get_pci_resource; - /* ATOM BIOS */ - cgs_atom_get_data_table_t atom_get_data_table; - cgs_atom_get_cmd_table_revs_t atom_get_cmd_table_revs; - cgs_atom_exec_cmd_table_t atom_exec_cmd_table; /* Firmware Info */ cgs_get_firmware_info get_firmware_info; - cgs_rel_firmware rel_firmware; /* cg pg interface*/ cgs_set_powergating_state set_powergating_state; cgs_set_clockgating_state set_clockgating_state; - /* display manager */ - cgs_get_active_displays_info get_active_displays_info; - /* notify dpm enabled */ - cgs_notify_dpm_enabled notify_dpm_enabled; - cgs_is_virtualization_enabled_t is_virtualization_enabled; - cgs_enter_safe_mode enter_safe_mode; - cgs_lock_grbm_idx lock_grbm_idx; }; struct cgs_os_ops; /* To be define in OS-specific CGS header */ @@ -309,40 +198,12 @@ struct cgs_device #define cgs_write_ind_register(dev,space,index,value) \ CGS_CALL(write_ind_register,dev,space,index,value) -#define cgs_atom_get_data_table(dev,table,size,frev,crev) \ - CGS_CALL(atom_get_data_table,dev,table,size,frev,crev) -#define 
cgs_atom_get_cmd_table_revs(dev,table,frev,crev) \ - CGS_CALL(atom_get_cmd_table_revs,dev,table,frev,crev) -#define cgs_atom_exec_cmd_table(dev,table,args) \ - CGS_CALL(atom_exec_cmd_table,dev,table,args) - #define cgs_get_firmware_info(dev, type, info) \ CGS_CALL(get_firmware_info, dev, type, info) -#define cgs_rel_firmware(dev, type) \ - CGS_CALL(rel_firmware, dev, type) #define cgs_set_powergating_state(dev, block_type, state) \ CGS_CALL(set_powergating_state, dev, block_type, state) #define cgs_set_clockgating_state(dev, block_type, state) \ CGS_CALL(set_clockgating_state, dev, block_type, state) -#define cgs_notify_dpm_enabled(dev, enabled) \ - CGS_CALL(notify_dpm_enabled, dev, enabled) - -#define cgs_get_active_displays_info(dev, info) \ - CGS_CALL(get_active_displays_info, dev, info) - -#define cgs_get_pci_resource(cgs_device, resource_type, size, offset, \ - resource_base) \ - CGS_CALL(get_pci_resource, cgs_device, resource_type, size, offset, \ - resource_base) - -#define cgs_is_virtualization_enabled(cgs_device) \ - CGS_CALL(is_virtualization_enabled, cgs_device) - -#define cgs_enter_safe_mode(cgs_device, en) \ - CGS_CALL(enter_safe_mode, cgs_device, en) - -#define cgs_lock_grbm_idx(cgs_device, lock) \ - CGS_CALL(lock_grbm_idx, cgs_device, lock) #endif /* _CGS_COMMON_H */ -- cgit v1.2.1 From 43fa561fd07fe707815d1b72472f6f5829223a52 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 28 Mar 2018 13:42:45 -0500 Subject: drm/amdgpu: remove duplicate cg/pg wrapper functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 44 ---------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++- drivers/gpu/drm/amd/include/cgs_common.h | 31 --------------- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 8 ++-- .../amd/powerplay/hwmgr/smu7_clockpowergating.c | 16 ++++---- drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 20 ++++------ drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 8 ++-- 8 files changed, 30 insertions(+), 107 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 0193f6ced00b..3000c4abe34f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -222,10 +222,10 @@ enum amdgpu_kiq_irq { AMDGPU_CP_KIQ_IRQ_LAST }; -int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev, +int amdgpu_device_ip_set_clockgating_state(void *dev, enum amd_ip_block_type block_type, enum amd_clockgating_state state); -int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev, +int amdgpu_device_ip_set_powergating_state(void *dev, enum amd_ip_block_type block_type, enum amd_powergating_state state); void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index dc28fa63bf51..a8a942c60ea2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -108,48 +108,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device, WARN(1, "Invalid indirect register space"); } -static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device, - enum amd_ip_block_type block_type, - enum amd_clockgating_state state) -{ - CGS_FUNC_ADEV; - int i, r = -1; - 
- for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) - continue; - - if (adev->ip_blocks[i].version->type == block_type) { - r = adev->ip_blocks[i].version->funcs->set_clockgating_state( - (void *)adev, - state); - break; - } - } - return r; -} - -static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device, - enum amd_ip_block_type block_type, - enum amd_powergating_state state) -{ - CGS_FUNC_ADEV; - int i, r = -1; - - for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_blocks[i].status.valid) - continue; - - if (adev->ip_blocks[i].version->type == block_type) { - r = adev->ip_blocks[i].version->funcs->set_powergating_state( - (void *)adev, - state); - break; - } - } - return r; -} - static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) { CGS_FUNC_ADEV; @@ -490,8 +448,6 @@ static const struct cgs_ops amdgpu_cgs_ops = { .read_ind_register = amdgpu_cgs_read_ind_register, .write_ind_register = amdgpu_cgs_write_ind_register, .get_firmware_info = amdgpu_cgs_get_firmware_info, - .set_powergating_state = amdgpu_cgs_set_powergating_state, - .set_clockgating_state = amdgpu_cgs_set_clockgating_state, }; struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 34af664b9f93..a53926580b3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1039,10 +1039,11 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = { * the hardware IP specified. * Returns the error code from the last instance. */ -int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev, +int amdgpu_device_ip_set_clockgating_state(void *dev, enum amd_ip_block_type block_type, enum amd_clockgating_state state) { + struct amdgpu_device *adev = dev; int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { @@ -1072,10 +1073,11 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev, * the hardware IP specified. * Returns the error code from the last instance. */ -int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev, +int amdgpu_device_ip_set_powergating_state(void *dev, enum amd_ip_block_type block_type, enum amd_powergating_state state) { + struct amdgpu_device *adev = dev; int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index cab34a4b65cc..a69deb3a2ac0 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -42,20 +42,6 @@ enum cgs_ind_reg { CGS_IND_REG__AUDIO_ENDPT }; -/** - * enum cgs_engine - Engines that can be statically power-gated - */ -enum cgs_engine { - CGS_ENGINE__UVD, - CGS_ENGINE__VCE, - CGS_ENGINE__VP8, - CGS_ENGINE__ACP_DMA, - CGS_ENGINE__ACP_DSP0, - CGS_ENGINE__ACP_DSP1, - CGS_ENGINE__ISP, - /* ... 
*/ -}; - /* * enum cgs_ucode_id - Firmware types for different IPs */ @@ -152,15 +138,6 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info); - -typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, - enum amd_ip_block_type block_type, - enum amd_powergating_state state); - -typedef int(*cgs_set_clockgating_state)(struct cgs_device *cgs_device, - enum amd_ip_block_type block_type, - enum amd_clockgating_state state); - struct cgs_ops { /* MMIO access */ cgs_read_register_t read_register; @@ -169,9 +146,6 @@ struct cgs_ops { cgs_write_ind_register_t write_ind_register; /* Firmware Info */ cgs_get_firmware_info get_firmware_info; - /* cg pg interface*/ - cgs_set_powergating_state set_powergating_state; - cgs_set_clockgating_state set_clockgating_state; }; struct cgs_os_ops; /* To be define in OS-specific CGS header */ @@ -200,10 +174,5 @@ struct cgs_device #define cgs_get_firmware_info(dev, type, info) \ CGS_CALL(get_firmware_info, dev, type, info) -#define cgs_set_powergating_state(dev, block_type, state) \ - CGS_CALL(set_powergating_state, dev, block_type, state) -#define cgs_set_clockgating_state(dev, block_type, state) \ - CGS_CALL(set_clockgating_state, dev, block_type, state) - #endif /* _CGS_COMMON_H */ diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 1ca6a13be6a3..66c49b89cdb4 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -288,10 +288,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, if (*level & profile_mode_mask) { hwmgr->saved_dpm_level = hwmgr->dpm_level; hwmgr->en_umd_pstate = true; - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_UNGATE); } @@ -301,10 +301,10 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) *level = hwmgr->saved_dpm_level; hwmgr->en_umd_pstate = false; - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GFX, AMD_PG_STATE_GATE); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index f4cbaee4e2ca..6d72a5600917 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c @@ -147,20 +147,20 @@ void smu7_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) data->uvd_power_gated = bgate; if (bgate) { - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); smu7_update_uvd_dpm(hwmgr, true); smu7_powerdown_uvd(hwmgr); } else { smu7_powerup_uvd(hwmgr); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, + 
amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); smu7_update_uvd_dpm(hwmgr, false); @@ -175,20 +175,20 @@ void smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) data->vce_power_gated = bgate; if (bgate) { - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); smu7_update_vce_dpm(hwmgr, true); smu7_powerdown_vce(hwmgr); } else { smu7_powerup_vce(hwmgr); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE); smu7_update_vce_dpm(hwmgr, false); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c index c2f93aa1d2e8..50690c72b2ea 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c @@ -1892,20 +1892,20 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) data->uvd_power_gated = bgate; if (bgate) { - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_GATE); smu8_dpm_update_uvd_dpm(hwmgr, true); smu8_dpm_powerdown_uvd(hwmgr); } else { smu8_dpm_powerup_uvd(hwmgr); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state(hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_UVD, AMD_PG_STATE_UNGATE); smu8_dpm_update_uvd_dpm(hwmgr, false); @@ -1918,12 +1918,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) struct smu8_hwmgr *data = hwmgr->backend; if (bgate) { - cgs_set_powergating_state( - hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); - cgs_set_clockgating_state( - hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_GATE); smu8_enable_disable_vce_dpm(hwmgr, false); @@ -1932,12 +1930,10 @@ static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) } else { smu8_dpm_powerup_vce(hwmgr); data->vce_power_gated = false; - cgs_set_clockgating_state( - hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); - cgs_set_powergating_state( - hwmgr->device, + amdgpu_device_ip_set_powergating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_UNGATE); smu8_dpm_update_vce_dpm(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index d023494c3eae..dae3422366b3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -306,13 +306,13 @@ static int fiji_start_smu(struct pp_hwmgr *hwmgr) } /* To initialize all clock gating before RLC loaded and running.*/ - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, 
AMD_IP_BLOCK_TYPE_GFX, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_GMC, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_SDMA, AMD_CG_STATE_GATE); - cgs_set_clockgating_state(hwmgr->device, + amdgpu_device_ip_set_clockgating_state(hwmgr->adev, AMD_IP_BLOCK_TYPE_COMMON, AMD_CG_STATE_GATE); /* Setup SoftRegsStart here for register lookup in case -- cgit v1.2.1 From 2b816a1d773e755332733a89bdd276e08f935933 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Mar 2018 16:37:30 -0500 Subject: drm/amdgpu/sdma4: use a helper for SDMA_OP_POLL_REGMEM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than opencoding it in a bunch of functions. Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 62 +++++++++++++++++++--------------- 1 file changed, 34 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 399f876f9cad..2c618a1be03e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -360,6 +360,31 @@ static void sdma_v4_0_ring_emit_ib(struct amdgpu_ring *ring, } +static void sdma_v4_0_wait_reg_mem(struct amdgpu_ring *ring, + int mem_space, int hdp, + uint32_t addr0, uint32_t addr1, + uint32_t ref, uint32_t mask, + uint32_t inv) +{ + amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | + SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) | + SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) | + SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ + if (mem_space) { + /* memory */ + amdgpu_ring_write(ring, addr0); + amdgpu_ring_write(ring, addr1); + } else { + /* registers */ + amdgpu_ring_write(ring, addr0 << 2); + amdgpu_ring_write(ring, addr1 << 2); + } + amdgpu_ring_write(ring, ref); /* reference */ + amdgpu_ring_write(ring, mask); /* mask */ + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */ +} + /** * sdma_v4_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring * @@ -378,15 +403,10 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) else ref_and_mask = nbio_hf_reg->ref_and_mask_sdma1; - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | - SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | - SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2); - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2); - amdgpu_ring_write(ring, ref_and_mask); /* reference */ - amdgpu_ring_write(ring, ref_and_mask); /* mask */ - amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | - SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */ + sdma_v4_0_wait_reg_mem(ring, 0, 1, + adev->nbio_funcs->get_hdp_flush_done_offset(adev), + adev->nbio_funcs->get_hdp_flush_req_offset(adev), + ref_and_mask, ref_and_mask, 10); } /** @@ -1114,16 +1134,10 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) uint64_t addr = ring->fence_drv.gpu_addr; /* wait for idle */ - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | - SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | - 
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */ - SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1)); - amdgpu_ring_write(ring, addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); - amdgpu_ring_write(ring, seq); /* reference */ - amdgpu_ring_write(ring, 0xffffffff); /* mask */ - amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | - SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */ + sdma_v4_0_wait_reg_mem(ring, 1, 0, + addr & 0xfffffffc, + upper_32_bits(addr) & 0xffffffff, + seq, 0xffffffff, 4); } @@ -1154,15 +1168,7 @@ static void sdma_v4_0_ring_emit_wreg(struct amdgpu_ring *ring, static void sdma_v4_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) { - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | - SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) | - SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */ - amdgpu_ring_write(ring, reg << 2); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, val); /* reference */ - amdgpu_ring_write(ring, mask); /* mask */ - amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | - SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); + sdma_v4_0_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10); } static int sdma_v4_0_early_init(void *handle) -- cgit v1.2.1 From 3ef1381d4e7ddd3e063cf6fd33df96badfb66839 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 28 Mar 2018 16:23:28 +0800 Subject: drm/amdgpu: add df v1_7 header files Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/include/asic_reg/df/df_1_7_default.h | 26 ++++++++++++ .../drm/amd/include/asic_reg/df/df_1_7_offset.h | 33 +++++++++++++++ .../drm/amd/include/asic_reg/df/df_1_7_sh_mask.h | 48 ++++++++++++++++++++++ 3 files changed, 107 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h new file mode 100644 index 000000000000..9e19e723081b --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_default.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _df_1_7_DEFAULT_HEADER +#define _df_1_7_DEFAULT_HEADER + +#define mmFabricConfigAccessControl_DEFAULT 0x00000000 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h new file mode 100644 index 000000000000..2b305dd021e8 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _df_1_7_OFFSET_HEADER +#define _df_1_7_OFFSET_HEADER + +#define mmFabricConfigAccessControl 0x0410 +#define mmFabricConfigAccessControl_BASE_IDX 0 + +#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc +#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0 + +#define mmDF_CS_AON0_DramBaseAddress0 0x0044 +#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h new file mode 100644 index 000000000000..2ba849798924 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _df_1_7_SH_MASK_HEADER +#define _df_1_7_SH_MASK_HEADER + +/* FabricConfigAccessControl */ +#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0 +#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1 +#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10 +#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L +#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L +#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L + +/* DF_PIE_AON0_DfGlobalClkGater */ +#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0 +#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL + +/* DF_CS_AON0_DramBaseAddress0 */ +#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 +#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 +#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4 +#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8 +#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc +#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L +#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L +#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L +#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L +#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L + +#endif -- cgit v1.2.1 From 634c96e3f3c7982d4b3ad14f8e004d11af184e91 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 23 Mar 2018 11:37:25 +0800 Subject: drm/amdgpu: add df callback functions structure Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 3000c4abe34f..df409ddb97e6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1368,7 +1368,17 @@ struct amdgpu_nbio_funcs { void (*detect_hw_virt)(struct amdgpu_device *adev); }; - +struct amdgpu_df_funcs { + void (*init)(struct amdgpu_device *adev); + void (*enable_broadcast_mode)(struct amdgpu_device *adev, + bool enable); + u32 (*get_fb_channel_number)(struct amdgpu_device *adev); + u32 (*get_hbm_channel_number)(struct amdgpu_device *adev); + void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, + bool enable); + void (*get_clockgating_state)(struct amdgpu_device *adev, + u32 *flags); +}; /* Define the HW IP blocks will be used in driver , add more if necessary */ enum amd_hw_ip_block_type { GC_HWIP = 1, @@ -1588,6 +1598,7 @@ struct amdgpu_device { uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; const struct amdgpu_nbio_funcs *nbio_funcs; + const struct amdgpu_df_funcs *df_funcs; /* delayed work_func for deferring clockgating during resume */ struct delayed_work late_init_work; -- cgit v1.2.1 From d99605ead70efa0dc259c28f9b258184e2b3e77c Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 28 Mar 2018 16:27:56 +0800 Subject: drm/amdgpu/df: implement df v1_7 callback functions Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 4 ++ drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 112 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/df_v1_7.h | 40 +++++++++++++ 3 files changed, 156 insertions(+) create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v1_7.c create mode 100644 
drivers/gpu/drm/amd/amdgpu/df_v1_7.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 2ca2b5154d52..2fe4a0bf98c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -64,6 +64,10 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce amdgpu-y += \ vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o +# add DF block +amdgpu-y += \ + df_v1_7.o + # add GMC block amdgpu-y += \ gmc_v7_0.o \ diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c new file mode 100644 index 000000000000..4ffda996660f --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -0,0 +1,112 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "df_v1_7.h" + +#include "df/df_1_7_default.h" +#include "df/df_1_7_offset.h" +#include "df/df_1_7_sh_mask.h" + +static u32 df_v1_7_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; + +static void df_v1_7_init (struct amdgpu_device *adev) +{ +} + +static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp; + + if (enable) { + tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl); + tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK; + WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp); + } else + WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, + mmFabricConfigAccessControl_DEFAULT); +} + +static u32 df_v1_7_get_fb_channel_number(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); + tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; + tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + + return tmp; +} + +static u32 df_v1_7_get_hbm_channel_number(struct amdgpu_device *adev) +{ + int fb_channel_number; + + fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); + + return df_v1_7_channel_number[fb_channel_number]; +} + +static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp; + + /* Put DF on broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, true); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY; + WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } else { + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V1_7_MGCG_DISABLE; + WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } + + /* Exit boradcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, false); +} + +static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + u32 tmp; + + /* AMD_CG_SUPPORT_DF_MGCG */ + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + if (tmp & DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY) + *flags |= AMD_CG_SUPPORT_DF_MGCG; +} + +const struct amdgpu_df_funcs df_v1_7_funcs = { + .init = df_v1_7_init, + .enable_broadcast_mode = df_v1_7_enable_broadcast_mode, + .get_fb_channel_number = df_v1_7_get_fb_channel_number, + .get_hbm_channel_number = df_v1_7_get_hbm_channel_number, + .update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating, + .get_clockgating_state = df_v1_7_get_clockgating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.h b/drivers/gpu/drm/amd/amdgpu/df_v1_7.h new file mode 100644 index 000000000000..74621104c487 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.h @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DF_V1_7_H__ +#define __DF_V1_7_H__ + +#include "soc15_common.h" +enum DF_V1_7_MGCG +{ + DF_V1_7_MGCG_DISABLE = 0, + DF_V1_7_MGCG_ENABLE_00_CYCLE_DELAY =1, + DF_V1_7_MGCG_ENABLE_01_CYCLE_DELAY =2, + DF_V1_7_MGCG_ENABLE_15_CYCLE_DELAY =13, + DF_V1_7_MGCG_ENABLE_31_CYCLE_DELAY =14, + DF_V1_7_MGCG_ENABLE_63_CYCLE_DELAY =15 +}; + +extern const struct amdgpu_df_funcs df_v1_7_funcs; + +#endif -- cgit v1.2.1 From 070706c03b3e67207cc41bd97b67ff0930d79cb3 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Wed, 28 Mar 2018 17:08:04 +0800 Subject: drm/amdgpu: switch to use df callback functions Signed-off-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 35 +------------------- drivers/gpu/drm/amd/amdgpu/soc15.c | 62 +++-------------------------------- 2 files changed, 5 insertions(+), 92 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index e687363900bb..070946e1e4a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -714,7 +714,6 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, */ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) { - u32 tmp; int chansize, numchan; int r; @@ -727,39 +726,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) else chansize = 128; - tmp = RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0); - tmp &= DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK; - tmp >>= DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; - switch (tmp) { - case 0: - default: - numchan = 1; - break; - case 1: - numchan = 2; - break; - case 2: - numchan = 0; - break; - case 3: - numchan = 4; - break; - case 4: - numchan = 0; - break; - case 5: - numchan = 8; - break; - case 6: - numchan = 0; - break; - case 7: - numchan = 16; - break; - case 8: - numchan = 2; - break; - } + numchan = adev->df_funcs->get_hbm_channel_number(adev); adev->gmc.vram_width = numchan * chansize; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 51cf8a30f6c2..654b015d5e05 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -52,6 +52,7 @@ #include "gmc_v9_0.h" #include "gfxhub_v1_0.h" #include "mmhub_v1_0.h" +#include "df_v1_7.h" #include "vega10_ih.h" #include "sdma_v4_0.h" #include "uvd_v7_0.h" @@ -60,33 +61,6 @@ #include "dce_virtual.h" #include "mxgpu_ai.h" -#define mmFabricConfigAccessControl 0x0410 -#define mmFabricConfigAccessControl_BASE_IDX 0 -#define mmFabricConfigAccessControl_DEFAULT 0x00000000 -//FabricConfigAccessControl -#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0 -#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1 -#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10 -#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L -#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L -#define 
FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L - - -#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc -#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0 -//DF_PIE_AON0_DfGlobalClkGater -#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0 -#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL - -enum { - DF_MGCG_DISABLE = 0, - DF_MGCG_ENABLE_00_CYCLE_DELAY =1, - DF_MGCG_ENABLE_01_CYCLE_DELAY =2, - DF_MGCG_ENABLE_15_CYCLE_DELAY =13, - DF_MGCG_ENABLE_31_CYCLE_DELAY =14, - DF_MGCG_ENABLE_63_CYCLE_DELAY =15 -}; - #define mmMP0_MISC_CGTT_CTRL0 0x01b9 #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0 #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba @@ -521,6 +495,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) else adev->nbio_funcs = &nbio_v6_1_funcs; + adev->df_funcs = &df_v1_7_funcs; adev->nbio_funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) @@ -871,32 +846,6 @@ static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *ade WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data); } -static void soc15_update_df_medium_grain_clock_gating(struct amdgpu_device *adev, - bool enable) -{ - uint32_t data; - - /* Put DF on broadcast mode */ - data = RREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl)); - data &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK; - WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), data); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { - data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater)); - data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - data |= DF_MGCG_ENABLE_15_CYCLE_DELAY; - WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data); - } else { - data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater)); - data &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; - data |= DF_MGCG_DISABLE; - WREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater), data); - } - - WREG32(SOC15_REG_OFFSET(DF, 0, mmFabricConfigAccessControl), - mmFabricConfigAccessControl_DEFAULT); -} - static int soc15_common_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -920,7 +869,7 @@ static int soc15_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE ? true : false); soc15_update_rom_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - soc15_update_df_medium_grain_clock_gating(adev, + adev->df_funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); break; case CHIP_RAVEN: @@ -973,10 +922,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags) if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) *flags |= AMD_CG_SUPPORT_ROM_MGCG; - /* AMD_CG_SUPPORT_DF_MGCG */ - data = RREG32(SOC15_REG_OFFSET(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater)); - if (data & DF_MGCG_ENABLE_15_CYCLE_DELAY) - *flags |= AMD_CG_SUPPORT_DF_MGCG; + adev->df_funcs->get_clockgating_state(adev, flags); } static int soc15_common_set_powergating_state(void *handle, -- cgit v1.2.1 From c99c7d6ef9f610145308577ae5845cd6e14051e2 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 30 Mar 2018 13:05:44 +0800 Subject: drm/amd/display: Disentangle dc.h include from amdgpu.h Use forward declaration in amdgpu_dm.h for struct dc instand of include dc.h to make header files more standalone Reviewed-by: Harry Wentland Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index b68400c1154b..3af699b24e10 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -28,7 +28,6 @@ #include #include -#include "dc.h" /* * This file contains the definition for amdgpu_display_manager @@ -53,6 +52,7 @@ struct amdgpu_device; struct drm_device; struct amdgpu_dm_irq_handler_data; +struct dc; struct amdgpu_dm_prev_state { struct drm_framebuffer *fb; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index f6cb502c303f..ef5fad8c5aac 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -25,6 +25,7 @@ #include "amdgpu_mode.h" #include "amdgpu_dm.h" +#include "dc.h" #include "modules/color/color_gamma.h" #define MAX_DRM_LUT_VALUE 0xFFFF -- cgit v1.2.1 From e68d14dd4ebaf596bf0c237ba82f815c2f561dec Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Fri, 30 Mar 2018 15:11:38 +0100 Subject: drm/amdgpu: Move GEM BO to drm_framebuffer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since drm_framebuffer can now store GEM objects directly, place them there rather than in our own subclass. As this makes the framebuffer create_handle and destroy functions the same as the GEM framebuffer helper, we can reuse those. 
Acked-by: Alex Deucher Signed-off-by: Daniel Stone Cc: Alex Deucher Cc: Christian König Cc: David (ChunMing) Zhou Cc: amd-gfx@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 36 +++++------------------ drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 10 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1 - drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 17 ++++------- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 17 ++++------- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 17 ++++------- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 17 ++++------- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 4 +-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 +++---- 10 files changed, 40 insertions(+), 96 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a53926580b3d..e0d6b1ddd213 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2541,7 +2541,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) /* unpin the front buffers and cursors */ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb); + struct drm_framebuffer *fb = crtc->primary->fb; struct amdgpu_bo *robj; if (amdgpu_crtc->cursor_bo) { @@ -2553,10 +2553,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) } } - if (rfb == NULL || rfb->obj == NULL) { + if (fb == NULL || fb->obj[0] == NULL) { continue; } - robj = gem_to_amdgpu_bo(rfb->obj); + robj = gem_to_amdgpu_bo(fb->obj[0]); /* don't unpin kernel fb objects */ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { r = amdgpu_bo_reserve(robj, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 93f700ab1bfb..b83ae998fe27 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -35,6 +35,7 @@ #include #include #include +#include #include static void amdgpu_display_flip_callback(struct dma_fence *f, @@ -151,8 +152,6 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - struct amdgpu_framebuffer *old_amdgpu_fb; - struct amdgpu_framebuffer *new_amdgpu_fb; struct drm_gem_object *obj; struct amdgpu_flip_work *work; struct amdgpu_bo *new_abo; @@ -174,15 +173,13 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; /* schedule unpin of the old buffer */ - old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - obj = old_amdgpu_fb->obj; + obj = crtc->primary->fb->obj[0]; /* take a reference to the old object */ work->old_abo = gem_to_amdgpu_bo(obj); amdgpu_bo_ref(work->old_abo); - new_amdgpu_fb = to_amdgpu_framebuffer(fb); - obj = new_amdgpu_fb->obj; + obj = fb->obj[0]; new_abo = gem_to_amdgpu_bo(obj); /* pin the new buffer */ @@ -482,28 +479,9 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector, return true; } -static void amdgpu_display_user_framebuffer_destroy(struct drm_framebuffer *fb) -{ - struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - - drm_gem_object_put_unlocked(amdgpu_fb->obj); - 
drm_framebuffer_cleanup(fb); - kfree(amdgpu_fb); -} - -static int amdgpu_display_user_framebuffer_create_handle( - struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int *handle) -{ - struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - - return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle); -} - static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { - .destroy = amdgpu_display_user_framebuffer_destroy, - .create_handle = amdgpu_display_user_framebuffer_create_handle, + .destroy = drm_gem_fb_destroy, + .create_handle = drm_gem_fb_create_handle, }; uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev) @@ -526,11 +504,11 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, struct drm_gem_object *obj) { int ret; - rfb->obj = obj; + rfb->base.obj[0] = obj; drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); if (ret) { - rfb->obj = NULL; + rfb->base.obj[0] = NULL; return ret; } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 12063019751b..ff89e84b34ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -292,9 +292,9 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb drm_fb_helper_unregister_fbi(&rfbdev->helper); - if (rfb->obj) { - amdgpufb_destroy_pinned_object(rfb->obj); - rfb->obj = NULL; + if (rfb->base.obj[0]) { + amdgpufb_destroy_pinned_object(rfb->base.obj[0]); + rfb->base.obj[0] = NULL; drm_framebuffer_unregister_private(&rfb->base); drm_framebuffer_cleanup(&rfb->base); } @@ -377,7 +377,7 @@ int amdgpu_fbdev_total_size(struct amdgpu_device *adev) if (!adev->mode_info.rfbdev) return 0; - robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj); + robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]); size += amdgpu_bo_size(robj); return size; } @@ -386,7 +386,7 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) { if (!adev->mode_info.rfbdev) return false; - if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.obj)) + if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0])) return true; return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index d6416ee52e32..b9e9e8b02fb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -308,7 +308,6 @@ struct amdgpu_display_funcs { struct amdgpu_framebuffer { struct drm_framebuffer base; - struct drm_gem_object *obj; /* caching for later use */ uint64_t address; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 452f88ea46a2..ada241bfeee9 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -1823,7 +1823,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_framebuffer *amdgpu_fb; struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -1842,18 +1841,15 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, return 0; } - if (atomic) { - amdgpu_fb = to_amdgpu_framebuffer(fb); + if (atomic) target_fb = fb; - } else { - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + else target_fb = 
crtc->primary->fb; - } /* If atomic, assume fb object is pinned & idle & fenced and * just update base pointers */ - obj = amdgpu_fb->obj; + obj = target_fb->obj[0]; abo = gem_to_amdgpu_bo(obj); r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) @@ -2043,8 +2039,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { - amdgpu_fb = to_amdgpu_framebuffer(fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r != 0)) return r; @@ -2526,11 +2521,9 @@ static void dce_v10_0_crtc_disable(struct drm_crtc *crtc) dce_v10_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); if (crtc->primary->fb) { int r; - struct amdgpu_framebuffer *amdgpu_fb; struct amdgpu_bo *abo; - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r)) DRM_ERROR("failed to reserve abo before unpin\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index a7c1c584a191..d3ae508b2a92 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -1862,7 +1862,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_framebuffer *amdgpu_fb; struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -1881,18 +1880,15 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, return 0; } - if (atomic) { - amdgpu_fb = to_amdgpu_framebuffer(fb); + if (atomic) target_fb = fb; - } else { - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + else target_fb = crtc->primary->fb; - } /* If atomic, assume fb object is pinned & idle & fenced and * just update base pointers */ - obj = amdgpu_fb->obj; + obj = target_fb->obj[0]; abo = gem_to_amdgpu_bo(obj); r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) @@ -2082,8 +2078,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { - amdgpu_fb = to_amdgpu_framebuffer(fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r != 0)) return r; @@ -2601,11 +2596,9 @@ static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); if (crtc->primary->fb) { int r; - struct amdgpu_framebuffer *amdgpu_fb; struct amdgpu_bo *abo; - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r)) DRM_ERROR("failed to reserve abo before unpin\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 9f67b7fd3487..394cc1e8fe20 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1780,7 +1780,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_framebuffer *amdgpu_fb; struct 
drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -1798,18 +1797,15 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, return 0; } - if (atomic) { - amdgpu_fb = to_amdgpu_framebuffer(fb); + if (atomic) target_fb = fb; - } else { - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + else target_fb = crtc->primary->fb; - } /* If atomic, assume fb object is pinned & idle & fenced and * just update base pointers */ - obj = amdgpu_fb->obj; + obj = target_fb->obj[0]; abo = gem_to_amdgpu_bo(obj); r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) @@ -1978,8 +1974,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { - amdgpu_fb = to_amdgpu_framebuffer(fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r != 0)) return r; @@ -2414,11 +2409,9 @@ static void dce_v6_0_crtc_disable(struct drm_crtc *crtc) dce_v6_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); if (crtc->primary->fb) { int r; - struct amdgpu_framebuffer *amdgpu_fb; struct amdgpu_bo *abo; - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r)) DRM_ERROR("failed to reserve abo before unpin\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index f55422cbd77a..c9b9ab8f1b05 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1754,7 +1754,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; - struct amdgpu_framebuffer *amdgpu_fb; struct drm_framebuffer *target_fb; struct drm_gem_object *obj; struct amdgpu_bo *abo; @@ -1773,18 +1772,15 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, return 0; } - if (atomic) { - amdgpu_fb = to_amdgpu_framebuffer(fb); + if (atomic) target_fb = fb; - } else { - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); + else target_fb = crtc->primary->fb; - } /* If atomic, assume fb object is pinned & idle & fenced and * just update base pointers */ - obj = amdgpu_fb->obj; + obj = target_fb->obj[0]; abo = gem_to_amdgpu_bo(obj); r = amdgpu_bo_reserve(abo, false); if (unlikely(r != 0)) @@ -1955,8 +1951,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); if (!atomic && fb && fb != crtc->primary->fb) { - amdgpu_fb = to_amdgpu_framebuffer(fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r != 0)) return r; @@ -2430,11 +2425,9 @@ static void dce_v8_0_crtc_disable(struct drm_crtc *crtc) dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); if (crtc->primary->fb) { int r; - struct amdgpu_framebuffer *amdgpu_fb; struct amdgpu_bo *abo; - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r)) DRM_ERROR("failed to reserve abo before unpin\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index b51f05dc9582..89b2286a9d6b 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -168,11 +168,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc) dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); if (crtc->primary->fb) { int r; - struct amdgpu_framebuffer *amdgpu_fb; struct amdgpu_bo *abo; - amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb); - abo = gem_to_amdgpu_bo(amdgpu_fb->obj); + abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); r = amdgpu_bo_reserve(abo, true); if (unlikely(r)) DRM_ERROR("failed to reserve abo before unpin\n"); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3ff3905eee9a..077ee6793a1c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1819,7 +1819,7 @@ static bool fill_rects_from_plane_state(const struct drm_plane_state *state, static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb, uint64_t *tiling_flags) { - struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->obj); + struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]); int r = amdgpu_bo_reserve(rbo, false); if (unlikely(r)) { @@ -3028,8 +3028,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, } afb = to_amdgpu_framebuffer(new_state->fb); - - obj = afb->obj; + obj = new_state->fb->obj[0]; rbo = gem_to_amdgpu_bo(obj); adev = amdgpu_ttm_adev(rbo->tbo.bdev); r = amdgpu_bo_reserve(rbo, false); @@ -3093,14 +3092,12 @@ static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state) { struct amdgpu_bo *rbo; - struct amdgpu_framebuffer *afb; int r; if (!old_state->fb) return; - afb = to_amdgpu_framebuffer(old_state->fb); - rbo = gem_to_amdgpu_bo(afb->obj); + rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); r = amdgpu_bo_reserve(rbo, false); if (unlikely(r)) { DRM_ERROR("failed to reserve rbo before unpin\n"); @@ -3896,7 +3893,7 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc, int r, vpos, hpos; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); - struct amdgpu_bo *abo = gem_to_amdgpu_bo(afb->obj); + struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); struct amdgpu_device *adev = crtc->dev->dev_private; bool async_flip = (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0; struct dc_flip_addrs addr = { {0} }; -- cgit v1.2.1 From 844c541951a00ddffa0248c72e1d7d3e4afaad30 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 26 Mar 2018 12:56:56 -0500 Subject: drm/amdgpu: add documentation on hwmon interfaces exposed (v3) Provide detail on the currently exposed hwmon interfaces for temperature, power, voltage, and fan. 
v2: add power cap documentation v3: add a comment about sensors tool Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 40 ++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index e6e365852f11..e5f60fc31516 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1109,6 +1109,46 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, return count; } + +/** + * DOC: hwmon + * + * The amdgpu driver exposes the following sensor interfaces: + * - GPU temperature (via the on-die sensor) + * - GPU voltage + * - Northbridge voltage (APUs only) + * - GPU power + * - GPU fan + * + * hwmon interfaces for GPU temperature: + * - temp1_input: the on die GPU temperature in millidegrees Celsius + * - temp1_crit: temperature critical max value in millidegrees Celsius + * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius + * + * hwmon interfaces for GPU voltage: + * - in0_input: the voltage on the GPU in millivolts + * - in1_input: the voltage on the Northbridge in millivolts + * + * hwmon interfaces for GPU power: + * - power1_average: average power used by the GPU in microWatts + * - power1_cap_min: minimum cap supported in microWatts + * - power1_cap_max: maximum cap supported in microWatts + * - power1_cap: selected power cap in microWatts + * + * hwmon interfaces for GPU fan: + * - pwm1: pulse width modulation fan level (0-255) + * - pwm1_enable: pulse width modulation fan control method + * 0: no fan speed control + * 1: manual fan speed control using pwm interface + * 2: automatic fan speed control + * - pwm1_min: pulse width modulation fan control minimum level (0) + * - pwm1_max: pulse width modulation fan control maximum level (255) + * - fan1_input: fan speed in RPM + * + * You can use hwmon tools like sensors to view this information on your system. + * + */ + static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); -- cgit v1.2.1 From 6907069004216e630d30847bf2893ab18156ed0f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 29 Mar 2018 13:51:28 -0500 Subject: drm/amdgpu: add asic need_full_reset callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow us to determine at the soc level whether the asic requires full reset or if soft reset will work. 
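The mechanism is a per-ASIC function table that common reset code queries through a wrapper macro, so each SoC family can answer the question itself; the following patches fill the callback in for SI, CIK, VI and SOC15 and then use it in amdgpu_device. A standalone sketch of the pattern, with simplified names rather than the actual amdgpu structures:

	/* Sketch of the per-ASIC function-table pattern (illustrative names
	 * only).  Each ASIC family provides its own callbacks and common code
	 * asks the table instead of switching on the chip type everywhere.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct asic_funcs {
		bool (*need_full_reset)(void *ctx);
	};

	struct device_ctx {
		const struct asic_funcs *funcs;
	};

	static bool legacy_asic_need_full_reset(void *ctx)
	{
		/* soft reset not implemented yet: always ask for a full reset */
		return true;
	}

	static bool newer_asic_need_full_reset(void *ctx)
	{
		/* per-IP soft reset known to work on this family */
		return false;
	}

	static void recover(struct device_ctx *dev)
	{
		if (dev->funcs->need_full_reset(dev))
			printf("doing a full ASIC reset\n");
		else
			printf("trying per-IP soft reset first\n");
	}

	int main(void)
	{
		static const struct asic_funcs legacy = {
			.need_full_reset = legacy_asic_need_full_reset,
		};
		static const struct asic_funcs newer = {
			.need_full_reset = newer_asic_need_full_reset,
		};
		struct device_ctx a = { .funcs = &legacy };
		struct device_ctx b = { .funcs = &newer };

		recover(&a);
		recover(&b);
		return 0;
	}
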
Reviewed-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index df409ddb97e6..21272ce74b56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1204,6 +1204,8 @@ struct amdgpu_asic_funcs { /* invalidate hdp read cache */ void (*invalidate_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring); + /* check if the asic needs a full reset of if soft reset will work */ + bool (*need_full_reset)(struct amdgpu_device *adev); }; /* @@ -1773,6 +1775,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) +#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) -- cgit v1.2.1 From 0a881af83cf8d0a9d270f63dd378f4eefda60c48 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 3 Apr 2018 13:27:14 -0500 Subject: drm/amdgpu/si: implement asic need_full_reset callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Used to check on a per SoC basis whether the SoC needs a full reset of a per IP soft reset. Reviewed-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/si.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index a675ec6d2811..c364ef94cc36 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1252,6 +1252,12 @@ static void si_invalidate_hdp(struct amdgpu_device *adev, } } +static bool si_need_full_reset(struct amdgpu_device *adev) +{ + /* change this when we support soft reset */ + return true; +} + static int si_get_pcie_lanes(struct amdgpu_device *adev) { u32 link_width_cntl; @@ -1332,6 +1338,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs = .get_config_memsize = &si_get_config_memsize, .flush_hdp = &si_flush_hdp, .invalidate_hdp = &si_invalidate_hdp, + .need_full_reset = &si_need_full_reset, }; static uint32_t si_get_rev_id(struct amdgpu_device *adev) -- cgit v1.2.1 From b7acb46f210b92005b8db16380eac6b3a4c61431 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 29 Mar 2018 14:39:10 -0500 Subject: drm/amdgpu/cik: implement asic need_full_reset callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Used to check on a per SoC basis whether the SoC needs a full reset of a per IP soft reset. 
Reviewed-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 0df22030e713..8ff4c60d1b59 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1735,6 +1735,12 @@ static void cik_invalidate_hdp(struct amdgpu_device *adev, } } +static bool cik_need_full_reset(struct amdgpu_device *adev) +{ + /* change this when we support soft reset */ + return true; +} + static const struct amdgpu_asic_funcs cik_asic_funcs = { .read_disabled_bios = &cik_read_disabled_bios, @@ -1748,6 +1754,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .get_config_memsize = &cik_get_config_memsize, .flush_hdp = &cik_flush_hdp, .invalidate_hdp = &cik_invalidate_hdp, + .need_full_reset = &cik_need_full_reset, }; static int cik_common_early_init(void *handle) -- cgit v1.2.1 From 06082d9b711fd5889c5f182c6fa629891e5b48c3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 29 Mar 2018 14:39:28 -0500 Subject: drm/amdgpu/vi: implement asic need_full_reset callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Used to check on a per SoC basis whether the SoC needs a full reset of a per IP soft reset. Reviewed-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 126f1276d347..1b4ee249b95a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -876,6 +876,27 @@ static void vi_invalidate_hdp(struct amdgpu_device *adev, } } +static bool vi_need_full_reset(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + /* CZ has hang issues with full reset at the moment */ + return false; + case CHIP_FIJI: + case CHIP_TONGA: + /* XXX: soft reset should work on fiji and tonga */ + return true; + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_TOPAZ: + default: + /* change this when we support soft reset */ + return true; + } +} + static const struct amdgpu_asic_funcs vi_asic_funcs = { .read_disabled_bios = &vi_read_disabled_bios, @@ -889,6 +910,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .get_config_memsize = &vi_get_config_memsize, .flush_hdp = &vi_flush_hdp, .invalidate_hdp = &vi_invalidate_hdp, + .need_full_reset = &vi_need_full_reset, }; #define CZ_REV_BRISTOL(rev) \ -- cgit v1.2.1 From adbd4f894f3615f04a4c0cfb931ed647c0280a5f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 29 Mar 2018 14:39:46 -0500 Subject: drm/amdgpu/soc15: implement asic need_full_reset callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Used to check on a per SoC basis whether the SoC needs a full reset of a per IP soft reset. 
Reviewed-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 654b015d5e05..2e9ebe8db5cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -568,6 +568,12 @@ static void soc15_invalidate_hdp(struct amdgpu_device *adev, HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1); } +static bool soc15_need_full_reset(struct amdgpu_device *adev) +{ + /* change this when we implement soft reset */ + return true; +} + static const struct amdgpu_asic_funcs soc15_asic_funcs = { .read_disabled_bios = &soc15_read_disabled_bios, @@ -581,6 +587,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = .get_config_memsize = &soc15_get_config_memsize, .flush_hdp = &soc15_flush_hdp, .invalidate_hdp = &soc15_invalidate_hdp, + .need_full_reset = &soc15_need_full_reset, }; static int soc15_common_early_init(void *handle) -- cgit v1.2.1 From 8bc04c2965879c79bb84e3fc8410e6c90cecc96d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 29 Mar 2018 14:48:37 -0500 Subject: drm/amdgpu: use new asic need_full_reset callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the new callback to determine whether to use full asic reset or per IP soft reset. Enables reset to actually proceed on asics which don't support soft reset yet. Reviewed-by: Christian König Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e0d6b1ddd213..abc33464959e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2738,6 +2738,9 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return true; + if (amdgpu_asic_need_full_reset(adev)) + return true; + for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; @@ -2794,6 +2797,9 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev) { int i; + if (amdgpu_asic_need_full_reset(adev)) + return true; + for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; -- cgit v1.2.1 From e63f86735d9220c8ca6929dc07a4c78f111a6201 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 30 Mar 2018 17:00:47 +0100 Subject: drm/amd/display: fix spelling mistake: "Usupported" -> "Unsupported" Trivial fix to spelling mistake in DRM_ERROR error message text Reviewed-by: Harry Wentland Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 077ee6793a1c..fbde450277e8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1521,7 +1521,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) break; #endif default: - DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); + DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); goto fail; } @@ 
-1714,7 +1714,7 @@ static int dm_early_init(void *handle) break; #endif default: - DRM_ERROR("Usupported ASIC type: 0x%X\n", adev->asic_type); + DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); return -EINVAL; } -- cgit v1.2.1 From 5a8c102ac471c53da38b2c3c35417e9355d21215 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Fri, 16 Mar 2018 12:29:38 +0800 Subject: drm/amdgpu: Don't change preferred domian when fallback GTT v6 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: add sanity checking v3: make code open v4: also handle visible to invisible fallback v5: Since two fallback cases, re-use goto retry v6: avoid bo is unref when retry, and only user BO can fallback Signed-off-by: Chunming Zhou Reviewed-by: Christian König (v5) Acked-by: Felix Kuehling Cc: felix.kuehling@amd.com Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 16 ++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 37 +++++++++++++++++++----------- 2 files changed, 26 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 46b9ea4e6103..28c2706e48d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -56,23 +56,11 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, alignment = PAGE_SIZE; } -retry: r = amdgpu_bo_create(adev, size, alignment, initial_domain, flags, type, resv, &bo); if (r) { - if (r != -ERESTARTSYS) { - if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { - flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - goto retry; - } - - if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { - initial_domain |= AMDGPU_GEM_DOMAIN_GTT; - goto retry; - } - DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", - size, initial_domain, alignment, r); - } + DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", + size, initial_domain, alignment, r); return r; } *obj = &bo->gem_base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6d08cde8443c..04d6830347ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -356,6 +356,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, struct amdgpu_bo *bo; unsigned long page_align; size_t acc_size; + u32 domains, preferred_domains, allowed_domains; int r; page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; @@ -369,22 +370,24 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, sizeof(struct amdgpu_bo)); + preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU | + AMDGPU_GEM_DOMAIN_GDS | + AMDGPU_GEM_DOMAIN_GWS | + AMDGPU_GEM_DOMAIN_OA); + allowed_domains = preferred_domains; + if (type != ttm_bo_type_kernel && + allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) + allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; + domains = preferred_domains; +retry: bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); - bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_CPU | - AMDGPU_GEM_DOMAIN_GDS | - AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA); - 
bo->allowed_domains = bo->preferred_domains; - if (type != ttm_bo_type_kernel && - bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) - bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; bo->flags = flags; @@ -417,12 +420,20 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, #endif bo->tbo.bdev = &adev->mman.bdev; - amdgpu_ttm_placement_from_domain(bo, domain); - + amdgpu_ttm_placement_from_domain(bo, domains); r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, &ctx, acc_size, NULL, resv, &amdgpu_ttm_bo_destroy); - if (unlikely(r != 0)) + if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) { + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { + flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + goto retry; + } else if (domains != allowed_domains) { + domains = allowed_domains; + goto retry; + } + } + if (unlikely(r)) return r; if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && -- cgit v1.2.1 From 552825b28ddac200b6080d9e79f4121b68e1517d Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 2 Apr 2018 11:20:44 +0800 Subject: drm/amdgpu: add new bo flag that indicates BOs don't need fallback (v2) user cases: 1. KFD wraps amdgpu_bo_create, they have no fallback case which is different with amdgpu_gem_object_create. since upstream branch has no amdgpu_amdkfd_gpuvm.c, which need KFD guys add this flag to __alloc_memory_of_gpu: + flags |= AMDGPU_GEM_CREATE_NO_FALLBACK; 2. UMD can specify this flag for their allocation as well if they like. v2: squash in merge conflict fix (Chunming) Signed-off-by: Chunming Zhou Acked-by: Felix Kuehling Cc: felix.kuehling@amd.com Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index dc34b50e6b29..d7d7ce1507ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -386,7 +386,8 @@ retry: bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT) p->bytes_moved_vis += ctx.bytes_moved; - if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { + if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains && + !(bo->flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) { domain = bo->allowed_domains; goto retry; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 04d6830347ec..9e23d6f6f3f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -388,6 +388,8 @@ retry: drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); + bo->preferred_domains = preferred_domains; + bo->allowed_domains = allowed_domains; bo->flags = flags; @@ -424,7 +426,8 @@ retry: r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, &ctx, acc_size, NULL, resv, &amdgpu_ttm_bo_destroy); - if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) { + if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device && + !(flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; goto retry; -- cgit v1.2.1 From 1eb1547fd0267fbb5fabe4973210dfc295a92725 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 3 
Apr 2018 10:41:32 -0400 Subject: drm/amdgpu: Added support for MV packet MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Motion vector packet needs support in physical mode. Signed-off-by: James Zhu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index a33804bd3314..d7261e01ff8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -755,6 +755,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) if (r) goto out; break; + + case 0x0500000d: /* MV buffer */ + r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, + idx + 2, 0, 0); + if (r) + goto out; + + r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8, + idx + 7, 0, 0); + if (r) + goto out; + break; } idx += len / 4; @@ -860,6 +872,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) goto out; break; + case 0x0500000d: /* MV buffer */ + r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, + idx + 2, *size, 0); + if (r) + goto out; + + r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8, + idx + 7, *size / 12, 0); + if (r) + goto out; + break; + default: DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); r = -EINVAL; -- cgit v1.2.1 From 8218d7f1f70179a532639f01dfd32dc5dbb09ed3 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 17 Oct 2017 12:02:01 -0400 Subject: drm/amd/display: Don't access legacy properties We're an atomic driver and shouldn't access legacy properties. Doing so will only scare users with stack traces. Instead save the prop in the state and access it directly. Much simpler. 
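The replacement pattern here is the usual atomic one: figure out what is needed when the sink's EDID is parsed, store it in the driver's subclass of the connector state, and read it back later through a container_of()-style cast (to_dm_connector_state() in this driver) instead of querying the legacy property list. A minimal standalone sketch of that pattern, with simplified types rather than the real DRM structures:

	/* Sketch: a driver wraps the core connector state, records derived
	 * information (here: freesync capability) at detect time, and reads it
	 * straight from the state afterwards.  Types are illustrative only.
	 */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct core_connector_state {
		int dpms;			/* core-owned fields */
	};

	struct driver_connector_state {
		struct core_connector_state base;
		bool freesync_capable;		/* driver-owned, set at detect time */
	};

	/* same idea as the kernel's container_of()/to_dm_connector_state() */
	#define to_driver_state(s) \
		((struct driver_connector_state *)((char *)(s) - \
			offsetof(struct driver_connector_state, base)))

	static void on_edid_parsed(struct core_connector_state *state, bool capable)
	{
		to_driver_state(state)->freesync_capable = capable;
	}

	static void on_modeset(struct core_connector_state *state)
	{
		if (to_driver_state(state)->freesync_capable)
			printf("enable variable refresh for this stream\n");
	}

	int main(void)
	{
		struct driver_connector_state st = { .base = { .dpms = 0 } };

		on_edid_parsed(&st.base, true);
		on_modeset(&st.base);
		return 0;
	}
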
Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 ++++++++++--- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 + 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index fbde450277e8..74839478bdc9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5118,17 +5118,24 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, struct edid *edid) { int i; - uint64_t val_capable; bool edid_check_required; struct detailed_timing *timing; struct detailed_non_pixel *data; struct detailed_data_monitor_range *range; struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + struct dm_connector_state *dm_con_state; struct drm_device *dev = connector->dev; struct amdgpu_device *adev = dev->dev_private; + if (!connector->state) { + DRM_ERROR("%s - Connector has no state", __func__); + return; + } + + dm_con_state = to_dm_connector_state(connector->state); + edid_check_required = false; if (!amdgpu_dm_connector->dc_sink) { DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); @@ -5147,7 +5154,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, amdgpu_dm_connector); } } - val_capable = 0; + dm_con_state->freesync_capable = false; if (edid_check_required == true && (edid->version > 1 || (edid->version == 1 && edid->revision > 1))) { for (i = 0; i < 4; i++) { @@ -5183,7 +5190,7 @@ void amdgpu_dm_add_sink_to_freesync_module(struct drm_connector *connector, amdgpu_dm_connector->min_vfreq * 1000000; amdgpu_dm_connector->caps.max_refresh_in_micro_hz = amdgpu_dm_connector->max_vfreq * 1000000; - val_capable = 1; + dm_con_state->freesync_capable = true; } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 3af699b24e10..005cf0d2dc34 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -220,6 +220,7 @@ struct dm_connector_state { uint8_t underscan_hborder; bool underscan_enable; struct mod_freesync_user_enable user_enable; + bool freesync_capable; }; #define to_dm_connector_state(x)\ -- cgit v1.2.1 From 742811b7121ec4e426edb4a21657ca5523955489 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Mon, 12 Mar 2018 11:16:47 -0400 Subject: drm/amd/display: Only register backlight device if embedded panel connected Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 58 +++++++++++++---------- 1 file changed, 33 insertions(+), 25 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 74839478bdc9..6636f4e9d30c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1403,6 +1403,28 @@ static int initialize_plane(struct amdgpu_display_manager *dm, return ret; } + +static void register_backlight_device(struct amdgpu_display_manager *dm, + struct dc_link *link) +{ +#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ + 
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) + + if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && + link->type != dc_connection_none) { + /* Event if registration failed, we should continue with + * DM initialization because not having a backlight control + * is better then a black screen. + */ + amdgpu_dm_register_backlight_device(dm); + + if (dm->backlight_dev) + dm->backlight_link = link; + } +#endif +} + + /* In this architecture, the association * connector -> encoder -> crtc * id not really requried. The crtc and connector will hold the @@ -1456,6 +1478,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) /* loops over all connectors on the board */ for (i = 0; i < link_cnt; i++) { + struct dc_link *link = NULL; if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { DRM_ERROR( @@ -1482,9 +1505,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) goto fail; } - if (dc_link_detect(dc_get_link_at_index(dm->dc, i), - DETECT_REASON_BOOT)) + link = dc_get_link_at_index(dm->dc, i); + + if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); + register_backlight_device(dm, link); + } + + } /* Software is initialized. Now we can register interrupt handlers. */ @@ -2684,7 +2712,8 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { + if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && + link->type != dc_connection_none) { amdgpu_dm_register_backlight_device(dm); if (dm->backlight_dev) { @@ -3557,6 +3586,7 @@ create_i2c(struct ddc_service *ddc_service, return i2c; } + /* Note: this function assumes that dc_link_detect() was called for the * dc_link which will be represented by this aconnector. */ @@ -3626,28 +3656,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, || connector_type == DRM_MODE_CONNECTOR_eDP) amdgpu_dm_initialize_dp_connector(dm, aconnector); -#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ - defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) - - /* NOTE: this currently will create backlight device even if a panel - * is not connected to the eDP/LVDS connector. - * - * This is less than ideal but we don't have sink information at this - * stage since detection happens after. We can't do detection earlier - * since MST detection needs connectors to be created first. - */ - if (link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) { - /* Event if registration failed, we should continue with - * DM initialization because not having a backlight control - * is better then a black screen. 
- */ - amdgpu_dm_register_backlight_device(dm); - - if (dm->backlight_dev) - dm->backlight_link = link; - } -#endif - out_free: if (res) { kfree(i2c); -- cgit v1.2.1 From 5cd29ed0cb9a1985d3334d52d3b54f0defb23de4 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Mon, 12 Mar 2018 11:48:26 -0400 Subject: drm/amd/display: Don't register backlight on connector_destroy Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6636f4e9d30c..7ecc22462628 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2713,14 +2713,10 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) && - link->type != dc_connection_none) { - amdgpu_dm_register_backlight_device(dm); - - if (dm->backlight_dev) { - backlight_device_unregister(dm->backlight_dev); - dm->backlight_dev = NULL; - } - + link->type != dc_connection_none && + dm->backlight_dev) { + backlight_device_unregister(dm->backlight_dev); + dm->backlight_dev = NULL; } #endif drm_connector_unregister(connector); -- cgit v1.2.1 From 0c8df4bbc4de4789dde7fa622585803fd10dd8e4 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 15 Mar 2018 13:46:50 -0400 Subject: drm/amd/display: Program v_total_min/max after v_total_cntl Signed-off-by: Anthony Koo Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dce110/dce110_timing_generator.c | 16 ++++++++-------- .../drm/amd/display/dc/dce120/dce120_timing_generator.c | 12 ++++++------ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 12 ++++++------ 3 files changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c index be7153924a70..1b2fe0df347f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c @@ -431,14 +431,6 @@ void dce110_timing_generator_set_drr( 0, CRTC_V_TOTAL_CONTROL, CRTC_SET_V_TOTAL_MIN_MASK); - set_reg_field_value(v_total_min, - 0, - CRTC_V_TOTAL_MIN, - CRTC_V_TOTAL_MIN); - set_reg_field_value(v_total_max, - 0, - CRTC_V_TOTAL_MAX, - CRTC_V_TOTAL_MAX); set_reg_field_value(v_total_cntl, 0, CRTC_V_TOTAL_CONTROL, @@ -447,6 +439,14 @@ void dce110_timing_generator_set_drr( 0, CRTC_V_TOTAL_CONTROL, CRTC_V_TOTAL_MAX_SEL); + set_reg_field_value(v_total_min, + 0, + CRTC_V_TOTAL_MIN, + CRTC_V_TOTAL_MIN); + set_reg_field_value(v_total_max, + 0, + CRTC_V_TOTAL_MAX, + CRTC_V_TOTAL_MAX); set_reg_field_value(v_total_cntl, 0, CRTC_V_TOTAL_CONTROL, diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c index 7bee78172d85..2ea490f8482e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c @@ -570,18 +570,18 @@ void dce120_timing_generator_set_drr( 0x180); } else { - CRTC_REG_UPDATE( - CRTC0_CRTC_V_TOTAL_MIN, - CRTC_V_TOTAL_MIN, 0); - 
CRTC_REG_UPDATE( - CRTC0_CRTC_V_TOTAL_MAX, - CRTC_V_TOTAL_MAX, 0); CRTC_REG_SET_N(CRTC0_CRTC_V_TOTAL_CONTROL, 5, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MIN_SEL), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_V_TOTAL_MAX_SEL), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_ON_EVENT), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_FORCE_LOCK_TO_MASTER_VSYNC), 0, FD(CRTC0_CRTC_V_TOTAL_CONTROL__CRTC_SET_V_TOTAL_MIN_MASK), 0); + CRTC_REG_UPDATE( + CRTC0_CRTC_V_TOTAL_MIN, + CRTC_V_TOTAL_MIN, 0); + CRTC_REG_UPDATE( + CRTC0_CRTC_V_TOTAL_MAX, + CRTC_V_TOTAL_MAX, 0); CRTC_REG_UPDATE( CRTC0_CRTC_STATIC_SCREEN_CONTROL, CRTC_STATIC_SCREEN_EVENT_MASK, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index f56eac0e4dd2..dc921307874a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -855,17 +855,17 @@ void optc1_set_drr( OTG_SET_V_TOTAL_MIN_MASK_EN, 0, OTG_SET_V_TOTAL_MIN_MASK, 0); } else { - REG_SET(OTG_V_TOTAL_MIN, 0, - OTG_V_TOTAL_MIN, 0); - - REG_SET(OTG_V_TOTAL_MAX, 0, - OTG_V_TOTAL_MAX, 0); - REG_UPDATE_4(OTG_V_TOTAL_CONTROL, OTG_SET_V_TOTAL_MIN_MASK, 0, OTG_V_TOTAL_MIN_SEL, 0, OTG_V_TOTAL_MAX_SEL, 0, OTG_FORCE_LOCK_ON_EVENT, 0); + + REG_SET(OTG_V_TOTAL_MIN, 0, + OTG_V_TOTAL_MIN, 0); + + REG_SET(OTG_V_TOTAL_MAX, 0, + OTG_V_TOTAL_MAX, 0); } } -- cgit v1.2.1 From 9e3efe3eed47952e2e0209b3808989ae1cc6a31b Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 15 Mar 2018 15:08:04 -0400 Subject: drm/amd/display: Set ignore_msa_timing_param Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7ecc22462628..a6039e5b664d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2487,6 +2487,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream); + if (dm_state && dm_state->freesync_capable) + stream->ignore_msa_timing_param = true; + return stream; } -- cgit v1.2.1 From f110892ead622bdc9a7732a23aef3a08b0565608 Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Mon, 19 Mar 2018 15:22:51 -0400 Subject: drm/amd/display: Non-HDMI DP active dongle should not support YUV pixel format Signed-off-by: Hersen Wu Reviewed-by: Tony Cheng Reviewed-by: Harry Wentland Reviewed-by: Wesley Chalmers Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index c18f24afa698..e612841f7f91 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1848,9 +1848,22 @@ static void disable_link(struct dc_link *link, enum signal_type signal) static bool dp_active_dongle_validate_timing( const struct dc_crtc_timing *timing, - const struct dc_dongle_caps *dongle_caps) + const struct dpcd_caps *dpcd_caps) { unsigned int required_pix_clk = timing->pix_clk_khz; + const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps; + + switch (dpcd_caps->dongle_type) { + case DISPLAY_DONGLE_DP_VGA_CONVERTER: + case 
DISPLAY_DONGLE_DP_DVI_CONVERTER: + case DISPLAY_DONGLE_DP_DVI_DONGLE: + if (timing->pixel_encoding == PIXEL_ENCODING_RGB) + return true; + else + return false; + default: + break; + } if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || dongle_caps->extendedCapValid == false) @@ -1916,7 +1929,7 @@ enum dc_status dc_link_validate_mode_timing( const struct dc_crtc_timing *timing) { uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk; - struct dc_dongle_caps *dongle_caps = &link->dpcd_caps.dongle_caps; + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; /* A hack to avoid failing any modes for EDID override feature on * topology change such as lower quality cable for DP or different dongle @@ -1929,7 +1942,7 @@ enum dc_status dc_link_validate_mode_timing( return DC_EXCEED_DONGLE_CAP; /* Active Dongle*/ - if (!dp_active_dongle_validate_timing(timing, dongle_caps)) + if (!dp_active_dongle_validate_timing(timing, dpcd_caps)) return DC_EXCEED_DONGLE_CAP; switch (stream->signal) { -- cgit v1.2.1 From 63bd5444f6937bf6bd27a2ab79162fec784dd83c Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Wed, 14 Mar 2018 15:54:27 -0400 Subject: drm/amd/display: Fix potential access beyond end of array in CM Signed-off-by: Harry Wentland CC: Dan Carpenter Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 881a1bff94d2..96d5878e9ccd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -367,15 +367,15 @@ bool cm_helper_translate_curve_to_hw_format( lut_params->hw_points_num = hw_points; - i = 1; - for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) { + k = 0; + for (i = 1; i < MAX_REGIONS_NUMBER; i++) { if (seg_distr[k] != -1) { lut_params->arr_curve_points[k].segments_num = seg_distr[k]; lut_params->arr_curve_points[i].offset = lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]); } - i++; + k++; } if (seg_distr[k] != -1) @@ -529,15 +529,15 @@ bool cm_helper_translate_curve_to_degamma_hw_format( lut_params->hw_points_num = hw_points; - i = 1; - for (k = 0; k < MAX_REGIONS_NUMBER && i < MAX_REGIONS_NUMBER; k++) { + k = 0; + for (i = 1; i < MAX_REGIONS_NUMBER; i++) { if (seg_distr[k] != -1) { lut_params->arr_curve_points[k].segments_num = seg_distr[k]; lut_params->arr_curve_points[i].offset = lut_params->arr_curve_points[k].offset + (1 << seg_distr[k]); } - i++; + k++; } if (seg_distr[k] != -1) -- cgit v1.2.1 From 5d4b05ddd826d877327ecabf987b7c61ec3cb0c5 Mon Sep 17 00:00:00 2001 From: Bhawanpreet Lakha Date: Thu, 15 Mar 2018 13:01:46 -0400 Subject: drm/amd/display: Add Dynamic debug prints Created Macros for DC_LOG_XXX to pr_debug() & DRM_DEBUG_KMS. 
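Because the noisier categories end up on pr_debug(), they compile away unless DEBUG or CONFIG_DYNAMIC_DEBUG is set, and with dynamic debug they can be switched on selectively at run time. A sketch of the pattern with an illustrative category name (not one of the real DC_LOG_* definitions), assuming a kernel built with CONFIG_DYNAMIC_DEBUG and debugfs mounted:

	#include <linux/printk.h>

	/* Category prefix on top of pr_debug(), mirroring the DC_LOG_* style. */
	#define MYDRV_LOG_SCALER(...) pr_debug("[SCALER]:" __VA_ARGS__)

	static void scaler_example(int width, int height)
	{
		/* Enable at run time with, for example:
		 *   echo 'format "[SCALER]:" +p' > /sys/kernel/debug/dynamic_debug/control
		 */
		MYDRV_LOG_SCALER("viewport %dx%d\n", width, height);
	}
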
Signed-off-by: Bhawanpreet Lakha Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 -- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 20 +++---- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 19 ++++--- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 7 ++- drivers/gpu/drm/amd/display/dc/dce/dce_audio.c | 9 +-- .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 11 ++-- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 7 ++- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 14 ++--- drivers/gpu/drm/amd/display/include/logger_types.h | 64 +++++++++++----------- 9 files changed, 77 insertions(+), 79 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a6039e5b664d..2514d7b3b66e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -433,11 +433,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; - if (amdgpu_dc_log) - init_data.log_mask = DC_DEFAULT_LOG_MASK; - else - init_data.log_mask = DC_MIN_LOG_MASK; - /* * TODO debug why this doesn't work on Raven */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 71cc60fcff5e..a3c87611220d 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -36,8 +36,9 @@ #include "hw_sequencer.h" #include "resource.h" -#define DC_LOGGER \ - logger + +#define DC_LOGGER_INIT(logger) + #define SURFACE_TRACE(...) do {\ if (dc->debug.surface_trace) \ @@ -60,8 +61,7 @@ void pre_surface_trace( int surface_count) { int i; - struct dc *core_dc = dc; - struct dal_logger *logger = core_dc->ctx->logger; + DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < surface_count; i++) { const struct dc_plane_state *plane_state = plane_states[i]; @@ -183,8 +183,7 @@ void update_surface_trace( int surface_count) { int i; - struct dc *core_dc = dc; - struct dal_logger *logger = core_dc->ctx->logger; + DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < surface_count; i++) { const struct dc_surface_update *update = &updates[i]; @@ -304,8 +303,7 @@ void update_surface_trace( void post_surface_trace(struct dc *dc) { - struct dc *core_dc = dc; - struct dal_logger *logger = core_dc->ctx->logger; + DC_LOGGER_INIT(dc->ctx->logger); SURFACE_TRACE("post surface process.\n"); @@ -317,10 +315,10 @@ void context_timing_trace( { int i; struct dc *core_dc = dc; - struct dal_logger *logger = core_dc->ctx->logger; int h_pos[MAX_PIPES], v_pos[MAX_PIPES]; struct crtc_position position; unsigned int underlay_idx = core_dc->res_pool->underlay_pipe_index; + DC_LOGGER_INIT(dc->ctx->logger); for (i = 0; i < core_dc->res_pool->pipe_count; i++) { @@ -355,9 +353,7 @@ void context_clock_trace( struct dc_state *context) { #if defined(CONFIG_DRM_AMD_DC_DCN1_0) - struct dc *core_dc = dc; - struct dal_logger *logger = core_dc->ctx->logger; - + DC_LOGGER_INIT(dc->ctx->logger); CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n" "dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n", context->bw.dcn.calc_clk.dispclk_khz, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index e612841f7f91..d9efdd926145 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ 
-45,8 +45,9 @@ #include "dce/dce_11_0_d.h" #include "dce/dce_11_0_enum.h" #include "dce/dce_11_0_sh_mask.h" -#define DC_LOGGER \ - dc_ctx->logger + +#define DC_LOGGER_INIT(logger) + #define LINK_INFO(...) \ DC_LOG_HW_HOTPLUG( \ @@ -561,7 +562,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) struct dc_context *dc_ctx = link->ctx; struct dc_sink *sink = NULL; enum dc_connection_type new_connection_type = dc_connection_none; - + DC_LOGGER_INIT(link->ctx->logger); if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) return false; @@ -927,6 +928,7 @@ static bool construct( struct integrated_info info = {{{ 0 }}}; struct dc_bios *bios = init_params->dc->ctx->dc_bios; const struct dc_vbios_funcs *bp_funcs = bios->funcs; + DC_LOGGER_INIT(dc_ctx->logger); link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; @@ -1135,7 +1137,8 @@ static void dpcd_configure_panel_mode( { union dpcd_edp_config edp_config_set; bool panel_mode_edp = false; - struct dc_context *dc_ctx = link->ctx; + DC_LOGGER_INIT(link->ctx->logger); + memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); if (DP_PANEL_MODE_DEFAULT != panel_mode) { @@ -1968,10 +1971,10 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, struct dc *core_dc = link->ctx->dc; struct abm *abm = core_dc->res_pool->abm; struct dmcu *dmcu = core_dc->res_pool->dmcu; - struct dc_context *dc_ctx = link->ctx; unsigned int controller_id = 0; bool use_smooth_brightness = true; int i; + DC_LOGGER_INIT(link->ctx->logger); if ((dmcu == NULL) || (abm == NULL) || @@ -2154,8 +2157,8 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; - struct dc_context *dc_ctx = link->ctx; uint8_t i; + DC_LOGGER_INIT(link->ctx->logger); /* enable_link_dp_mst already check link->enabled_stream_count * and stream is in link->stream[]. This is called during set mode, @@ -2234,7 +2237,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0); uint8_t i; bool mst_mode = (link->type == dc_connection_mst_branch); - struct dc_context *dc_ctx = link->ctx; + DC_LOGGER_INIT(link->ctx->logger); /* deallocate_mst_payload is called before disable link. When mode or * disable/enable monitor, new stream is created which is not in link @@ -2307,8 +2310,8 @@ void core_link_enable_stream( struct pipe_ctx *pipe_ctx) { struct dc *core_dc = pipe_ctx->stream->ctx->dc; - struct dc_context *dc_ctx = pipe_ctx->stream->ctx; enum dc_status status; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* eDP lit up by bios already, no need to enable again. 
*/ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 379b05536321..50b84f69bd25 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -45,8 +45,9 @@ #include "dcn10/dcn10_resource.h" #endif #include "dce120/dce120_resource.h" -#define DC_LOGGER \ - ctx->logger + +#define DC_LOGGER_INIT(logger) + enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) { enum dce_version dc_version = DCE_VERSION_UNKNOWN; @@ -835,7 +836,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; struct view recout_skip = { 0 }; bool res = false; - struct dc_context *ctx = pipe_ctx->stream->ctx; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); /* Important: scaling ratio calculation requires pixel format, * lb depth calculation requires recout and taps require scaling ratios. * Inits require viewport, taps, ratios and recout of split pipe diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 6d5cdcdc8ec9..7f6d724686f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -33,8 +33,9 @@ #define CTX \ aud->base.ctx -#define DC_LOGGER \ - aud->base.ctx->logger + +#define DC_LOGGER_INIT() + #define REG(reg)\ (aud->regs->reg) @@ -348,8 +349,8 @@ static void set_audio_latency( void dce_aud_az_enable(struct audio *audio) { - struct dce_audio *aud = DCE_AUD(audio); uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); + DC_LOGGER_INIT(); set_reg_field_value(value, 1, AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, @@ -371,7 +372,7 @@ void dce_aud_az_enable(struct audio *audio) void dce_aud_az_disable(struct audio *audio) { uint32_t value; - struct dce_audio *aud = DCE_AUD(audio); + DC_LOGGER_INIT(); value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); set_reg_field_value(value, 1, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 0aa2cda60890..67dad7f1e643 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -41,8 +41,9 @@ #define CTX \ clk_src->base.ctx -#define DC_LOGGER \ - calc_pll_cs->ctx->logger + +#define DC_LOGGER_INIT() + #undef FN #define FN(reg_name, field_name) \ clk_src->cs_shift->field_name, clk_src->cs_mask->field_name @@ -467,7 +468,7 @@ static uint32_t dce110_get_pix_clk_dividers_helper ( { uint32_t field = 0; uint32_t pll_calc_error = MAX_PLL_CALC_ERROR; - struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll; + DC_LOGGER_INIT(); /* Check if reference clock is external (not pcie/xtalin) * HW Dce80 spec: * 00 - PCIE_REFCLK, 01 - XTALIN, 02 - GENERICA, 03 - GENERICB @@ -557,8 +558,8 @@ static uint32_t dce110_get_pix_clk_dividers( struct pll_settings *pll_settings) { struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(cs); - struct calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll; uint32_t pll_calc_error = MAX_PLL_CALC_ERROR; + DC_LOGGER_INIT(); if (pix_clk_params == NULL || pll_settings == NULL || pix_clk_params->requested_pix_clk == 0) { @@ -1054,7 +1055,7 @@ static void get_ss_info_from_atombios( struct spread_spectrum_info *ss_info_cur; struct spread_spectrum_data *ss_data_cur; uint32_t i; - struct 
calc_pll_clock_source *calc_pll_cs = &clk_src->calc_pll; + DC_LOGGER_INIT(); if (ss_entries_num == NULL) { DC_LOG_SYNC( "Invalid entry !!!\n"); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index c6212301712b..e8df50f30e5b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -70,8 +70,9 @@ #define CTX \ hws->ctx -#define DC_LOGGER \ - ctx->logger + +#define DC_LOGGER_INIT() + #define REG(reg)\ hws->regs->reg @@ -2701,7 +2702,7 @@ static void dce110_program_front_end_for_pipe( struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; unsigned int i; - struct dc_context *ctx = dc->ctx; + DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); if (dc->current_state) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index e21458169d15..de5293dc4db3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -45,8 +45,8 @@ #include "dcn10_hubbub.h" #include "dcn10_cm_common.h" -#define DC_LOGGER \ - ctx->logger +#define DC_LOGGER_INIT(logger) + #define CTX \ hws->ctx #define REG(reg)\ @@ -363,7 +363,7 @@ static void power_on_plane( struct dce_hwseq *hws, int plane_id) { - struct dc_context *ctx = hws->ctx; + DC_LOGGER_INIT(hws->ctx->logger); if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); @@ -562,7 +562,7 @@ static void reset_back_end_for_pipe( struct dc_state *context) { int i; - struct dc_context *ctx = dc->ctx; + DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { pipe_ctx->stream = NULL; return; @@ -658,7 +658,7 @@ static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct dce_hwseq *hws = dc->hwseq; struct dpp *dpp = pipe_ctx->plane_res.dpp; - struct dc_context *ctx = dc->ctx; + DC_LOGGER_INIT(dc->ctx->logger); if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, @@ -708,7 +708,7 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx) static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx) { - struct dc_context *ctx = dc->ctx; + DC_LOGGER_INIT(dc->ctx->logger); if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated) return; @@ -2001,9 +2001,9 @@ static void dcn10_apply_ctx_for_surface( bool removed_pipe[4] = { false }; unsigned int ref_clk_mhz = dc->res_pool->ref_clock_inKhz/1000; bool program_water_mark = false; - struct dc_context *ctx = dc->ctx; struct pipe_ctx *top_pipe_to_program = find_top_pipe_for_stream(dc, context, stream); + DC_LOGGER_INIT(dc->ctx->logger); if (!top_pipe_to_program) return; diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 427796bdc14a..4f332e80cecc 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -29,39 +29,39 @@ #include "os_types.h" #define MAX_NAME_LEN 32 -#define DC_LOG_ERROR(a, ...) dm_logger_write(DC_LOGGER, LOG_ERROR, a, ## __VA_ARGS__) -#define DC_LOG_WARNING(a, ...) dm_logger_write(DC_LOGGER, LOG_WARNING, a, ## __VA_ARGS__) -#define DC_LOG_DEBUG(a, ...) dm_logger_write(DC_LOGGER, LOG_DEBUG, a, ## __VA_ARGS__) -#define DC_LOG_DC(a, ...) 
dm_logger_write(DC_LOGGER, LOG_DC, a, ## __VA_ARGS__) -#define DC_LOG_DTN(a, ...) dm_logger_write(DC_LOGGER, LOG_DTN, a, ## __VA_ARGS__) -#define DC_LOG_SURFACE(a, ...) dm_logger_write(DC_LOGGER, LOG_SURFACE, a, ## __VA_ARGS__) -#define DC_LOG_HW_HOTPLUG(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HOTPLUG, a, ## __VA_ARGS__) -#define DC_LOG_HW_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_LINK_TRAINING, a, ## __VA_ARGS__) -#define DC_LOG_HW_SET_MODE(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_SET_MODE, a, ## __VA_ARGS__) -#define DC_LOG_HW_RESUME_S3(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_RESUME_S3, a, ## __VA_ARGS__) -#define DC_LOG_HW_AUDIO(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_AUDIO, a, ## __VA_ARGS__) -#define DC_LOG_HW_HPD_IRQ(a, ...) dm_logger_write(DC_LOGGER, LOG_HW_HPD_IRQ, a, ## __VA_ARGS__) -#define DC_LOG_MST(a, ...) dm_logger_write(DC_LOGGER, LOG_MST, a, ## __VA_ARGS__) -#define DC_LOG_SCALER(a, ...) dm_logger_write(DC_LOGGER, LOG_SCALER, a, ## __VA_ARGS__) -#define DC_LOG_BIOS(a, ...) dm_logger_write(DC_LOGGER, LOG_BIOS, a, ## __VA_ARGS__) -#define DC_LOG_BANDWIDTH_CALCS(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_CALCS, a, ## __VA_ARGS__) -#define DC_LOG_BANDWIDTH_VALIDATION(a, ...) dm_logger_write(DC_LOGGER, LOG_BANDWIDTH_VALIDATION, a, ## __VA_ARGS__) -#define DC_LOG_I2C_AUX(a, ...) dm_logger_write(DC_LOGGER, LOG_I2C_AUX, a, ## __VA_ARGS__) -#define DC_LOG_SYNC(a, ...) dm_logger_write(DC_LOGGER, LOG_SYNC, a, ## __VA_ARGS__) -#define DC_LOG_BACKLIGHT(a, ...) dm_logger_write(DC_LOGGER, LOG_BACKLIGHT, a, ## __VA_ARGS__) -#define DC_LOG_FEATURE_OVERRIDE(a, ...) dm_logger_write(DC_LOGGER, LOG_FEATURE_OVERRIDE, a, ## __VA_ARGS__) -#define DC_LOG_DETECTION_EDID_PARSER(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_EDID_PARSER, a, ## __VA_ARGS__) -#define DC_LOG_DETECTION_DP_CAPS(a, ...) dm_logger_write(DC_LOGGER, LOG_DETECTION_DP_CAPS, a, ## __VA_ARGS__) -#define DC_LOG_RESOURCE(a, ...) dm_logger_write(DC_LOGGER, LOG_RESOURCE, a, ## __VA_ARGS__) -#define DC_LOG_DML(a, ...) dm_logger_write(DC_LOGGER, LOG_DML, a, ## __VA_ARGS__) -#define DC_LOG_EVENT_MODE_SET(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_MODE_SET, a, ## __VA_ARGS__) -#define DC_LOG_EVENT_DETECTION(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_DETECTION, a, ## __VA_ARGS__) -#define DC_LOG_EVENT_LINK_TRAINING(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_TRAINING, a, ## __VA_ARGS__) -#define DC_LOG_EVENT_LINK_LOSS(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_LINK_LOSS, a, ## __VA_ARGS__) -#define DC_LOG_EVENT_UNDERFLOW(a, ...) dm_logger_write(DC_LOGGER, LOG_EVENT_UNDERFLOW, a, ## __VA_ARGS__) -#define DC_LOG_IF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_IF_TRACE, a, ## __VA_ARGS__) -#define DC_LOG_PERF_TRACE(a, ...) dm_logger_write(DC_LOGGER, LOG_PERF_TRACE, a, ## __VA_ARGS__) +#define DC_LOG_ERROR(...) DRM_ERROR(__VA_ARGS__) +#define DC_LOG_WARNING(...) DRM_WARN(__VA_ARGS__) +#define DC_LOG_DEBUG(...) DRM_INFO(__VA_ARGS__) +#define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__) +#define DC_LOG_HW_HOTPLUG(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_HW_LINK_TRAINING(...) pr_debug("[HW_LINK_TRAINING]:"__VA_ARGS__) +#define DC_LOG_HW_SET_MODE(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_HW_RESUME_S3(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_HW_AUDIO(...) pr_debug("[HW_AUDIO]:"__VA_ARGS__) +#define DC_LOG_HW_HPD_IRQ(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_MST(...) 
DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_SCALER(...) pr_debug("[SCALER]:"__VA_ARGS__) +#define DC_LOG_BIOS(...) pr_debug("[BIOS]:"__VA_ARGS__) +#define DC_LOG_BANDWIDTH_CALCS(...) pr_debug("[BANDWIDTH_CALCS]:"__VA_ARGS__) +#define DC_LOG_BANDWIDTH_VALIDATION(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_I2C_AUX(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_SYNC(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_BACKLIGHT(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_FEATURE_OVERRIDE(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_DETECTION_EDID_PARSER(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_DETECTION_DP_CAPS(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_RESOURCE(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_DML(...) pr_debug("[DML]:"__VA_ARGS__) +#define DC_LOG_EVENT_MODE_SET(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_EVENT_DETECTION(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_EVENT_LINK_TRAINING(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_EVENT_LINK_LOSS(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_EVENT_UNDERFLOW(...) DRM_DEBUG_KMS(__VA_ARGS__) +#define DC_LOG_IF_TRACE(...) pr_debug("[IF_TRACE]:"__VA_ARGS__) +#define DC_LOG_PERF_TRACE(...) DRM_DEBUG_KMS(__VA_ARGS__) struct dal_logger; -- cgit v1.2.1 From 01fe3e4876d3799b37e6c712dcfed7cc2cafa3f0 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 15 Mar 2018 13:34:16 -0400 Subject: drm/amd/display: Add vmax/min_sel prints to dcn10_log_hw_state Signed-off-by: Anthony Koo Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 ++++-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 2 ++ 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index de5293dc4db3..f3341a2399fa 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -165,7 +165,7 @@ void dcn10_log_hw_state(struct dc *dc) } DTN_INFO("\n"); - DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin" + DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel" " h_bs h_be h_ss h_se hpol htot vtot underflow\n"); for (i = 0; i < pool->timing_generator_count; i++) { @@ -178,7 +178,7 @@ void dcn10_log_hw_state(struct dc *dc) if ((s.otg_enabled & 1) == 0) continue; - DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %5d %5d %5d" + DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d" " %5d %5d %5d %5d %9d\n", tg->inst, s.v_blank_start, @@ -188,6 +188,8 @@ void dcn10_log_hw_state(struct dc *dc) s.v_sync_a_pol, s.v_total_max, s.v_total_min, + s.v_total_max_sel, + s.v_total_min_sel, s.h_blank_start, s.h_blank_end, s.h_sync_a_start, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index dc921307874a..2c5dbece928e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -1229,6 +1229,12 @@ void optc1_read_otg_state(struct optc *optc1, REG_GET(OTG_V_TOTAL_MIN, OTG_V_TOTAL_MIN, &s->v_total_min); + REG_GET(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MAX_SEL, &s->v_total_max_sel); + + REG_GET(OTG_V_TOTAL_CONTROL, + OTG_V_TOTAL_MIN_SEL, &s->v_total_min_sel); + REG_GET_2(OTG_V_SYNC_A, OTG_V_SYNC_A_START, &s->v_sync_a_start, OTG_V_SYNC_A_END, &s->v_sync_a_end); diff 
--git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 5a9a73d69fd6..89e09e5327a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -406,6 +406,8 @@ struct dcn_otg_state { uint32_t v_total; uint32_t v_total_max; uint32_t v_total_min; + uint32_t v_total_min_sel; + uint32_t v_total_max_sel; uint32_t v_sync_a_start; uint32_t v_sync_a_end; uint32_t h_blank_start; -- cgit v1.2.1 From 4c61af8afe855fcf65a09d47c8e330bb1fd1fb4a Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 23 Mar 2018 13:39:27 -0400 Subject: drm/amd/display: Implement dm_get_timestamp We use this to ensure we wait at least 500ms in between eDP disable/enable. Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index 89342b48be6b..fe29125215b5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -37,8 +37,10 @@ unsigned long long dm_get_timestamp(struct dc_context *ctx) { - /* TODO: return actual timestamp */ - return 0; + struct timespec64 time; + + getrawmonotonic64(&time); + return timespec64_to_ns(&time); } void dm_perf_trace_timestamp(const char *func_name, unsigned int line) -- cgit v1.2.1 From 78d5d04d118d55b6c51ca787d5debb9ad1b8a391 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Tue, 20 Mar 2018 14:53:04 -0400 Subject: drm/amd/display: add delay between panel pwr off to on. As per eDP 1.4 spec, there must be at least 500ms delay between eDP power off and on. This change added time stamp when edp power off, which can be used to calculate duration time when edp power on. If duration less than 500ms, add a wait. 
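Note on the change below: the logic added to hwss_edp_power_control() boils down to measuring how long the panel has been off and topping that up to the 500 ms the eDP 1.4 spec requires. A rough sketch of that calculation, using the dm_get_timestamp()/dm_get_elapse_time_in_ns() helpers introduced in this series (edp_poweron_wait_ms is only an illustrative name; the actual patch does this inline before the VBIOS power-on command):

/*
 * Sketch only, not the patch code.  edp_poweroff_ts == 0 means no
 * power-off was ever recorded (e.g. first boot), so the full 500 ms
 * is waited as the safe default.
 */
static unsigned long long edp_poweron_wait_ms(struct dc_context *ctx,
					      unsigned long long edp_poweroff_ts)
{
	unsigned long long since_off_ms;

	if (edp_poweroff_ts == 0)
		return 500;	/* no recorded power-off: assume worst case */

	/* elapsed time since the panel was powered off, in ms */
	since_off_ms = dm_get_elapse_time_in_ns(ctx, dm_get_timestamp(ctx),
						edp_poweroff_ts) / 1000000;

	/* wait only for whatever is left of the 500 ms minimum */
	return since_off_ms < 500 ? 500 - since_off_ms : 0;
}

The caller then msleep()s for the returned value (if non-zero) before sending the panel power-on command, which is exactly what the hunk in dce110_hw_sequencer.c below does.
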
Signed-off-by: Charlene Liu Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 7 ++++++ drivers/gpu/drm/amd/display/dc/dc_link.h | 9 +++++++ .../amd/display/dc/dce110/dce110_hw_sequencer.c | 29 +++++++++++++++++++++- drivers/gpu/drm/amd/display/dc/dm_services.h | 4 +++ 4 files changed, 48 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index fe29125215b5..0229c7edb8ad 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -43,6 +43,13 @@ unsigned long long dm_get_timestamp(struct dc_context *ctx) return timespec64_to_ns(&time); } +unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx, + unsigned long long current_time_stamp, + unsigned long long last_time_stamp) +{ + return current_time_stamp - last_time_stamp; +} + void dm_perf_trace_timestamp(const char *func_name, unsigned int line) { } diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index fb4d9eafdc6e..eeff98741293 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -51,6 +51,14 @@ struct link_mst_stream_allocation_table { struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM]; }; +struct time_stamp { + uint64_t edp_poweroff; + uint64_t edp_poweron; +}; + +struct link_trace { + struct time_stamp time_stamp; +}; /* * A link contains one or more sinks and their connected status. * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported. @@ -114,6 +122,7 @@ struct dc_link { struct dc_link_status link_status; + struct link_trace link_trace; }; const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index e8df50f30e5b..db2d15dfb831 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -849,6 +849,28 @@ void hwss_edp_power_control( if (power_up != is_panel_powered_on(hwseq)) { /* Send VBIOS command to prompt eDP panel power */ + if (power_up) { + unsigned long long current_ts = dm_get_timestamp(ctx); + unsigned long long duration_in_ms = + dm_get_elapse_time_in_ns( + ctx, + current_ts, + link->link_trace.time_stamp.edp_poweroff) / 1000000; + unsigned long long wait_time_ms = 0; + + /* max 500ms from LCDVDD off to on */ + if (link->link_trace.time_stamp.edp_poweroff == 0) + wait_time_ms = 500; + else if (duration_in_ms < 500) + wait_time_ms = 500 - duration_in_ms; + + if (wait_time_ms) { + msleep(wait_time_ms); + dm_output_to_console("%s: wait %lld ms to power on eDP.\n", + __func__, wait_time_ms); + } + + } DC_LOG_HW_RESUME_S3( "%s: Panel Power action: %s\n", @@ -862,9 +884,14 @@ void hwss_edp_power_control( cntl.coherent = false; cntl.lanes_number = LANE_COUNT_FOUR; cntl.hpd_sel = link->link_enc->hpd_source; - bp_result = link_transmitter_control(ctx->dc_bios, &cntl); + if (!power_up) + /*save driver power off time stamp*/ + link->link_trace.time_stamp.edp_poweroff = dm_get_timestamp(ctx); + else + link->link_trace.time_stamp.edp_poweron = dm_get_timestamp(ctx); + if (bp_result != BP_RESULT_OK) DC_LOG_ERROR( "%s: Panel Power bp_result: 
%d\n", diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 22e7ee7dcd26..8eafe1af8a5e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -341,6 +341,10 @@ bool dm_dmcu_set_pipe(struct dc_context *ctx, unsigned int controller_id); unsigned long long dm_get_timestamp(struct dc_context *ctx); +unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx, + unsigned long long current_time_stamp, + unsigned long long last_time_stamp); + /* * performance tracing */ -- cgit v1.2.1 From 1402c605173bbbb1c2f7e615a1708a6ee61c69c3 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 14 Mar 2018 17:56:58 -0400 Subject: drm/amd/display: Set all update flags when we have full update To prevent future optimization related bugs, just set all update flags when we have a full update, since we know we want to reprogram everything in that case. Signed-off-by: Eric Yang Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 6f4ad67ffca6..b331d9e78cdb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1208,7 +1208,7 @@ enum surface_update_type dc_check_update_surfaces_for_stream( type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); if (type == UPDATE_TYPE_FULL) for (i = 0; i < surface_count; i++) - updates[i].surface->update_flags.bits.full_update = 1; + updates[i].surface->update_flags.raw = 0xFFFFFFFF; return type; } -- cgit v1.2.1 From 0c41891c81c017b5040b211f0b294ff5eb440d44 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Mon, 19 Mar 2018 14:41:59 -0400 Subject: drm/amd/display: Refactor stream encoder for HW review Move DCN1 implementation of stream encoder to new file (instead of common dce_stream_encoder.c). Cleanup code related to different implementation due to register definition differences. 
Signed-off-by: Eric Bernstein Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 36 +- .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 1505 ++++++++++++++++++++ .../amd/display/dc/dcn10/dcn10_stream_encoder.h | 584 ++++++++ 4 files changed, 2101 insertions(+), 26 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 5469bdfe19f3..5c69743a4b4f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -26,7 +26,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ - dcn10_hubbub.o + dcn10_hubbub.o dcn10_stream_encoder.o AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 7ad290cbc730..f305f65675d8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -39,7 +39,7 @@ #include "dce110/dce110_hw_sequencer.h" #include "dcn10/dcn10_opp.h" #include "dce/dce_link_encoder.h" -#include "dce/dce_stream_encoder.h" +#include "dcn10/dcn10_stream_encoder.h" #include "dce/dce_clocks.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" @@ -166,36 +166,22 @@ static const struct dce_abm_mask abm_mask = { #define stream_enc_regs(id)\ [id] = {\ - SE_DCN_REG_LIST(id),\ - .TMDS_CNTL = 0,\ - .AFMT_AVI_INFO0 = 0,\ - .AFMT_AVI_INFO1 = 0,\ - .AFMT_AVI_INFO2 = 0,\ - .AFMT_AVI_INFO3 = 0,\ + SE_DCN_REG_LIST(id)\ } -static const struct dce110_stream_enc_registers stream_enc_regs[] = { +static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(0), stream_enc_regs(1), stream_enc_regs(2), stream_enc_regs(3), }; -static const struct dce_stream_encoder_shift se_shift = { +static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN10(__SHIFT) }; -static const struct dce_stream_encoder_mask se_mask = { - SE_COMMON_MASK_SH_LIST_DCN10(_MASK), - .AFMT_GENERIC0_UPDATE = 0, - .AFMT_GENERIC2_UPDATE = 0, - .DP_DYN_RANGE = 0, - .DP_YCBCR_RANGE = 0, - .HDMI_AVI_INFO_SEND = 0, - .HDMI_AVI_INFO_CONT = 0, - .HDMI_AVI_INFO_LINE = 0, - .DP_SEC_AVI_ENABLE = 0, - .AFMT_AVI_INFO_VERSION = 0 +static const struct dcn10_stream_encoder_mask se_mask = { + SE_COMMON_MASK_SH_LIST_DCN10(_MASK) }; #define audio_regs(id)\ @@ -653,16 +639,16 @@ static struct stream_encoder *dcn10_stream_encoder_create( enum engine_id eng_id, struct dc_context *ctx) { - struct dce110_stream_encoder *enc110 = - kzalloc(sizeof(struct dce110_stream_encoder), GFP_KERNEL); + struct dcn10_stream_encoder *enc1 = + kzalloc(sizeof(struct dcn10_stream_encoder), GFP_KERNEL); - if (!enc110) + if (!enc1) return NULL; - dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, + dcn10_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); - return &enc110->base; + return &enc1->base; } static const struct dce_hwseq_registers hwseq_reg = { diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c new file mode 100644 index 000000000000..0413c707b921 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -0,0 +1,1505 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "dc_bios_types.h" +#include "dcn10_stream_encoder.h" + +#include "reg_helper.h" +#define DC_LOGGER \ + enc1->base.ctx->logger +enum DP_PIXEL_ENCODING { +DP_PIXEL_ENCODING_RGB444 = 0x00000000, +DP_PIXEL_ENCODING_YCBCR422 = 0x00000001, +DP_PIXEL_ENCODING_YCBCR444 = 0x00000002, +DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003, +DP_PIXEL_ENCODING_Y_ONLY = 0x00000004, +DP_PIXEL_ENCODING_YCBCR420 = 0x00000005, +DP_PIXEL_ENCODING_RESERVED = 0x00000006, +}; + + +enum DP_COMPONENT_DEPTH { +DP_COMPONENT_DEPTH_6BPC = 0x00000000, +DP_COMPONENT_DEPTH_8BPC = 0x00000001, +DP_COMPONENT_DEPTH_10BPC = 0x00000002, +DP_COMPONENT_DEPTH_12BPC = 0x00000003, +DP_COMPONENT_DEPTH_16BPC = 0x00000004, +DP_COMPONENT_DEPTH_RESERVED = 0x00000005, +}; + + +#define REG(reg)\ + (enc1->regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc1->se_shift->field_name, enc1->se_mask->field_name + +#define VBI_LINE_0 0 +#define DP_BLANK_MAX_RETRY 20 +#define HDMI_CLOCK_CHANNEL_RATE_MORE_340M 340000 + + +enum { + DP_MST_UPDATE_MAX_RETRY = 50 +}; + +#define CTX \ + enc1->base.ctx + +static void enc1_update_generic_info_packet( + struct dcn10_stream_encoder *enc1, + uint32_t packet_index, + const struct dc_info_packet *info_packet) +{ + uint32_t regval; + /* TODOFPGA Figure out a proper number for max_retries polling for lock + * use 50 for now. + */ + uint32_t max_retries = 50; + + /*we need turn on clock before programming AFMT block*/ + REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); + + if (packet_index >= 8) + ASSERT(0); + + /* poll dig_update_lock is not locked -> asic internal signal + * assume otg master lock will unlock it + */ +/* REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, + 0, 10, max_retries);*/ + + /* check if HW reading GSP memory */ + REG_WAIT(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, + 0, 10, max_retries); + + /* HW does is not reading GSP memory not reading too long -> + * something wrong. clear GPS memory access and notify? 
+ * hw SW is writing to GSP memory + */ + REG_UPDATE(AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, 1); + + /* choose which generic packet to use */ + regval = REG_READ(AFMT_VBI_PACKET_CONTROL); + REG_UPDATE(AFMT_VBI_PACKET_CONTROL, + AFMT_GENERIC_INDEX, packet_index); + + /* write generic packet header + * (4th byte is for GENERIC0 only) + */ + REG_SET_4(AFMT_GENERIC_HDR, 0, + AFMT_GENERIC_HB0, info_packet->hb0, + AFMT_GENERIC_HB1, info_packet->hb1, + AFMT_GENERIC_HB2, info_packet->hb2, + AFMT_GENERIC_HB3, info_packet->hb3); + + /* write generic packet contents + * (we never use last 4 bytes) + * there are 8 (0-7) mmDIG0_AFMT_GENERIC0_x registers + */ + { + const uint32_t *content = + (const uint32_t *) &info_packet->sb[0]; + + REG_WRITE(AFMT_GENERIC_0, *content++); + REG_WRITE(AFMT_GENERIC_1, *content++); + REG_WRITE(AFMT_GENERIC_2, *content++); + REG_WRITE(AFMT_GENERIC_3, *content++); + REG_WRITE(AFMT_GENERIC_4, *content++); + REG_WRITE(AFMT_GENERIC_5, *content++); + REG_WRITE(AFMT_GENERIC_6, *content++); + REG_WRITE(AFMT_GENERIC_7, *content); + } + + switch (packet_index) { + case 0: + REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC0_FRAME_UPDATE, 1); + break; + case 1: + REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC1_FRAME_UPDATE, 1); + break; + case 2: + REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC2_FRAME_UPDATE, 1); + break; + case 3: + REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC3_FRAME_UPDATE, 1); + break; + case 4: + REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC4_FRAME_UPDATE, 1); + break; + case 5: + REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC5_FRAME_UPDATE, 1); + break; + case 6: + REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC6_FRAME_UPDATE, 1); + break; + case 7: + REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, + AFMT_GENERIC7_FRAME_UPDATE, 1); + break; + default: + break; + } +} + +static void enc1_update_hdmi_info_packet( + struct dcn10_stream_encoder *enc1, + uint32_t packet_index, + const struct dc_info_packet *info_packet) +{ + uint32_t cont, send, line; + + if (info_packet->valid) { + enc1_update_generic_info_packet( + enc1, + packet_index, + info_packet); + + /* enable transmission of packet(s) - + * packet transmission begins on the next frame + */ + cont = 1; + /* send packet(s) every frame */ + send = 1; + /* select line number to send packets on */ + line = 2; + } else { + cont = 0; + send = 0; + line = 0; + } + + /* choose which generic packet control to use */ + switch (packet_index) { + case 0: + REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0, + HDMI_GENERIC0_CONT, cont, + HDMI_GENERIC0_SEND, send, + HDMI_GENERIC0_LINE, line); + break; + case 1: + REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL0, + HDMI_GENERIC1_CONT, cont, + HDMI_GENERIC1_SEND, send, + HDMI_GENERIC1_LINE, line); + break; + case 2: + REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1, + HDMI_GENERIC0_CONT, cont, + HDMI_GENERIC0_SEND, send, + HDMI_GENERIC0_LINE, line); + break; + case 3: + REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL1, + HDMI_GENERIC1_CONT, cont, + HDMI_GENERIC1_SEND, send, + HDMI_GENERIC1_LINE, line); + break; + case 4: + REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, + HDMI_GENERIC0_CONT, cont, + HDMI_GENERIC0_SEND, send, + HDMI_GENERIC0_LINE, line); + break; + case 5: + REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2, + HDMI_GENERIC1_CONT, cont, + HDMI_GENERIC1_SEND, send, + HDMI_GENERIC1_LINE, line); + break; + case 6: + REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3, + HDMI_GENERIC0_CONT, cont, + HDMI_GENERIC0_SEND, send, + HDMI_GENERIC0_LINE, line); + break; + case 7: + 
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL3, + HDMI_GENERIC1_CONT, cont, + HDMI_GENERIC1_SEND, send, + HDMI_GENERIC1_LINE, line); + break; + default: + /* invalid HW packet index */ + DC_LOG_WARNING( + "Invalid HW packet index: %s()\n", + __func__); + return; + } +} + +/* setup stream encoder in dp mode */ +static void enc1_stream_encoder_dp_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + enum dc_color_space output_color_space) +{ + uint32_t h_active_start; + uint32_t v_active_start; + uint32_t misc0 = 0; + uint32_t misc1 = 0; + uint32_t h_blank; + uint32_t h_back_porch; + uint8_t synchronous_clock = 0; /* asynchronous mode */ + uint8_t colorimetry_bpc; + uint8_t dynamic_range_rgb = 0; /*full range*/ + uint8_t dynamic_range_ycbcr = 1; /*bt709*/ + + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(DP_DB_CNTL, DP_DB_DISABLE, 1); + + /* set pixel encoding */ + switch (crtc_timing->pixel_encoding) { + case PIXEL_ENCODING_YCBCR422: + REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, + DP_PIXEL_ENCODING_YCBCR422); + break; + case PIXEL_ENCODING_YCBCR444: + REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, + DP_PIXEL_ENCODING_YCBCR444); + + if (crtc_timing->flags.Y_ONLY) + if (crtc_timing->display_color_depth != COLOR_DEPTH_666) + /* HW testing only, no use case yet. + * Color depth of Y-only could be + * 8, 10, 12, 16 bits + */ + REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, + DP_PIXEL_ENCODING_Y_ONLY); + /* Note: DP_MSA_MISC1 bit 7 is the indicator + * of Y-only mode. + * This bit is set in HW if register + * DP_PIXEL_ENCODING is programmed to 0x4 + */ + break; + case PIXEL_ENCODING_YCBCR420: + REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, + DP_PIXEL_ENCODING_YCBCR420); + REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1); + break; + default: + REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, + DP_PIXEL_ENCODING_RGB444); + break; + } + + misc1 = REG_READ(DP_MSA_MISC); + + /* set color depth */ + + switch (crtc_timing->display_color_depth) { + case COLOR_DEPTH_666: + REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, + 0); + break; + case COLOR_DEPTH_888: + REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, + DP_COMPONENT_DEPTH_8BPC); + break; + case COLOR_DEPTH_101010: + REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, + DP_COMPONENT_DEPTH_10BPC); + + break; + case COLOR_DEPTH_121212: + REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, + DP_COMPONENT_DEPTH_12BPC); + break; + default: + REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, + DP_COMPONENT_DEPTH_6BPC); + break; + } + + /* set dynamic range and YCbCr range */ + + switch (crtc_timing->display_color_depth) { + case COLOR_DEPTH_666: + colorimetry_bpc = 0; + break; + case COLOR_DEPTH_888: + colorimetry_bpc = 1; + break; + case COLOR_DEPTH_101010: + colorimetry_bpc = 2; + break; + case COLOR_DEPTH_121212: + colorimetry_bpc = 3; + break; + default: + colorimetry_bpc = 0; + break; + } + + misc0 = misc0 | synchronous_clock; + misc0 = colorimetry_bpc << 5; + + switch (output_color_space) { + case COLOR_SPACE_SRGB: + misc0 = misc0 | 0x0; + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + dynamic_range_rgb = 0; /*full range*/ + break; + case COLOR_SPACE_SRGB_LIMITED: + misc0 = misc0 | 0x8; /* bit3=1 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + dynamic_range_rgb = 1; /*limited range*/ + break; + case COLOR_SPACE_YCBCR601: + case COLOR_SPACE_YCBCR601_LIMITED: + misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + dynamic_range_ycbcr = 0; /*bt601*/ + if (crtc_timing->pixel_encoding 
== PIXEL_ENCODING_YCBCR422) + misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ + else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) + misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ + break; + case COLOR_SPACE_YCBCR709: + case COLOR_SPACE_YCBCR709_LIMITED: + misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */ + misc1 = misc1 & ~0x80; /* bit7 = 0*/ + dynamic_range_ycbcr = 1; /*bt709*/ + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */ + else if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) + misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */ + break; + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + dynamic_range_rgb = 1; /*limited range*/ + break; + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_YCBCR: + case COLOR_SPACE_XR_RGB: + case COLOR_SPACE_MSREF_SCRGB: + case COLOR_SPACE_ADOBERGB: + case COLOR_SPACE_DCIP3: + case COLOR_SPACE_XV_YCC_709: + case COLOR_SPACE_XV_YCC_601: + case COLOR_SPACE_DISPLAYNATIVE: + case COLOR_SPACE_DOLBYVISION: + case COLOR_SPACE_APPCTRL: + case COLOR_SPACE_CUSTOMPOINTS: + case COLOR_SPACE_UNKNOWN: + /* do nothing */ + break; + } + + REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0); + REG_WRITE(DP_MSA_MISC, misc1); /* MSA_MISC1 */ + + /* dcn new register + * dc_crtc_timing is vesa dmt struct. data from edid + */ + REG_SET_2(DP_MSA_TIMING_PARAM1, 0, + DP_MSA_HTOTAL, crtc_timing->h_total, + DP_MSA_VTOTAL, crtc_timing->v_total); + + /* calculate from vesa timing parameters + * h_active_start related to leading edge of sync + */ + + h_blank = crtc_timing->h_total - crtc_timing->h_border_left - + crtc_timing->h_addressable - crtc_timing->h_border_right; + + h_back_porch = h_blank - crtc_timing->h_front_porch - + crtc_timing->h_sync_width; + + /* start at beginning of left border */ + h_active_start = crtc_timing->h_sync_width + h_back_porch; + + + v_active_start = crtc_timing->v_total - crtc_timing->v_border_top - + crtc_timing->v_addressable - crtc_timing->v_border_bottom - + crtc_timing->v_front_porch; + + + /* start at beginning of left border */ + REG_SET_2(DP_MSA_TIMING_PARAM2, 0, + DP_MSA_HSTART, h_active_start, + DP_MSA_VSTART, v_active_start); + + REG_SET_4(DP_MSA_TIMING_PARAM3, 0, + DP_MSA_HSYNCWIDTH, + crtc_timing->h_sync_width, + DP_MSA_HSYNCPOLARITY, + !crtc_timing->flags.HSYNC_POSITIVE_POLARITY, + DP_MSA_VSYNCWIDTH, + crtc_timing->v_sync_width, + DP_MSA_VSYNCPOLARITY, + !crtc_timing->flags.VSYNC_POSITIVE_POLARITY); + + /* HWDITH include border or overscan */ + REG_SET_2(DP_MSA_TIMING_PARAM4, 0, + DP_MSA_HWIDTH, crtc_timing->h_border_left + + crtc_timing->h_addressable + crtc_timing->h_border_right, + DP_MSA_VHEIGHT, crtc_timing->v_border_top + + crtc_timing->v_addressable + crtc_timing->v_border_bottom); +} + +static void enc1_stream_encoder_set_stream_attribute_helper( + struct dcn10_stream_encoder *enc1, + struct dc_crtc_timing *crtc_timing) +{ + switch (crtc_timing->pixel_encoding) { + case PIXEL_ENCODING_YCBCR422: + REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 1); + break; + default: + REG_UPDATE(DIG_FE_CNTL, TMDS_PIXEL_ENCODING, 0); + break; + } + REG_UPDATE(DIG_FE_CNTL, TMDS_COLOR_FORMAT, 0); +} + +/* setup stream encoder in hdmi mode */ +static void enc1_stream_encoder_hdmi_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + int actual_pix_clk_khz, + bool enable_audio) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + struct bp_encoder_control cntl = {0}; + + cntl.action = ENCODER_CONTROL_SETUP; + cntl.engine_id = 
enc1->base.id; + cntl.signal = SIGNAL_TYPE_HDMI_TYPE_A; + cntl.enable_dp_audio = enable_audio; + cntl.pixel_clock = actual_pix_clk_khz; + cntl.lanes_number = LANE_COUNT_FOUR; + + if (enc1->base.bp->funcs->encoder_control( + enc1->base.bp, &cntl) != BP_RESULT_OK) + return; + + enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); + + /* setup HDMI engine */ + REG_UPDATE_5(HDMI_CONTROL, + HDMI_PACKET_GEN_VERSION, 1, + HDMI_KEEPOUT_MODE, 1, + HDMI_DEEP_COLOR_ENABLE, 0, + HDMI_DATA_SCRAMBLE_EN, 0, + HDMI_CLOCK_CHANNEL_RATE, 0); + + + switch (crtc_timing->display_color_depth) { + case COLOR_DEPTH_888: + REG_UPDATE(HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); + break; + case COLOR_DEPTH_101010: + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 1, + HDMI_DEEP_COLOR_ENABLE, 0); + } else { + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 1, + HDMI_DEEP_COLOR_ENABLE, 1); + } + break; + case COLOR_DEPTH_121212: + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 2, + HDMI_DEEP_COLOR_ENABLE, 0); + } else { + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 2, + HDMI_DEEP_COLOR_ENABLE, 1); + } + break; + case COLOR_DEPTH_161616: + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DEEP_COLOR_DEPTH, 3, + HDMI_DEEP_COLOR_ENABLE, 1); + break; + default: + break; + } + + if (actual_pix_clk_khz >= HDMI_CLOCK_CHANNEL_RATE_MORE_340M) { + /* enable HDMI data scrambler + * HDMI_CLOCK_CHANNEL_RATE_MORE_340M + * Clock channel frequency is 1/4 of character rate. + */ + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DATA_SCRAMBLE_EN, 1, + HDMI_CLOCK_CHANNEL_RATE, 1); + } else if (crtc_timing->flags.LTE_340MCSC_SCRAMBLE) { + + /* TODO: New feature for DCE11, still need to implement */ + + /* enable HDMI data scrambler + * HDMI_CLOCK_CHANNEL_FREQ_EQUAL_TO_CHAR_RATE + * Clock channel frequency is the same + * as character rate + */ + REG_UPDATE_2(HDMI_CONTROL, + HDMI_DATA_SCRAMBLE_EN, 1, + HDMI_CLOCK_CHANNEL_RATE, 0); + } + + + REG_UPDATE_3(HDMI_VBI_PACKET_CONTROL, + HDMI_GC_CONT, 1, + HDMI_GC_SEND, 1, + HDMI_NULL_SEND, 1); + + /* following belongs to audio */ + REG_UPDATE(HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); + + REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); + + REG_UPDATE(HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, + VBI_LINE_0 + 2); + + REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, 0); +} + +/* setup stream encoder in dvi mode */ +static void enc1_stream_encoder_dvi_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + bool is_dual_link) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + struct bp_encoder_control cntl = {0}; + + cntl.action = ENCODER_CONTROL_SETUP; + cntl.engine_id = enc1->base.id; + cntl.signal = is_dual_link ? + SIGNAL_TYPE_DVI_DUAL_LINK : SIGNAL_TYPE_DVI_SINGLE_LINK; + cntl.enable_dp_audio = false; + cntl.pixel_clock = crtc_timing->pix_clk_khz; + cntl.lanes_number = (is_dual_link) ? 
LANE_COUNT_EIGHT : LANE_COUNT_FOUR; + + if (enc1->base.bp->funcs->encoder_control( + enc1->base.bp, &cntl) != BP_RESULT_OK) + return; + + ASSERT(crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB); + ASSERT(crtc_timing->display_color_depth == COLOR_DEPTH_888); + enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); +} + +static void enc1_stream_encoder_set_mst_bandwidth( + struct stream_encoder *enc, + struct fixed31_32 avg_time_slots_per_mtp) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t x = dal_fixed31_32_floor( + avg_time_slots_per_mtp); + uint32_t y = dal_fixed31_32_ceil( + dal_fixed31_32_shl( + dal_fixed31_32_sub_int( + avg_time_slots_per_mtp, + x), + 26)); + + REG_SET_2(DP_MSE_RATE_CNTL, 0, + DP_MSE_RATE_X, x, + DP_MSE_RATE_Y, y); + + /* wait for update to be completed on the link */ + /* i.e. DP_MSE_RATE_UPDATE_PENDING field (read only) */ + /* is reset to 0 (not pending) */ + REG_WAIT(DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, + 0, + 10, DP_MST_UPDATE_MAX_RETRY); +} + +static void enc1_stream_encoder_update_hdmi_info_packets( + struct stream_encoder *enc, + const struct encoder_info_frame *info_frame) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + /* for bring up, disable dp double TODO */ + REG_UPDATE(HDMI_DB_CONTROL, HDMI_DB_DISABLE, 1); + + enc1_update_hdmi_info_packet(enc1, 0, &info_frame->avi); + enc1_update_hdmi_info_packet(enc1, 1, &info_frame->vendor); + enc1_update_hdmi_info_packet(enc1, 2, &info_frame->gamut); + enc1_update_hdmi_info_packet(enc1, 3, &info_frame->spd); + enc1_update_hdmi_info_packet(enc1, 4, &info_frame->hdrsmd); +} + +static void enc1_stream_encoder_stop_hdmi_info_packets( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + /* stop generic packets 0 & 1 on HDMI */ + REG_SET_6(HDMI_GENERIC_PACKET_CONTROL0, 0, + HDMI_GENERIC1_CONT, 0, + HDMI_GENERIC1_LINE, 0, + HDMI_GENERIC1_SEND, 0, + HDMI_GENERIC0_CONT, 0, + HDMI_GENERIC0_LINE, 0, + HDMI_GENERIC0_SEND, 0); + + /* stop generic packets 2 & 3 on HDMI */ + REG_SET_6(HDMI_GENERIC_PACKET_CONTROL1, 0, + HDMI_GENERIC0_CONT, 0, + HDMI_GENERIC0_LINE, 0, + HDMI_GENERIC0_SEND, 0, + HDMI_GENERIC1_CONT, 0, + HDMI_GENERIC1_LINE, 0, + HDMI_GENERIC1_SEND, 0); + + /* stop generic packets 2 & 3 on HDMI */ + REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0, + HDMI_GENERIC0_CONT, 0, + HDMI_GENERIC0_LINE, 0, + HDMI_GENERIC0_SEND, 0, + HDMI_GENERIC1_CONT, 0, + HDMI_GENERIC1_LINE, 0, + HDMI_GENERIC1_SEND, 0); + + REG_SET_6(HDMI_GENERIC_PACKET_CONTROL3, 0, + HDMI_GENERIC0_CONT, 0, + HDMI_GENERIC0_LINE, 0, + HDMI_GENERIC0_SEND, 0, + HDMI_GENERIC1_CONT, 0, + HDMI_GENERIC1_LINE, 0, + HDMI_GENERIC1_SEND, 0); +} + +static void enc1_stream_encoder_update_dp_info_packets( + struct stream_encoder *enc, + const struct encoder_info_frame *info_frame) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t value = REG_READ(DP_SEC_CNTL); + + if (info_frame->vsc.valid) + enc1_update_generic_info_packet( + enc1, + 0, /* packetIndex */ + &info_frame->vsc); + + if (info_frame->spd.valid) + enc1_update_generic_info_packet( + enc1, + 2, /* packetIndex */ + &info_frame->spd); + + if (info_frame->hdrsmd.valid) + enc1_update_generic_info_packet( + enc1, + 3, /* packetIndex */ + &info_frame->hdrsmd); + + /* enable/disable transmission of packet(s). 
+ * If enabled, packet transmission begins on the next frame + */ + REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid); + REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid); + REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); + + /* This bit is the master enable bit. + * When enabling secondary stream engine, + * this master bit must also be set. + * This register shared with audio info frame. + * Therefore we need to enable master bit + * if at least on of the fields is not 0 + */ + if (value) + REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); +} + +static void enc1_stream_encoder_stop_dp_info_packets( + struct stream_encoder *enc) +{ + /* stop generic packets on DP */ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t value = REG_READ(DP_SEC_CNTL); + + REG_SET_10(DP_SEC_CNTL, 0, + DP_SEC_GSP0_ENABLE, 0, + DP_SEC_GSP1_ENABLE, 0, + DP_SEC_GSP2_ENABLE, 0, + DP_SEC_GSP3_ENABLE, 0, + DP_SEC_GSP4_ENABLE, 0, + DP_SEC_GSP5_ENABLE, 0, + DP_SEC_GSP6_ENABLE, 0, + DP_SEC_GSP7_ENABLE, 0, + DP_SEC_MPG_ENABLE, 0, + DP_SEC_STREAM_ENABLE, 0); + + /* this register shared with audio info frame. + * therefore we need to keep master enabled + * if at least one of the fields is not 0 */ + + if (value) + REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); + +} + +static void enc1_stream_encoder_dp_blank( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t retries = 0; + uint32_t reg1 = 0; + uint32_t max_retries = DP_BLANK_MAX_RETRY * 10; + + /* Note: For CZ, we are changing driver default to disable + * stream deferred to next VBLANK. If results are positive, we + * will make the same change to all DCE versions. There are a + * handful of panels that cannot handle disable stream at + * HBLANK and will result in a white line flash across the + * screen on stream disable. + */ + REG_GET(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, ®1); + if ((reg1 & 0x1) == 0) + /*stream not enabled*/ + return; + /* Specify the video stream disable point + * (2 = start of the next vertical blank) + */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2); + /* Larger delay to wait until VBLANK - use max retry of + * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode + + * a little more because we may not trust delay accuracy. + */ + max_retries = DP_BLANK_MAX_RETRY * 150; + + /* disable DP stream */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); + + /* the encoder stops sending the video stream + * at the start of the vertical blanking. + * Poll for DP_VID_STREAM_STATUS == 0 + */ + + REG_WAIT(DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, + 0, + 10, max_retries); + + ASSERT(retries <= max_retries); + + /* Tell the DP encoder to ignore timing from CRTC, must be done after + * the polling. If we set DP_STEER_FIFO_RESET before DP stream blank is + * complete, stream status will be stuck in video stream enabled state, + * i.e. DP_VID_STREAM_STATUS stuck at 1. 
+ */ + + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true); +} + +/* output video stream to link encoder */ +static void enc1_stream_encoder_dp_unblank( + struct stream_encoder *enc, + const struct encoder_unblank_param *param) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) { + uint32_t n_vid = 0x8000; + uint32_t m_vid; + + /* M / N = Fstream / Flink + * m_vid / n_vid = pixel rate / link rate + */ + + uint64_t m_vid_l = n_vid; + + m_vid_l *= param->pixel_clk_khz; + m_vid_l = div_u64(m_vid_l, + param->link_settings.link_rate + * LINK_RATE_REF_FREQ_IN_KHZ); + + m_vid = (uint32_t) m_vid_l; + + /* enable auto measurement */ + + REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 0); + + /* auto measurement need 1 full 0x8000 symbol cycle to kick in, + * therefore program initial value for Mvid and Nvid + */ + + REG_UPDATE(DP_VID_N, DP_VID_N, n_vid); + + REG_UPDATE(DP_VID_M, DP_VID_M, m_vid); + + REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1); + } + + /* set DIG_START to 0x1 to resync FIFO */ + + REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); + + /* switch DP encoder to CRTC data */ + + REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 0); + + /* wait 100us for DIG/DP logic to prime + * (i.e. a few video lines) + */ + udelay(100); + + /* the hardware would start sending video at the start of the next DP + * frame (i.e. rising edge of the vblank). + * NOTE: We used to program DP_VID_STREAM_DIS_DEFER = 2 here, but this + * register has no effect on enable transition! HW always guarantees + * VID_STREAM enable at start of next frame, and this is not + * programmable + */ + + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); +} + +static void enc1_stream_encoder_set_avmute( + struct stream_encoder *enc, + bool enable) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + unsigned int value = enable ? 
1 : 0; + + REG_UPDATE(HDMI_GC, HDMI_GC_AVMUTE, value); +} + + +#define DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT 0x8000 +#define DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC 1 + +#include "include/audio_types.h" + +/** +* speakersToChannels +* +* @brief +* translate speakers to channels +* +* FL - Front Left +* FR - Front Right +* RL - Rear Left +* RR - Rear Right +* RC - Rear Center +* FC - Front Center +* FLC - Front Left Center +* FRC - Front Right Center +* RLC - Rear Left Center +* RRC - Rear Right Center +* LFE - Low Freq Effect +* +* FC +* FLC FRC +* FL FR +* +* LFE +* () +* +* +* RL RR +* RLC RRC +* RC +* +* ch 8 7 6 5 4 3 2 1 +* 0b00000011 - - - - - - FR FL +* 0b00000111 - - - - - LFE FR FL +* 0b00001011 - - - - FC - FR FL +* 0b00001111 - - - - FC LFE FR FL +* 0b00010011 - - - RC - - FR FL +* 0b00010111 - - - RC - LFE FR FL +* 0b00011011 - - - RC FC - FR FL +* 0b00011111 - - - RC FC LFE FR FL +* 0b00110011 - - RR RL - - FR FL +* 0b00110111 - - RR RL - LFE FR FL +* 0b00111011 - - RR RL FC - FR FL +* 0b00111111 - - RR RL FC LFE FR FL +* 0b01110011 - RC RR RL - - FR FL +* 0b01110111 - RC RR RL - LFE FR FL +* 0b01111011 - RC RR RL FC - FR FL +* 0b01111111 - RC RR RL FC LFE FR FL +* 0b11110011 RRC RLC RR RL - - FR FL +* 0b11110111 RRC RLC RR RL - LFE FR FL +* 0b11111011 RRC RLC RR RL FC - FR FL +* 0b11111111 RRC RLC RR RL FC LFE FR FL +* 0b11000011 FRC FLC - - - - FR FL +* 0b11000111 FRC FLC - - - LFE FR FL +* 0b11001011 FRC FLC - - FC - FR FL +* 0b11001111 FRC FLC - - FC LFE FR FL +* 0b11010011 FRC FLC - RC - - FR FL +* 0b11010111 FRC FLC - RC - LFE FR FL +* 0b11011011 FRC FLC - RC FC - FR FL +* 0b11011111 FRC FLC - RC FC LFE FR FL +* 0b11110011 FRC FLC RR RL - - FR FL +* 0b11110111 FRC FLC RR RL - LFE FR FL +* 0b11111011 FRC FLC RR RL FC - FR FL +* 0b11111111 FRC FLC RR RL FC LFE FR FL +* +* @param +* speakers - speaker information as it comes from CEA audio block +*/ +/* translate speakers to channels */ + +union audio_cea_channels { + uint8_t all; + struct audio_cea_channels_bits { + uint32_t FL:1; + uint32_t FR:1; + uint32_t LFE:1; + uint32_t FC:1; + uint32_t RL_RC:1; + uint32_t RR:1; + uint32_t RC_RLC_FLC:1; + uint32_t RRC_FRC:1; + } channels; +}; + +struct audio_clock_info { + /* pixel clock frequency*/ + uint32_t pixel_clock_in_10khz; + /* N - 32KHz audio */ + uint32_t n_32khz; + /* CTS - 32KHz audio*/ + uint32_t cts_32khz; + uint32_t n_44khz; + uint32_t cts_44khz; + uint32_t n_48khz; + uint32_t cts_48khz; +}; + +/* 25.2MHz/1.001*/ +/* 25.2MHz/1.001*/ +/* 25.2MHz*/ +/* 27MHz */ +/* 27MHz*1.001*/ +/* 27MHz*1.001*/ +/* 54MHz*/ +/* 54MHz*1.001*/ +/* 74.25MHz/1.001*/ +/* 74.25MHz*/ +/* 148.5MHz/1.001*/ +/* 148.5MHz*/ + +static const struct audio_clock_info audio_clock_info_table[16] = { + {2517, 4576, 28125, 7007, 31250, 6864, 28125}, + {2518, 4576, 28125, 7007, 31250, 6864, 28125}, + {2520, 4096, 25200, 6272, 28000, 6144, 25200}, + {2700, 4096, 27000, 6272, 30000, 6144, 27000}, + {2702, 4096, 27027, 6272, 30030, 6144, 27027}, + {2703, 4096, 27027, 6272, 30030, 6144, 27027}, + {5400, 4096, 54000, 6272, 60000, 6144, 54000}, + {5405, 4096, 54054, 6272, 60060, 6144, 54054}, + {7417, 11648, 210937, 17836, 234375, 11648, 140625}, + {7425, 4096, 74250, 6272, 82500, 6144, 74250}, + {14835, 11648, 421875, 8918, 234375, 5824, 140625}, + {14850, 4096, 148500, 6272, 165000, 6144, 148500}, + {29670, 5824, 421875, 4459, 234375, 5824, 281250}, + {29700, 3072, 222750, 4704, 247500, 5120, 247500}, + {59340, 5824, 843750, 8918, 937500, 5824, 562500}, + {59400, 3072, 445500, 9408, 990000, 6144, 
594000} +}; + +static const struct audio_clock_info audio_clock_info_table_36bpc[14] = { + {2517, 9152, 84375, 7007, 48875, 9152, 56250}, + {2518, 9152, 84375, 7007, 48875, 9152, 56250}, + {2520, 4096, 37800, 6272, 42000, 6144, 37800}, + {2700, 4096, 40500, 6272, 45000, 6144, 40500}, + {2702, 8192, 81081, 6272, 45045, 8192, 54054}, + {2703, 8192, 81081, 6272, 45045, 8192, 54054}, + {5400, 4096, 81000, 6272, 90000, 6144, 81000}, + {5405, 4096, 81081, 6272, 90090, 6144, 81081}, + {7417, 11648, 316406, 17836, 351562, 11648, 210937}, + {7425, 4096, 111375, 6272, 123750, 6144, 111375}, + {14835, 11648, 632812, 17836, 703125, 11648, 421875}, + {14850, 4096, 222750, 6272, 247500, 6144, 222750}, + {29670, 5824, 632812, 8918, 703125, 5824, 421875}, + {29700, 4096, 445500, 4704, 371250, 5120, 371250} +}; + +static const struct audio_clock_info audio_clock_info_table_48bpc[14] = { + {2517, 4576, 56250, 7007, 62500, 6864, 56250}, + {2518, 4576, 56250, 7007, 62500, 6864, 56250}, + {2520, 4096, 50400, 6272, 56000, 6144, 50400}, + {2700, 4096, 54000, 6272, 60000, 6144, 54000}, + {2702, 4096, 54054, 6267, 60060, 8192, 54054}, + {2703, 4096, 54054, 6272, 60060, 8192, 54054}, + {5400, 4096, 108000, 6272, 120000, 6144, 108000}, + {5405, 4096, 108108, 6272, 120120, 6144, 108108}, + {7417, 11648, 421875, 17836, 468750, 11648, 281250}, + {7425, 4096, 148500, 6272, 165000, 6144, 148500}, + {14835, 11648, 843750, 8918, 468750, 11648, 281250}, + {14850, 4096, 297000, 6272, 330000, 6144, 297000}, + {29670, 5824, 843750, 4459, 468750, 5824, 562500}, + {29700, 3072, 445500, 4704, 495000, 5120, 495000} + + +}; + +static union audio_cea_channels speakers_to_channels( + struct audio_speaker_flags speaker_flags) +{ + union audio_cea_channels cea_channels = {0}; + + /* these are one to one */ + cea_channels.channels.FL = speaker_flags.FL_FR; + cea_channels.channels.FR = speaker_flags.FL_FR; + cea_channels.channels.LFE = speaker_flags.LFE; + cea_channels.channels.FC = speaker_flags.FC; + + /* if Rear Left and Right exist move RC speaker to channel 7 + * otherwise to channel 5 + */ + if (speaker_flags.RL_RR) { + cea_channels.channels.RL_RC = speaker_flags.RL_RR; + cea_channels.channels.RR = speaker_flags.RL_RR; + cea_channels.channels.RC_RLC_FLC = speaker_flags.RC; + } else { + cea_channels.channels.RL_RC = speaker_flags.RC; + } + + /* FRONT Left Right Center and REAR Left Right Center are exclusive */ + if (speaker_flags.FLC_FRC) { + cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC; + cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC; + } else { + cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC; + cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC; + } + + return cea_channels; +} + +static uint32_t calc_max_audio_packets_per_line( + const struct audio_crtc_info *crtc_info) +{ + uint32_t max_packets_per_line; + + max_packets_per_line = + crtc_info->h_total - crtc_info->h_active; + + if (crtc_info->pixel_repetition) + max_packets_per_line *= crtc_info->pixel_repetition; + + /* for other hdmi features */ + max_packets_per_line -= 58; + /* for Control Period */ + max_packets_per_line -= 16; + /* Number of Audio Packets per Line */ + max_packets_per_line /= 32; + + return max_packets_per_line; +} + +static void get_audio_clock_info( + enum dc_color_depth color_depth, + uint32_t crtc_pixel_clock_in_khz, + uint32_t actual_pixel_clock_in_khz, + struct audio_clock_info *audio_clock_info) +{ + const struct audio_clock_info *clock_info; + uint32_t index; + uint32_t crtc_pixel_clock_in_10khz = 
crtc_pixel_clock_in_khz / 10; + uint32_t audio_array_size; + + switch (color_depth) { + case COLOR_DEPTH_161616: + clock_info = audio_clock_info_table_48bpc; + audio_array_size = ARRAY_SIZE( + audio_clock_info_table_48bpc); + break; + case COLOR_DEPTH_121212: + clock_info = audio_clock_info_table_36bpc; + audio_array_size = ARRAY_SIZE( + audio_clock_info_table_36bpc); + break; + default: + clock_info = audio_clock_info_table; + audio_array_size = ARRAY_SIZE( + audio_clock_info_table); + break; + } + + if (clock_info != NULL) { + /* search for exact pixel clock in table */ + for (index = 0; index < audio_array_size; index++) { + if (clock_info[index].pixel_clock_in_10khz > + crtc_pixel_clock_in_10khz) + break; /* not match */ + else if (clock_info[index].pixel_clock_in_10khz == + crtc_pixel_clock_in_10khz) { + /* match found */ + *audio_clock_info = clock_info[index]; + return; + } + } + } + + /* not found */ + if (actual_pixel_clock_in_khz == 0) + actual_pixel_clock_in_khz = crtc_pixel_clock_in_khz; + + /* See HDMI spec the table entry under + * pixel clock of "Other". */ + audio_clock_info->pixel_clock_in_10khz = + actual_pixel_clock_in_khz / 10; + audio_clock_info->cts_32khz = actual_pixel_clock_in_khz; + audio_clock_info->cts_44khz = actual_pixel_clock_in_khz; + audio_clock_info->cts_48khz = actual_pixel_clock_in_khz; + + audio_clock_info->n_32khz = 4096; + audio_clock_info->n_44khz = 6272; + audio_clock_info->n_48khz = 6144; +} + +static void enc1_se_audio_setup( + struct stream_encoder *enc, + unsigned int az_inst, + struct audio_info *audio_info) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + uint32_t speakers = 0; + uint32_t channels = 0; + + ASSERT(audio_info); + if (audio_info == NULL) + /* This should not happen.it does so we don't get BSOD*/ + return; + + speakers = audio_info->flags.info.ALLSPEAKERS; + channels = speakers_to_channels(audio_info->flags.speaker_flags).all; + + /* setup the audio stream source select (audio -> dig mapping) */ + REG_SET(AFMT_AUDIO_SRC_CONTROL, 0, AFMT_AUDIO_SRC_SELECT, az_inst); + + /* Channel allocation */ + REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels); +} + +static void enc1_se_setup_hdmi_audio( + struct stream_encoder *enc, + const struct audio_crtc_info *crtc_info) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + struct audio_clock_info audio_clock_info = {0}; + uint32_t max_packets_per_line; + + /* For now still do calculation, although this field is ignored when + * above HDMI_PACKET_GEN_VERSION set to 1 + */ + max_packets_per_line = calc_max_audio_packets_per_line(crtc_info); + + /* HDMI_AUDIO_PACKET_CONTROL */ + REG_UPDATE_2(HDMI_AUDIO_PACKET_CONTROL, + HDMI_AUDIO_PACKETS_PER_LINE, max_packets_per_line, + HDMI_AUDIO_DELAY_EN, 1); + + /* AFMT_AUDIO_PACKET_CONTROL */ + REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); + + /* AFMT_AUDIO_PACKET_CONTROL2 */ + REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2, + AFMT_AUDIO_LAYOUT_OVRD, 0, + AFMT_60958_OSF_OVRD, 0); + + /* HDMI_ACR_PACKET_CONTROL */ + REG_UPDATE_3(HDMI_ACR_PACKET_CONTROL, + HDMI_ACR_AUTO_SEND, 1, + HDMI_ACR_SOURCE, 0, + HDMI_ACR_AUDIO_PRIORITY, 0); + + /* Program audio clock sample/regeneration parameters */ + get_audio_clock_info(crtc_info->color_depth, + crtc_info->requested_pixel_clock, + crtc_info->calculated_pixel_clock, + &audio_clock_info); + DC_LOG_HW_AUDIO( + "\n%s:Input::requested_pixel_clock = %d" \ + "calculated_pixel_clock = %d \n", __func__, \ + crtc_info->requested_pixel_clock, 
\ + crtc_info->calculated_pixel_clock); + + /* HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK */ + REG_UPDATE(HDMI_ACR_32_0, HDMI_ACR_CTS_32, audio_clock_info.cts_32khz); + + /* HDMI_ACR_32_1__HDMI_ACR_N_32_MASK */ + REG_UPDATE(HDMI_ACR_32_1, HDMI_ACR_N_32, audio_clock_info.n_32khz); + + /* HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK */ + REG_UPDATE(HDMI_ACR_44_0, HDMI_ACR_CTS_44, audio_clock_info.cts_44khz); + + /* HDMI_ACR_44_1__HDMI_ACR_N_44_MASK */ + REG_UPDATE(HDMI_ACR_44_1, HDMI_ACR_N_44, audio_clock_info.n_44khz); + + /* HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK */ + REG_UPDATE(HDMI_ACR_48_0, HDMI_ACR_CTS_48, audio_clock_info.cts_48khz); + + /* HDMI_ACR_48_1__HDMI_ACR_N_48_MASK */ + REG_UPDATE(HDMI_ACR_48_1, HDMI_ACR_N_48, audio_clock_info.n_48khz); + + /* Video driver cannot know in advance which sample rate will + * be used by HD Audio driver + * HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE field is + * programmed below in interruppt callback + */ + + /* AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK & + * AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK + */ + REG_UPDATE_2(AFMT_60958_0, + AFMT_60958_CS_CHANNEL_NUMBER_L, 1, + AFMT_60958_CS_CLOCK_ACCURACY, 0); + + /* AFMT_60958_1 AFMT_60958_CS_CHALNNEL_NUMBER_R */ + REG_UPDATE(AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); + + /* AFMT_60958_2 now keep this settings until + * Programming guide comes out + */ + REG_UPDATE_6(AFMT_60958_2, + AFMT_60958_CS_CHANNEL_NUMBER_2, 3, + AFMT_60958_CS_CHANNEL_NUMBER_3, 4, + AFMT_60958_CS_CHANNEL_NUMBER_4, 5, + AFMT_60958_CS_CHANNEL_NUMBER_5, 6, + AFMT_60958_CS_CHANNEL_NUMBER_6, 7, + AFMT_60958_CS_CHANNEL_NUMBER_7, 8); +} + +static void enc1_se_setup_dp_audio( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + /* --- DP Audio packet configurations --- */ + + /* ATP Configuration */ + REG_SET(DP_SEC_AUD_N, 0, + DP_SEC_AUD_N, DP_SEC_AUD_N__DP_SEC_AUD_N__DEFAULT); + + /* Async/auto-calc timestamp mode */ + REG_SET(DP_SEC_TIMESTAMP, 0, DP_SEC_TIMESTAMP_MODE, + DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__AUTO_CALC); + + /* --- The following are the registers + * copied from the SetupHDMI --- + */ + + /* AFMT_AUDIO_PACKET_CONTROL */ + REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); + + /* AFMT_AUDIO_PACKET_CONTROL2 */ + /* Program the ATP and AIP next */ + REG_UPDATE_2(AFMT_AUDIO_PACKET_CONTROL2, + AFMT_AUDIO_LAYOUT_OVRD, 0, + AFMT_60958_OSF_OVRD, 0); + + /* AFMT_INFOFRAME_CONTROL0 */ + REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); + + /* AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK */ + REG_UPDATE(AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, 0); +} + +static void enc1_se_enable_audio_clock( + struct stream_encoder *enc, + bool enable) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (REG(AFMT_CNTL) == 0) + return; /* DCE8/10 does not have this register */ + + REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, !!enable); + + /* wait for AFMT clock to turn on, + * expectation: this should complete in 1-2 reads + * + * REG_WAIT(AFMT_CNTL, AFMT_AUDIO_CLOCK_ON, !!enable, 1, 10); + * + * TODO: wait for clock_on does not work well. May need HW + * program sequence. 
But audio seems work normally even without wait + * for clock_on status change + */ +} + +static void enc1_se_enable_dp_audio( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + /* Enable Audio packets */ + REG_UPDATE(DP_SEC_CNTL, DP_SEC_ASP_ENABLE, 1); + + /* Program the ATP and AIP next */ + REG_UPDATE_2(DP_SEC_CNTL, + DP_SEC_ATP_ENABLE, 1, + DP_SEC_AIP_ENABLE, 1); + + /* Program STREAM_ENABLE after all the other enables. */ + REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); +} + +static void enc1_se_disable_dp_audio( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t value = REG_READ(DP_SEC_CNTL); + + /* Disable Audio packets */ + REG_UPDATE_5(DP_SEC_CNTL, + DP_SEC_ASP_ENABLE, 0, + DP_SEC_ATP_ENABLE, 0, + DP_SEC_AIP_ENABLE, 0, + DP_SEC_ACM_ENABLE, 0, + DP_SEC_STREAM_ENABLE, 0); + + /* This register shared with encoder info frame. Therefore we need to + * keep master enabled if at least on of the fields is not 0 + */ + if (value != 0) + REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); + +} + +void enc1_se_audio_mute_control( + struct stream_encoder *enc, + bool mute) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute); +} + +void enc1_se_dp_audio_setup( + struct stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info) +{ + enc1_se_audio_setup(enc, az_inst, info); +} + +void enc1_se_dp_audio_enable( + struct stream_encoder *enc) +{ + enc1_se_enable_audio_clock(enc, true); + enc1_se_setup_dp_audio(enc); + enc1_se_enable_dp_audio(enc); +} + +void enc1_se_dp_audio_disable( + struct stream_encoder *enc) +{ + enc1_se_disable_dp_audio(enc); + enc1_se_enable_audio_clock(enc, false); +} + +void enc1_se_hdmi_audio_setup( + struct stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info, + struct audio_crtc_info *audio_crtc_info) +{ + enc1_se_enable_audio_clock(enc, true); + enc1_se_setup_hdmi_audio(enc, audio_crtc_info); + enc1_se_audio_setup(enc, az_inst, info); +} + +void enc1_se_hdmi_audio_disable( + struct stream_encoder *enc) +{ + enc1_se_enable_audio_clock(enc, false); +} + + +static void enc1_setup_stereo_sync( + struct stream_encoder *enc, + int tg_inst, bool enable) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_SELECT, tg_inst); + REG_UPDATE(DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, !enable); +} + + +static const struct stream_encoder_funcs dcn10_str_enc_funcs = { + .dp_set_stream_attribute = + enc1_stream_encoder_dp_set_stream_attribute, + .hdmi_set_stream_attribute = + enc1_stream_encoder_hdmi_set_stream_attribute, + .dvi_set_stream_attribute = + enc1_stream_encoder_dvi_set_stream_attribute, + .set_mst_bandwidth = + enc1_stream_encoder_set_mst_bandwidth, + .update_hdmi_info_packets = + enc1_stream_encoder_update_hdmi_info_packets, + .stop_hdmi_info_packets = + enc1_stream_encoder_stop_hdmi_info_packets, + .update_dp_info_packets = + enc1_stream_encoder_update_dp_info_packets, + .stop_dp_info_packets = + enc1_stream_encoder_stop_dp_info_packets, + .dp_blank = + enc1_stream_encoder_dp_blank, + .dp_unblank = + enc1_stream_encoder_dp_unblank, + .audio_mute_control = enc1_se_audio_mute_control, + + .dp_audio_setup = enc1_se_dp_audio_setup, + .dp_audio_enable = enc1_se_dp_audio_enable, + .dp_audio_disable = enc1_se_dp_audio_disable, + + .hdmi_audio_setup = enc1_se_hdmi_audio_setup, + 
.hdmi_audio_disable = enc1_se_hdmi_audio_disable, + .setup_stereo_sync = enc1_setup_stereo_sync, + .set_avmute = enc1_stream_encoder_set_avmute, +}; + +void dcn10_stream_encoder_construct( + struct dcn10_stream_encoder *enc1, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id, + const struct dcn10_stream_enc_registers *regs, + const struct dcn10_stream_encoder_shift *se_shift, + const struct dcn10_stream_encoder_mask *se_mask) +{ + enc1->base.funcs = &dcn10_str_enc_funcs; + enc1->base.ctx = ctx; + enc1->base.id = eng_id; + enc1->base.bp = bp; + enc1->regs = regs; + enc1->se_shift = se_shift; + enc1->se_mask = se_mask; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h new file mode 100644 index 000000000000..86f8ee5ed8b8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -0,0 +1,584 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_STREAM_ENCODER_DCN10_H__ +#define __DC_STREAM_ENCODER_DCN10_H__ + +#include "stream_encoder.h" + +#define DCN10STRENC_FROM_STRENC(stream_encoder)\ + container_of(stream_encoder, struct dcn10_stream_encoder, base) + +#define SE_COMMON_REG_LIST_BASE(id) \ + SRI(AFMT_GENERIC_0, DIG, id), \ + SRI(AFMT_GENERIC_1, DIG, id), \ + SRI(AFMT_GENERIC_2, DIG, id), \ + SRI(AFMT_GENERIC_3, DIG, id), \ + SRI(AFMT_GENERIC_4, DIG, id), \ + SRI(AFMT_GENERIC_5, DIG, id), \ + SRI(AFMT_GENERIC_6, DIG, id), \ + SRI(AFMT_GENERIC_7, DIG, id), \ + SRI(AFMT_GENERIC_HDR, DIG, id), \ + SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \ + SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \ + SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \ + SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \ + SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \ + SRI(AFMT_60958_0, DIG, id), \ + SRI(AFMT_60958_1, DIG, id), \ + SRI(AFMT_60958_2, DIG, id), \ + SRI(DIG_FE_CNTL, DIG, id), \ + SRI(HDMI_CONTROL, DIG, id), \ + SRI(HDMI_GC, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \ + SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \ + SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \ + SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \ + SRI(HDMI_AUDIO_PACKET_CONTROL, DIG, id),\ + SRI(HDMI_ACR_PACKET_CONTROL, DIG, id),\ + SRI(HDMI_ACR_32_0, DIG, id),\ + SRI(HDMI_ACR_32_1, DIG, id),\ + SRI(HDMI_ACR_44_0, DIG, id),\ + SRI(HDMI_ACR_44_1, DIG, id),\ + SRI(HDMI_ACR_48_0, DIG, id),\ + SRI(HDMI_ACR_48_1, DIG, id),\ + SRI(TMDS_CNTL, DIG, id), \ + SRI(DP_MSE_RATE_CNTL, DP, id), \ + SRI(DP_MSE_RATE_UPDATE, DP, id), \ + SRI(DP_PIXEL_FORMAT, DP, id), \ + SRI(DP_SEC_CNTL, DP, id), \ + SRI(DP_STEER_FIFO, DP, id), \ + SRI(DP_VID_M, DP, id), \ + SRI(DP_VID_N, DP, id), \ + SRI(DP_VID_STREAM_CNTL, DP, id), \ + SRI(DP_VID_TIMING, DP, id), \ + SRI(DP_SEC_AUD_N, DP, id), \ + SRI(DP_SEC_TIMESTAMP, DP, id) + +#define SE_DCN_REG_LIST(id)\ + SE_COMMON_REG_LIST_BASE(id),\ + SRI(AFMT_CNTL, DIG, id),\ + SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id),\ + SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ + SRI(DP_DB_CNTL, DP, id), \ + SRI(DP_MSA_MISC, DP, id), \ + SRI(DP_MSA_COLORIMETRY, DP, id), \ + SRI(DP_MSA_TIMING_PARAM1, DP, id), \ + SRI(DP_MSA_TIMING_PARAM2, DP, id), \ + SRI(DP_MSA_TIMING_PARAM3, DP, id), \ + SRI(DP_MSA_TIMING_PARAM4, DP, id), \ + SRI(HDMI_DB_CONTROL, DIG, id) + +#define SE_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh)\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_INDEX, mask_sh),\ + SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB0, mask_sh),\ + SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB1, mask_sh),\ + SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB2, mask_sh),\ + SE_SF(DIG0_AFMT_GENERIC_HDR, AFMT_GENERIC_HB3, mask_sh),\ + SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, mask_sh),\ + SE_SF(DP0_DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_PACKET_GEN_VERSION, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_KEEPOUT_MODE, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_DATA_SCRAMBLE_EN, mask_sh),\ + SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_INFOFRAME_CONTROL0, 
HDMI_AUDIO_INFO_SEND, mask_sh),\ + SE_SF(DIG0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\ + SE_SF(DIG0_HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GC, HDMI_GC_AVMUTE, mask_sh),\ + SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_X, mask_sh),\ + SE_SF(DP0_DP_MSE_RATE_CNTL, DP_MSE_RATE_Y, mask_sh),\ + SE_SF(DP0_DP_MSE_RATE_UPDATE, DP_MSE_RATE_UPDATE_PENDING, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP1_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_MPG_ENABLE, mask_sh),\ + SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, mask_sh),\ + SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\ + SE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_STATUS, mask_sh),\ + SE_SF(DP0_DP_STEER_FIFO, DP_STEER_FIFO_RESET, mask_sh),\ + SE_SF(DP0_DP_VID_TIMING, DP_VID_M_N_GEN_EN, mask_sh),\ + SE_SF(DP0_DP_VID_N, DP_VID_N, mask_sh),\ + SE_SF(DP0_DP_VID_M, DP_VID_M, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, DIG_START, mask_sh),\ + SE_SF(DIG0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\ + SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\ + SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, mask_sh),\ + SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\ + SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUDIO_PRIORITY, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_32_0, HDMI_ACR_CTS_32, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_32_1, HDMI_ACR_N_32, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_44_0, HDMI_ACR_CTS_44, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_44_1, HDMI_ACR_N_44, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_48_0, HDMI_ACR_CTS_48, mask_sh),\ + SE_SF(DIG0_HDMI_ACR_48_1, HDMI_ACR_N_48, mask_sh),\ + SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\ + SE_SF(DIG0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\ + SE_SF(DIG0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\ + SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\ + SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\ + SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\ + SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\ + SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\ + SE_SF(DIG0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\ + SE_SF(DP0_DP_SEC_AUD_N, DP_SEC_AUD_N, mask_sh),\ + SE_SF(DP0_DP_SEC_TIMESTAMP, DP_SEC_TIMESTAMP_MODE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ASP_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ATP_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_AIP_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_ACM_ENABLE, mask_sh),\ + SE_SF(DIG0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\ + SE_SF(DIG0_AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, mask_sh),\ + SE_SF(DIG0_HDMI_CONTROL, HDMI_CLOCK_CHANNEL_RATE, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, TMDS_PIXEL_ENCODING, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, TMDS_COLOR_FORMAT, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, 
DIG_STEREOSYNC_SELECT, mask_sh),\ + SE_SF(DIG0_DIG_FE_CNTL, DIG_STEREOSYNC_GATE_EN, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_LOCK_STATUS, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL, AFMT_GENERIC_CONFLICT_CLR, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE_PENDING, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE_PENDING, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE_PENDING, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE_PENDING, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE_PENDING, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE_PENDING, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE_PENDING, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE_PENDING, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP6_ENABLE, mask_sh),\ + SE_SF(DP0_DP_SEC_CNTL, DP_SEC_GSP7_ENABLE, mask_sh),\ + SE_SF(DP0_DP_DB_CNTL, DP_DB_DISABLE, mask_sh),\ + SE_SF(DP0_DP_MSA_COLORIMETRY, DP_MSA_MISC0, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_HTOTAL, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM1, DP_MSA_VTOTAL, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_HSTART, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM2, DP_MSA_VSTART, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCWIDTH, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_HSYNCPOLARITY, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCWIDTH, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM3, DP_MSA_VSYNCPOLARITY, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_HWIDTH, mask_sh),\ + SE_SF(DP0_DP_MSA_TIMING_PARAM4, DP_MSA_VHEIGHT, mask_sh),\ + SE_SF(DIG0_HDMI_DB_CONTROL, HDMI_DB_DISABLE, mask_sh),\ + SE_SF(DP0_DP_VID_TIMING, DP_VID_N_MUL, mask_sh) + +#define SE_COMMON_MASK_SH_LIST_SOC(mask_sh)\ + SE_COMMON_MASK_SH_LIST_SOC_BASE(mask_sh) + +#define SE_COMMON_MASK_SH_LIST_DCN10(mask_sh)\ + SE_COMMON_MASK_SH_LIST_SOC(mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC0_LINE, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_CONT, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\ + SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh) + +struct dcn10_stream_encoder_shift { + uint8_t AFMT_GENERIC_INDEX; + uint8_t AFMT_GENERIC_HB0; + uint8_t AFMT_GENERIC_HB1; + uint8_t AFMT_GENERIC_HB2; + uint8_t AFMT_GENERIC_HB3; + uint8_t AFMT_GENERIC_LOCK_STATUS; + uint8_t 
AFMT_GENERIC_CONFLICT; + uint8_t AFMT_GENERIC_CONFLICT_CLR; + uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING; + uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING; + uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING; + uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING; + uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING; + uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING; + uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING; + uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING; + uint8_t AFMT_GENERIC0_FRAME_UPDATE; + uint8_t AFMT_GENERIC1_FRAME_UPDATE; + uint8_t AFMT_GENERIC2_FRAME_UPDATE; + uint8_t AFMT_GENERIC3_FRAME_UPDATE; + uint8_t AFMT_GENERIC4_FRAME_UPDATE; + uint8_t AFMT_GENERIC5_FRAME_UPDATE; + uint8_t AFMT_GENERIC6_FRAME_UPDATE; + uint8_t AFMT_GENERIC7_FRAME_UPDATE; + uint8_t HDMI_GENERIC0_CONT; + uint8_t HDMI_GENERIC0_SEND; + uint8_t HDMI_GENERIC0_LINE; + uint8_t HDMI_GENERIC1_CONT; + uint8_t HDMI_GENERIC1_SEND; + uint8_t HDMI_GENERIC1_LINE; + uint8_t HDMI_GENERIC2_CONT; + uint8_t HDMI_GENERIC2_SEND; + uint8_t HDMI_GENERIC2_LINE; + uint8_t HDMI_GENERIC3_CONT; + uint8_t HDMI_GENERIC3_SEND; + uint8_t HDMI_GENERIC3_LINE; + uint8_t HDMI_GENERIC4_CONT; + uint8_t HDMI_GENERIC4_SEND; + uint8_t HDMI_GENERIC4_LINE; + uint8_t HDMI_GENERIC5_CONT; + uint8_t HDMI_GENERIC5_SEND; + uint8_t HDMI_GENERIC5_LINE; + uint8_t HDMI_GENERIC6_CONT; + uint8_t HDMI_GENERIC6_SEND; + uint8_t HDMI_GENERIC6_LINE; + uint8_t HDMI_GENERIC7_CONT; + uint8_t HDMI_GENERIC7_SEND; + uint8_t HDMI_GENERIC7_LINE; + uint8_t DP_PIXEL_ENCODING; + uint8_t DP_COMPONENT_DEPTH; + uint8_t HDMI_PACKET_GEN_VERSION; + uint8_t HDMI_KEEPOUT_MODE; + uint8_t HDMI_DEEP_COLOR_ENABLE; + uint8_t HDMI_CLOCK_CHANNEL_RATE; + uint8_t HDMI_DEEP_COLOR_DEPTH; + uint8_t HDMI_GC_CONT; + uint8_t HDMI_GC_SEND; + uint8_t HDMI_NULL_SEND; + uint8_t HDMI_DATA_SCRAMBLE_EN; + uint8_t HDMI_AUDIO_INFO_SEND; + uint8_t AFMT_AUDIO_INFO_UPDATE; + uint8_t HDMI_AUDIO_INFO_LINE; + uint8_t HDMI_GC_AVMUTE; + uint8_t DP_MSE_RATE_X; + uint8_t DP_MSE_RATE_Y; + uint8_t DP_MSE_RATE_UPDATE_PENDING; + uint8_t DP_SEC_GSP0_ENABLE; + uint8_t DP_SEC_STREAM_ENABLE; + uint8_t DP_SEC_GSP1_ENABLE; + uint8_t DP_SEC_GSP2_ENABLE; + uint8_t DP_SEC_GSP3_ENABLE; + uint8_t DP_SEC_GSP4_ENABLE; + uint8_t DP_SEC_GSP5_ENABLE; + uint8_t DP_SEC_GSP6_ENABLE; + uint8_t DP_SEC_GSP7_ENABLE; + uint8_t DP_SEC_MPG_ENABLE; + uint8_t DP_VID_STREAM_DIS_DEFER; + uint8_t DP_VID_STREAM_ENABLE; + uint8_t DP_VID_STREAM_STATUS; + uint8_t DP_STEER_FIFO_RESET; + uint8_t DP_VID_M_N_GEN_EN; + uint8_t DP_VID_N; + uint8_t DP_VID_M; + uint8_t DIG_START; + uint8_t AFMT_AUDIO_SRC_SELECT; + uint8_t AFMT_AUDIO_CHANNEL_ENABLE; + uint8_t HDMI_AUDIO_PACKETS_PER_LINE; + uint8_t HDMI_AUDIO_DELAY_EN; + uint8_t AFMT_60958_CS_UPDATE; + uint8_t AFMT_AUDIO_LAYOUT_OVRD; + uint8_t AFMT_60958_OSF_OVRD; + uint8_t HDMI_ACR_AUTO_SEND; + uint8_t HDMI_ACR_SOURCE; + uint8_t HDMI_ACR_AUDIO_PRIORITY; + uint8_t HDMI_ACR_CTS_32; + uint8_t HDMI_ACR_N_32; + uint8_t HDMI_ACR_CTS_44; + uint8_t HDMI_ACR_N_44; + uint8_t HDMI_ACR_CTS_48; + uint8_t HDMI_ACR_N_48; + uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L; + uint8_t AFMT_60958_CS_CLOCK_ACCURACY; + uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R; + uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2; + uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3; + uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4; + uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5; + uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6; + uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7; + uint8_t DP_SEC_AUD_N; + uint8_t DP_SEC_TIMESTAMP_MODE; + uint8_t DP_SEC_ASP_ENABLE; + uint8_t DP_SEC_ATP_ENABLE; + uint8_t DP_SEC_AIP_ENABLE; + uint8_t DP_SEC_ACM_ENABLE; + 
uint8_t AFMT_AUDIO_SAMPLE_SEND; + uint8_t AFMT_AUDIO_CLOCK_EN; + uint8_t TMDS_PIXEL_ENCODING; + uint8_t TMDS_COLOR_FORMAT; + uint8_t DIG_STEREOSYNC_SELECT; + uint8_t DIG_STEREOSYNC_GATE_EN; + uint8_t DP_DB_DISABLE; + uint8_t DP_MSA_MISC0; + uint8_t DP_MSA_HTOTAL; + uint8_t DP_MSA_VTOTAL; + uint8_t DP_MSA_HSTART; + uint8_t DP_MSA_VSTART; + uint8_t DP_MSA_HSYNCWIDTH; + uint8_t DP_MSA_HSYNCPOLARITY; + uint8_t DP_MSA_VSYNCWIDTH; + uint8_t DP_MSA_VSYNCPOLARITY; + uint8_t DP_MSA_HWIDTH; + uint8_t DP_MSA_VHEIGHT; + uint8_t HDMI_DB_DISABLE; + uint8_t DP_VID_N_MUL; + uint8_t DP_VID_M_DOUBLE_VALUE_EN; +}; + +struct dcn10_stream_encoder_mask { + uint32_t AFMT_GENERIC_INDEX; + uint32_t AFMT_GENERIC_HB0; + uint32_t AFMT_GENERIC_HB1; + uint32_t AFMT_GENERIC_HB2; + uint32_t AFMT_GENERIC_HB3; + uint32_t AFMT_GENERIC_LOCK_STATUS; + uint32_t AFMT_GENERIC_CONFLICT; + uint32_t AFMT_GENERIC_CONFLICT_CLR; + uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING; + uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING; + uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING; + uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING; + uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING; + uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING; + uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING; + uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING; + uint32_t AFMT_GENERIC0_FRAME_UPDATE; + uint32_t AFMT_GENERIC1_FRAME_UPDATE; + uint32_t AFMT_GENERIC2_FRAME_UPDATE; + uint32_t AFMT_GENERIC3_FRAME_UPDATE; + uint32_t AFMT_GENERIC4_FRAME_UPDATE; + uint32_t AFMT_GENERIC5_FRAME_UPDATE; + uint32_t AFMT_GENERIC6_FRAME_UPDATE; + uint32_t AFMT_GENERIC7_FRAME_UPDATE; + uint32_t HDMI_GENERIC0_CONT; + uint32_t HDMI_GENERIC0_SEND; + uint32_t HDMI_GENERIC0_LINE; + uint32_t HDMI_GENERIC1_CONT; + uint32_t HDMI_GENERIC1_SEND; + uint32_t HDMI_GENERIC1_LINE; + uint32_t HDMI_GENERIC2_CONT; + uint32_t HDMI_GENERIC2_SEND; + uint32_t HDMI_GENERIC2_LINE; + uint32_t HDMI_GENERIC3_CONT; + uint32_t HDMI_GENERIC3_SEND; + uint32_t HDMI_GENERIC3_LINE; + uint32_t HDMI_GENERIC4_CONT; + uint32_t HDMI_GENERIC4_SEND; + uint32_t HDMI_GENERIC4_LINE; + uint32_t HDMI_GENERIC5_CONT; + uint32_t HDMI_GENERIC5_SEND; + uint32_t HDMI_GENERIC5_LINE; + uint32_t HDMI_GENERIC6_CONT; + uint32_t HDMI_GENERIC6_SEND; + uint32_t HDMI_GENERIC6_LINE; + uint32_t HDMI_GENERIC7_CONT; + uint32_t HDMI_GENERIC7_SEND; + uint32_t HDMI_GENERIC7_LINE; + uint32_t DP_PIXEL_ENCODING; + uint32_t DP_COMPONENT_DEPTH; + uint32_t HDMI_PACKET_GEN_VERSION; + uint32_t HDMI_KEEPOUT_MODE; + uint32_t HDMI_DEEP_COLOR_ENABLE; + uint32_t HDMI_CLOCK_CHANNEL_RATE; + uint32_t HDMI_DEEP_COLOR_DEPTH; + uint32_t HDMI_GC_CONT; + uint32_t HDMI_GC_SEND; + uint32_t HDMI_NULL_SEND; + uint32_t HDMI_DATA_SCRAMBLE_EN; + uint32_t HDMI_AUDIO_INFO_SEND; + uint32_t AFMT_AUDIO_INFO_UPDATE; + uint32_t HDMI_AUDIO_INFO_LINE; + uint32_t HDMI_GC_AVMUTE; + uint32_t DP_MSE_RATE_X; + uint32_t DP_MSE_RATE_Y; + uint32_t DP_MSE_RATE_UPDATE_PENDING; + uint32_t DP_SEC_GSP0_ENABLE; + uint32_t DP_SEC_STREAM_ENABLE; + uint32_t DP_SEC_GSP1_ENABLE; + uint32_t DP_SEC_GSP2_ENABLE; + uint32_t DP_SEC_GSP3_ENABLE; + uint32_t DP_SEC_GSP4_ENABLE; + uint32_t DP_SEC_GSP5_ENABLE; + uint32_t DP_SEC_GSP6_ENABLE; + uint32_t DP_SEC_GSP7_ENABLE; + uint32_t DP_SEC_MPG_ENABLE; + uint32_t DP_VID_STREAM_DIS_DEFER; + uint32_t DP_VID_STREAM_ENABLE; + uint32_t DP_VID_STREAM_STATUS; + uint32_t DP_STEER_FIFO_RESET; + uint32_t DP_VID_M_N_GEN_EN; + uint32_t DP_VID_N; + uint32_t DP_VID_M; + uint32_t DIG_START; + uint32_t AFMT_AUDIO_SRC_SELECT; + uint32_t AFMT_AUDIO_CHANNEL_ENABLE; + uint32_t HDMI_AUDIO_PACKETS_PER_LINE; + uint32_t 
HDMI_AUDIO_DELAY_EN; + uint32_t AFMT_60958_CS_UPDATE; + uint32_t AFMT_AUDIO_LAYOUT_OVRD; + uint32_t AFMT_60958_OSF_OVRD; + uint32_t HDMI_ACR_AUTO_SEND; + uint32_t HDMI_ACR_SOURCE; + uint32_t HDMI_ACR_AUDIO_PRIORITY; + uint32_t HDMI_ACR_CTS_32; + uint32_t HDMI_ACR_N_32; + uint32_t HDMI_ACR_CTS_44; + uint32_t HDMI_ACR_N_44; + uint32_t HDMI_ACR_CTS_48; + uint32_t HDMI_ACR_N_48; + uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L; + uint32_t AFMT_60958_CS_CLOCK_ACCURACY; + uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R; + uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2; + uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3; + uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4; + uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5; + uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6; + uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7; + uint32_t DP_SEC_AUD_N; + uint32_t DP_SEC_TIMESTAMP_MODE; + uint32_t DP_SEC_ASP_ENABLE; + uint32_t DP_SEC_ATP_ENABLE; + uint32_t DP_SEC_AIP_ENABLE; + uint32_t DP_SEC_ACM_ENABLE; + uint32_t AFMT_AUDIO_SAMPLE_SEND; + uint32_t AFMT_AUDIO_CLOCK_EN; + uint32_t TMDS_PIXEL_ENCODING; + uint32_t DIG_STEREOSYNC_SELECT; + uint32_t DIG_STEREOSYNC_GATE_EN; + uint32_t TMDS_COLOR_FORMAT; + uint32_t DP_DB_DISABLE; + uint32_t DP_MSA_MISC0; + uint32_t DP_MSA_HTOTAL; + uint32_t DP_MSA_VTOTAL; + uint32_t DP_MSA_HSTART; + uint32_t DP_MSA_VSTART; + uint32_t DP_MSA_HSYNCWIDTH; + uint32_t DP_MSA_HSYNCPOLARITY; + uint32_t DP_MSA_VSYNCWIDTH; + uint32_t DP_MSA_VSYNCPOLARITY; + uint32_t DP_MSA_HWIDTH; + uint32_t DP_MSA_VHEIGHT; + uint32_t HDMI_DB_DISABLE; + uint32_t DP_VID_N_MUL; + uint32_t DP_VID_M_DOUBLE_VALUE_EN; +}; + +struct dcn10_stream_enc_registers { + uint32_t AFMT_CNTL; + uint32_t AFMT_AVI_INFO0; + uint32_t AFMT_AVI_INFO1; + uint32_t AFMT_AVI_INFO2; + uint32_t AFMT_AVI_INFO3; + uint32_t AFMT_GENERIC_0; + uint32_t AFMT_GENERIC_1; + uint32_t AFMT_GENERIC_2; + uint32_t AFMT_GENERIC_3; + uint32_t AFMT_GENERIC_4; + uint32_t AFMT_GENERIC_5; + uint32_t AFMT_GENERIC_6; + uint32_t AFMT_GENERIC_7; + uint32_t AFMT_GENERIC_HDR; + uint32_t AFMT_INFOFRAME_CONTROL0; + uint32_t AFMT_VBI_PACKET_CONTROL; + uint32_t AFMT_VBI_PACKET_CONTROL1; + uint32_t AFMT_AUDIO_PACKET_CONTROL; + uint32_t AFMT_AUDIO_PACKET_CONTROL2; + uint32_t AFMT_AUDIO_SRC_CONTROL; + uint32_t AFMT_60958_0; + uint32_t AFMT_60958_1; + uint32_t AFMT_60958_2; + uint32_t DIG_FE_CNTL; + uint32_t DP_MSE_RATE_CNTL; + uint32_t DP_MSE_RATE_UPDATE; + uint32_t DP_PIXEL_FORMAT; + uint32_t DP_SEC_CNTL; + uint32_t DP_STEER_FIFO; + uint32_t DP_VID_M; + uint32_t DP_VID_N; + uint32_t DP_VID_STREAM_CNTL; + uint32_t DP_VID_TIMING; + uint32_t DP_SEC_AUD_N; + uint32_t DP_SEC_TIMESTAMP; + uint32_t HDMI_CONTROL; + uint32_t HDMI_GC; + uint32_t HDMI_GENERIC_PACKET_CONTROL0; + uint32_t HDMI_GENERIC_PACKET_CONTROL1; + uint32_t HDMI_GENERIC_PACKET_CONTROL2; + uint32_t HDMI_GENERIC_PACKET_CONTROL3; + uint32_t HDMI_GENERIC_PACKET_CONTROL4; + uint32_t HDMI_GENERIC_PACKET_CONTROL5; + uint32_t HDMI_INFOFRAME_CONTROL0; + uint32_t HDMI_INFOFRAME_CONTROL1; + uint32_t HDMI_VBI_PACKET_CONTROL; + uint32_t HDMI_AUDIO_PACKET_CONTROL; + uint32_t HDMI_ACR_PACKET_CONTROL; + uint32_t HDMI_ACR_32_0; + uint32_t HDMI_ACR_32_1; + uint32_t HDMI_ACR_44_0; + uint32_t HDMI_ACR_44_1; + uint32_t HDMI_ACR_48_0; + uint32_t HDMI_ACR_48_1; + uint32_t TMDS_CNTL; + uint32_t DP_DB_CNTL; + uint32_t DP_MSA_MISC; + uint32_t DP_MSA_COLORIMETRY; + uint32_t DP_MSA_TIMING_PARAM1; + uint32_t DP_MSA_TIMING_PARAM2; + uint32_t DP_MSA_TIMING_PARAM3; + uint32_t DP_MSA_TIMING_PARAM4; + uint32_t HDMI_DB_CONTROL; +}; + +struct dcn10_stream_encoder { + struct stream_encoder base; + 
const struct dcn10_stream_enc_registers *regs; + const struct dcn10_stream_encoder_shift *se_shift; + const struct dcn10_stream_encoder_mask *se_mask; +}; + +void dcn10_stream_encoder_construct( + struct dcn10_stream_encoder *enc1, + struct dc_context *ctx, + struct dc_bios *bp, + enum engine_id eng_id, + const struct dcn10_stream_enc_registers *regs, + const struct dcn10_stream_encoder_shift *se_shift, + const struct dcn10_stream_encoder_mask *se_mask); + +#endif /* __DC_STREAM_ENCODER_DCN10_H__ */ -- cgit v1.2.1 From 3dc8acad23519123bdecaf3184f2ae774c5775fc Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Thu, 1 Mar 2018 08:58:02 -0500 Subject: drm/amd/display: remove unused enum Signed-off-by: Jun Lei Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_types.h | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index cd324bcc45e8..9defe3b17617 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -370,12 +370,6 @@ struct dc_csc_adjustments { struct fixed31_32 hue; }; -enum { - MAX_LANES = 2, - MAX_COFUNC_PATH = 6, - LAYER_INDEX_PRIMARY = -1, -}; - enum dpcd_downstream_port_max_bpc { DOWN_STREAM_MAX_8BPC = 0, DOWN_STREAM_MAX_10BPC, -- cgit v1.2.1 From cf65ebeb687678812eb3ddd5ef253bacf7ef330a Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 23 Mar 2018 13:56:16 -0400 Subject: drm/amd/display: fix link bw calculation for 422 and 420 encoding Link bw required is reduced when we have chroma subsampling. Signed-off-by: Eric Yang Reviewed-by: Charlene Liu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 53 ++++++++++++++---------- 1 file changed, 32 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index b86325bb636f..07cc4385a7c1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1378,37 +1378,48 @@ static uint32_t bandwidth_in_kbps_from_timing( { uint32_t bits_per_channel = 0; uint32_t kbps; - switch (timing->display_color_depth) { - case COLOR_DEPTH_666: - bits_per_channel = 6; - break; - case COLOR_DEPTH_888: - bits_per_channel = 8; - break; - case COLOR_DEPTH_101010: - bits_per_channel = 10; - break; - case COLOR_DEPTH_121212: + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) bits_per_channel = 12; - break; - case COLOR_DEPTH_141414: - bits_per_channel = 14; - break; - case COLOR_DEPTH_161616: - bits_per_channel = 16; - break; - default: - break; + else{ + + switch (timing->display_color_depth) { + + case COLOR_DEPTH_666: + bits_per_channel = 6; + break; + case COLOR_DEPTH_888: + bits_per_channel = 8; + break; + case COLOR_DEPTH_101010: + bits_per_channel = 10; + break; + case COLOR_DEPTH_121212: + bits_per_channel = 12; + break; + case COLOR_DEPTH_141414: + bits_per_channel = 14; + break; + case COLOR_DEPTH_161616: + bits_per_channel = 16; + break; + default: + break; + } } ASSERT(bits_per_channel != 0); kbps = timing->pix_clk_khz; kbps *= bits_per_channel; - if (timing->flags.Y_ONLY != 1) + if (timing->flags.Y_ONLY != 1) { /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ kbps *= 3; + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + kbps /= 2; + else if (timing->pixel_encoding 
== PIXEL_ENCODING_YCBCR422) + kbps = kbps * 2 / 3; + } return kbps; -- cgit v1.2.1 From 8f121fe281692ce4b7849ee7be9f3c0dcb079742 Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Mon, 26 Mar 2018 14:01:41 -0400 Subject: drm/amd/display: Fill calcs data from stream src/dst if available We would otherwise fall back to the timing, which would always give us identity. Signed-off-by: Jun Lei Reviewed-by: Wesley Chalmers Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 4b719328afd6..56f46a065a93 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -2933,6 +2933,19 @@ static void populate_initial_data( data->bytes_per_pixel[num_displays + 4] = 4; break; } + } else if (pipe[i].stream->dst.width != 0 && + pipe[i].stream->dst.height != 0 && + pipe[i].stream->src.width != 0 && + pipe[i].stream->src.height != 0) { + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height); + data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2); + data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? bw_int_to_fixed(1) : bw_int_to_fixed(2); + data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width); + data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height); + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + data->bytes_per_pixel[num_displays + 4] = 4; } else { data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable); data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; -- cgit v1.2.1 From 5282cbe32ea5b8008f86a743922f018287113e2c Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Tue, 27 Mar 2018 10:05:10 -0400 Subject: drm/amd/display: Change disable backlight ramp change threshold from 0 to maximum value. Instead of setting brightness as a percentage the way a user would, the HLK test sets the brightness level over its native range, so the test case sets brightness from 0 to 255; DC then sets brightness with a ramp of 0 and disables the ramp change, which fails the HLK test. Fix: when unblanking the stream and turning on eDP, set the brightness level in the stream to 0xFFFFFFFF (the actual maximum level is 0xFF) and use that value as a flag to recognize the case of resume from S3.
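As an illustration of the flow just described, here is a minimal stand-alone sketch of the sentinel-level pattern this fix relies on. Only EDP_BACKLIGHT_RAMP_DISABLE_LEVEL matches the patch; fake_stream, fake_unblank and fake_set_backlight are simplified, hypothetical stand-ins for the real dc_stream_state, dce110_unblank_stream() and dc_link_set_backlight_level() touched in the diff below.

/* Sketch only: condenses the three-file change into one small program.
 * Simplified names; the real constant and call sites are in the patch below.
 */
#include <stdint.h>
#include <stdio.h>

#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF

struct fake_stream {
        uint32_t bl_pwm_level;
};

/* Unblank path (e.g. resume from S3): store the sentinel instead of 0.
 * 0xFFFFFFFF can never be a real stored level (the maximum is 0xFF),
 * so it safely doubles as a "just unblanked" flag.
 */
static void fake_unblank(struct fake_stream *s)
{
        s->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL;
}

/* Brightness write: the first call after unblank sees the sentinel and
 * forces frame_ramp to 0; later calls keep the caller's ramp value.
 */
static void fake_set_backlight(struct fake_stream *s,
                               uint32_t level, uint32_t frame_ramp)
{
        if (s->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL)
                frame_ramp = 0;
        s->bl_pwm_level = level;
        printf("program PWM: level=%u frame_ramp=%u\n",
               (unsigned)level, (unsigned)frame_ramp);
}

int main(void)
{
        struct fake_stream s = { .bl_pwm_level = 0 };

        fake_unblank(&s);                /* resume from S3 */
        fake_set_backlight(&s, 255, 60); /* ramp suppressed */
        fake_set_backlight(&s, 128, 60); /* ramp preserved */
        return 0;
}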
Signed-off-by: Yongqiang Sun Reviewed-by: Eric Yang Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 2 +- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index d9efdd926145..0cd286f8eaa0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1982,7 +1982,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, return false; if (stream) { - if (stream->bl_pwm_level == 0) + if (stream->bl_pwm_level == EDP_BACKLIGHT_RAMP_DISABLE_LEVEL) frame_ramp = 0; ((struct dc_stream_state *)stream)->bl_pwm_level = level; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index db2d15dfb831..78bf4fae9e0d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1036,7 +1036,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx, if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { link->dc->hwss.edp_backlight_control(link, true); - stream->bl_pwm_level = 0; + stream->bl_pwm_level = EDP_BACKLIGHT_RAMP_DISABLE_LEVEL; } } void dce110_blank_stream(struct pipe_ctx *pipe_ctx) diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index e764cbad881b..f54d478ffc5c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -32,6 +32,8 @@ #include "inc/hw/link_encoder.h" #include "core_status.h" +#define EDP_BACKLIGHT_RAMP_DISABLE_LEVEL 0xFFFFFFFF + enum pipe_gating_control { PIPE_GATING_CONTROL_DISABLE = 0, PIPE_GATING_CONTROL_ENABLE, -- cgit v1.2.1 From dc002a2e4f6e164fd9e5c1353df795dc65784887 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 26 Mar 2018 12:33:22 -0400 Subject: drm/amd/display: Update scaler v_active data if interlaced Signed-off-by: Dmytro Laktyushkin Reviewed-by: Nikola Cornij Reviewed-by: Wesley Chalmers Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 50b84f69bd25..eb8f4792198c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -844,6 +844,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); + if (pipe_ctx->stream->timing.flags.INTERLACE) + pipe_ctx->stream->dst.height *= 2; + calculate_scaling_ratios(pipe_ctx); calculate_viewport(pipe_ctx); @@ -864,6 +867,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; + if (pipe_ctx->stream->timing.flags.INTERLACE) + pipe_ctx->plane_res.scl_data.v_active 
*= 2; /* Taps calculations */ @@ -909,6 +914,9 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) plane_state->dst_rect.x, plane_state->dst_rect.y); + if (pipe_ctx->stream->timing.flags.INTERLACE) + pipe_ctx->stream->dst.height /= 2; + return res; } -- cgit v1.2.1 From c5011872f6ad7fb8700117ae2fbdcd3ebbbe8402 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Mon, 26 Mar 2018 16:28:03 -0400 Subject: drm/amd/display: Make DCN stream encoder shareable Signed-off-by: Eric Bernstein Reviewed-by: Charlene Liu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dce/dce_stream_encoder.c | 39 +- .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 62 +- .../amd/display/dc/dcn10/dcn10_stream_encoder.h | 646 ++++++++++----------- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 17 + 4 files changed, 343 insertions(+), 421 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index b85fda5f38e8..07c32421c226 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -26,27 +26,10 @@ #include "dc_bios_types.h" #include "dce_stream_encoder.h" #include "reg_helper.h" +#include "hw_shared.h" + #define DC_LOGGER \ enc110->base.ctx->logger -enum DP_PIXEL_ENCODING { -DP_PIXEL_ENCODING_RGB444 = 0x00000000, -DP_PIXEL_ENCODING_YCBCR422 = 0x00000001, -DP_PIXEL_ENCODING_YCBCR444 = 0x00000002, -DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003, -DP_PIXEL_ENCODING_Y_ONLY = 0x00000004, -DP_PIXEL_ENCODING_YCBCR420 = 0x00000005, -DP_PIXEL_ENCODING_RESERVED = 0x00000006, -}; - - -enum DP_COMPONENT_DEPTH { -DP_COMPONENT_DEPTH_6BPC = 0x00000000, -DP_COMPONENT_DEPTH_8BPC = 0x00000001, -DP_COMPONENT_DEPTH_10BPC = 0x00000002, -DP_COMPONENT_DEPTH_12BPC = 0x00000003, -DP_COMPONENT_DEPTH_16BPC = 0x00000004, -DP_COMPONENT_DEPTH_RESERVED = 0x00000005, -}; #define REG(reg)\ @@ -314,11 +297,11 @@ static void dce110_stream_encoder_dp_set_stream_attribute( switch (crtc_timing->pixel_encoding) { case PIXEL_ENCODING_YCBCR422: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_YCBCR422); + DP_PIXEL_ENCODING_TYPE_YCBCR422); break; case PIXEL_ENCODING_YCBCR444: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_YCBCR444); + DP_PIXEL_ENCODING_TYPE_YCBCR444); if (crtc_timing->flags.Y_ONLY) if (crtc_timing->display_color_depth != COLOR_DEPTH_666) @@ -326,7 +309,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( * Color depth of Y-only could be * 8, 10, 12, 16 bits */ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_Y_ONLY); + DP_PIXEL_ENCODING_TYPE_Y_ONLY); /* Note: DP_MSA_MISC1 bit 7 is the indicator * of Y-only mode. 
* This bit is set in HW if register @@ -334,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; case PIXEL_ENCODING_YCBCR420: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_YCBCR420); + DP_PIXEL_ENCODING_TYPE_YCBCR420); if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN) REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1); @@ -345,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; default: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_RGB444); + DP_PIXEL_ENCODING_TYPE_RGB444); break; } @@ -363,20 +346,20 @@ static void dce110_stream_encoder_dp_set_stream_attribute( break; case COLOR_DEPTH_888: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, - DP_COMPONENT_DEPTH_8BPC); + DP_COMPONENT_PIXEL_DEPTH_8BPC); break; case COLOR_DEPTH_101010: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, - DP_COMPONENT_DEPTH_10BPC); + DP_COMPONENT_PIXEL_DEPTH_10BPC); break; case COLOR_DEPTH_121212: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, - DP_COMPONENT_DEPTH_12BPC); + DP_COMPONENT_PIXEL_DEPTH_12BPC); break; default: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, - DP_COMPONENT_DEPTH_6BPC); + DP_COMPONENT_PIXEL_DEPTH_6BPC); break; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 0413c707b921..9ec46f8fc7cc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -26,29 +26,11 @@ #include "dc_bios_types.h" #include "dcn10_stream_encoder.h" - #include "reg_helper.h" +#include "hw_shared.h" + #define DC_LOGGER \ enc1->base.ctx->logger -enum DP_PIXEL_ENCODING { -DP_PIXEL_ENCODING_RGB444 = 0x00000000, -DP_PIXEL_ENCODING_YCBCR422 = 0x00000001, -DP_PIXEL_ENCODING_YCBCR444 = 0x00000002, -DP_PIXEL_ENCODING_RGB_WIDE_GAMUT = 0x00000003, -DP_PIXEL_ENCODING_Y_ONLY = 0x00000004, -DP_PIXEL_ENCODING_YCBCR420 = 0x00000005, -DP_PIXEL_ENCODING_RESERVED = 0x00000006, -}; - - -enum DP_COMPONENT_DEPTH { -DP_COMPONENT_DEPTH_6BPC = 0x00000000, -DP_COMPONENT_DEPTH_8BPC = 0x00000001, -DP_COMPONENT_DEPTH_10BPC = 0x00000002, -DP_COMPONENT_DEPTH_12BPC = 0x00000003, -DP_COMPONENT_DEPTH_16BPC = 0x00000004, -DP_COMPONENT_DEPTH_RESERVED = 0x00000005, -}; #define REG(reg)\ @@ -70,7 +52,7 @@ enum { #define CTX \ enc1->base.ctx -static void enc1_update_generic_info_packet( +void enc1_update_generic_info_packet( struct dcn10_stream_encoder *enc1, uint32_t packet_index, const struct dc_info_packet *info_packet) @@ -260,7 +242,7 @@ static void enc1_update_hdmi_info_packet( } /* setup stream encoder in dp mode */ -static void enc1_stream_encoder_dp_set_stream_attribute( +void enc1_stream_encoder_dp_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, enum dc_color_space output_color_space) @@ -284,11 +266,11 @@ static void enc1_stream_encoder_dp_set_stream_attribute( switch (crtc_timing->pixel_encoding) { case PIXEL_ENCODING_YCBCR422: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_YCBCR422); + DP_PIXEL_ENCODING_TYPE_YCBCR422); break; case PIXEL_ENCODING_YCBCR444: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_YCBCR444); + DP_PIXEL_ENCODING_TYPE_YCBCR444); if (crtc_timing->flags.Y_ONLY) if (crtc_timing->display_color_depth != COLOR_DEPTH_666) @@ -297,7 +279,7 @@ static void enc1_stream_encoder_dp_set_stream_attribute( * 8, 10, 12, 16 bits */ REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - 
DP_PIXEL_ENCODING_Y_ONLY); + DP_PIXEL_ENCODING_TYPE_Y_ONLY); /* Note: DP_MSA_MISC1 bit 7 is the indicator * of Y-only mode. * This bit is set in HW if register @@ -306,12 +288,12 @@ static void enc1_stream_encoder_dp_set_stream_attribute( break; case PIXEL_ENCODING_YCBCR420: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_YCBCR420); + DP_PIXEL_ENCODING_TYPE_YCBCR420); REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1); break; default: REG_UPDATE(DP_PIXEL_FORMAT, DP_PIXEL_ENCODING, - DP_PIXEL_ENCODING_RGB444); + DP_PIXEL_ENCODING_TYPE_RGB444); break; } @@ -326,20 +308,20 @@ static void enc1_stream_encoder_dp_set_stream_attribute( break; case COLOR_DEPTH_888: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, - DP_COMPONENT_DEPTH_8BPC); + DP_COMPONENT_PIXEL_DEPTH_8BPC); break; case COLOR_DEPTH_101010: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, - DP_COMPONENT_DEPTH_10BPC); + DP_COMPONENT_PIXEL_DEPTH_10BPC); break; case COLOR_DEPTH_121212: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, - DP_COMPONENT_DEPTH_12BPC); + DP_COMPONENT_PIXEL_DEPTH_12BPC); break; default: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, - DP_COMPONENT_DEPTH_6BPC); + DP_COMPONENT_PIXEL_DEPTH_6BPC); break; } @@ -485,7 +467,7 @@ static void enc1_stream_encoder_set_stream_attribute_helper( } /* setup stream encoder in hdmi mode */ -static void enc1_stream_encoder_hdmi_set_stream_attribute( +void enc1_stream_encoder_hdmi_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, int actual_pix_clk_khz, @@ -591,7 +573,7 @@ static void enc1_stream_encoder_hdmi_set_stream_attribute( } /* setup stream encoder in dvi mode */ -static void enc1_stream_encoder_dvi_set_stream_attribute( +void enc1_stream_encoder_dvi_set_stream_attribute( struct stream_encoder *enc, struct dc_crtc_timing *crtc_timing, bool is_dual_link) @@ -616,7 +598,7 @@ static void enc1_stream_encoder_dvi_set_stream_attribute( enc1_stream_encoder_set_stream_attribute_helper(enc1, crtc_timing); } -static void enc1_stream_encoder_set_mst_bandwidth( +void enc1_stream_encoder_set_mst_bandwidth( struct stream_encoder *enc, struct fixed31_32 avg_time_slots_per_mtp) { @@ -699,7 +681,7 @@ static void enc1_stream_encoder_stop_hdmi_info_packets( HDMI_GENERIC1_SEND, 0); } -static void enc1_stream_encoder_update_dp_info_packets( +void enc1_stream_encoder_update_dp_info_packets( struct stream_encoder *enc, const struct encoder_info_frame *info_frame) { @@ -742,7 +724,7 @@ static void enc1_stream_encoder_update_dp_info_packets( REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } -static void enc1_stream_encoder_stop_dp_info_packets( +void enc1_stream_encoder_stop_dp_info_packets( struct stream_encoder *enc) { /* stop generic packets on DP */ @@ -770,7 +752,7 @@ static void enc1_stream_encoder_stop_dp_info_packets( } -static void enc1_stream_encoder_dp_blank( +void enc1_stream_encoder_dp_blank( struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); @@ -823,7 +805,7 @@ static void enc1_stream_encoder_dp_blank( } /* output video stream to link encoder */ -static void enc1_stream_encoder_dp_unblank( +void enc1_stream_encoder_dp_unblank( struct stream_encoder *enc, const struct encoder_unblank_param *param) { @@ -885,7 +867,7 @@ static void enc1_stream_encoder_dp_unblank( REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true); } -static void enc1_stream_encoder_set_avmute( +void enc1_stream_encoder_set_avmute( struct stream_encoder *enc, bool enable) { @@ -1442,7 +1424,7 @@ void 
enc1_se_hdmi_audio_disable( } -static void enc1_setup_stereo_sync( +void enc1_setup_stereo_sync( struct stream_encoder *enc, int tg_inst, bool enable) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 86f8ee5ed8b8..6b3e4ded155b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -31,7 +31,8 @@ #define DCN10STRENC_FROM_STRENC(stream_encoder)\ container_of(stream_encoder, struct dcn10_stream_encoder, base) -#define SE_COMMON_REG_LIST_BASE(id) \ +#define SE_COMMON_DCN_REG_LIST(id) \ + SRI(AFMT_CNTL, DIG, id), \ SRI(AFMT_GENERIC_0, DIG, id), \ SRI(AFMT_GENERIC_1, DIG, id), \ SRI(AFMT_GENERIC_2, DIG, id), \ @@ -43,6 +44,7 @@ SRI(AFMT_GENERIC_HDR, DIG, id), \ SRI(AFMT_INFOFRAME_CONTROL0, DIG, id), \ SRI(AFMT_VBI_PACKET_CONTROL, DIG, id), \ + SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id), \ SRI(AFMT_AUDIO_PACKET_CONTROL, DIG, id), \ SRI(AFMT_AUDIO_PACKET_CONTROL2, DIG, id), \ SRI(AFMT_AUDIO_SRC_CONTROL, DIG, id), \ @@ -51,9 +53,12 @@ SRI(AFMT_60958_2, DIG, id), \ SRI(DIG_FE_CNTL, DIG, id), \ SRI(HDMI_CONTROL, DIG, id), \ + SRI(HDMI_DB_CONTROL, DIG, id), \ SRI(HDMI_GC, DIG, id), \ SRI(HDMI_GENERIC_PACKET_CONTROL0, DIG, id), \ SRI(HDMI_GENERIC_PACKET_CONTROL1, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ + SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ SRI(HDMI_INFOFRAME_CONTROL0, DIG, id), \ SRI(HDMI_INFOFRAME_CONTROL1, DIG, id), \ SRI(HDMI_VBI_PACKET_CONTROL, DIG, id), \ @@ -65,7 +70,13 @@ SRI(HDMI_ACR_44_1, DIG, id),\ SRI(HDMI_ACR_48_0, DIG, id),\ SRI(HDMI_ACR_48_1, DIG, id),\ - SRI(TMDS_CNTL, DIG, id), \ + SRI(DP_DB_CNTL, DP, id), \ + SRI(DP_MSA_MISC, DP, id), \ + SRI(DP_MSA_COLORIMETRY, DP, id), \ + SRI(DP_MSA_TIMING_PARAM1, DP, id), \ + SRI(DP_MSA_TIMING_PARAM2, DP, id), \ + SRI(DP_MSA_TIMING_PARAM3, DP, id), \ + SRI(DP_MSA_TIMING_PARAM4, DP, id), \ SRI(DP_MSE_RATE_CNTL, DP, id), \ SRI(DP_MSE_RATE_UPDATE, DP, id), \ SRI(DP_PIXEL_FORMAT, DP, id), \ @@ -79,19 +90,74 @@ SRI(DP_SEC_TIMESTAMP, DP, id) #define SE_DCN_REG_LIST(id)\ - SE_COMMON_REG_LIST_BASE(id),\ - SRI(AFMT_CNTL, DIG, id),\ - SRI(AFMT_VBI_PACKET_CONTROL1, DIG, id),\ - SRI(HDMI_GENERIC_PACKET_CONTROL2, DIG, id), \ - SRI(HDMI_GENERIC_PACKET_CONTROL3, DIG, id), \ - SRI(DP_DB_CNTL, DP, id), \ - SRI(DP_MSA_MISC, DP, id), \ - SRI(DP_MSA_COLORIMETRY, DP, id), \ - SRI(DP_MSA_TIMING_PARAM1, DP, id), \ - SRI(DP_MSA_TIMING_PARAM2, DP, id), \ - SRI(DP_MSA_TIMING_PARAM3, DP, id), \ - SRI(DP_MSA_TIMING_PARAM4, DP, id), \ - SRI(HDMI_DB_CONTROL, DIG, id) + SE_COMMON_DCN_REG_LIST(id) + + +struct dcn10_stream_enc_registers { + uint32_t AFMT_CNTL; + uint32_t AFMT_AVI_INFO0; + uint32_t AFMT_AVI_INFO1; + uint32_t AFMT_AVI_INFO2; + uint32_t AFMT_AVI_INFO3; + uint32_t AFMT_GENERIC_0; + uint32_t AFMT_GENERIC_1; + uint32_t AFMT_GENERIC_2; + uint32_t AFMT_GENERIC_3; + uint32_t AFMT_GENERIC_4; + uint32_t AFMT_GENERIC_5; + uint32_t AFMT_GENERIC_6; + uint32_t AFMT_GENERIC_7; + uint32_t AFMT_GENERIC_HDR; + uint32_t AFMT_INFOFRAME_CONTROL0; + uint32_t AFMT_VBI_PACKET_CONTROL; + uint32_t AFMT_VBI_PACKET_CONTROL1; + uint32_t AFMT_AUDIO_PACKET_CONTROL; + uint32_t AFMT_AUDIO_PACKET_CONTROL2; + uint32_t AFMT_AUDIO_SRC_CONTROL; + uint32_t AFMT_60958_0; + uint32_t AFMT_60958_1; + uint32_t AFMT_60958_2; + uint32_t DIG_FE_CNTL; + uint32_t DP_MSE_RATE_CNTL; + uint32_t DP_MSE_RATE_UPDATE; + uint32_t DP_PIXEL_FORMAT; + uint32_t DP_SEC_CNTL; + uint32_t DP_STEER_FIFO; + uint32_t DP_VID_M; + 
uint32_t DP_VID_N; + uint32_t DP_VID_STREAM_CNTL; + uint32_t DP_VID_TIMING; + uint32_t DP_SEC_AUD_N; + uint32_t DP_SEC_TIMESTAMP; + uint32_t HDMI_CONTROL; + uint32_t HDMI_GC; + uint32_t HDMI_GENERIC_PACKET_CONTROL0; + uint32_t HDMI_GENERIC_PACKET_CONTROL1; + uint32_t HDMI_GENERIC_PACKET_CONTROL2; + uint32_t HDMI_GENERIC_PACKET_CONTROL3; + uint32_t HDMI_GENERIC_PACKET_CONTROL4; + uint32_t HDMI_GENERIC_PACKET_CONTROL5; + uint32_t HDMI_INFOFRAME_CONTROL0; + uint32_t HDMI_INFOFRAME_CONTROL1; + uint32_t HDMI_VBI_PACKET_CONTROL; + uint32_t HDMI_AUDIO_PACKET_CONTROL; + uint32_t HDMI_ACR_PACKET_CONTROL; + uint32_t HDMI_ACR_32_0; + uint32_t HDMI_ACR_32_1; + uint32_t HDMI_ACR_44_0; + uint32_t HDMI_ACR_44_1; + uint32_t HDMI_ACR_48_0; + uint32_t HDMI_ACR_48_1; + uint32_t DP_DB_CNTL; + uint32_t DP_MSA_MISC; + uint32_t DP_MSA_COLORIMETRY; + uint32_t DP_MSA_TIMING_PARAM1; + uint32_t DP_MSA_TIMING_PARAM2; + uint32_t DP_MSA_TIMING_PARAM3; + uint32_t DP_MSA_TIMING_PARAM4; + uint32_t HDMI_DB_CONTROL; +}; + #define SE_SF(reg_name, field_name, post_fix)\ .field_name = reg_name ## __ ## field_name ## post_fix @@ -221,348 +287,151 @@ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_SEND, mask_sh),\ SE_SF(DIG0_HDMI_GENERIC_PACKET_CONTROL0, HDMI_GENERIC1_LINE, mask_sh) + +#define SE_REG_FIELD_LIST_DCN1_0(type) \ + type AFMT_GENERIC_INDEX;\ + type AFMT_GENERIC_HB0;\ + type AFMT_GENERIC_HB1;\ + type AFMT_GENERIC_HB2;\ + type AFMT_GENERIC_HB3;\ + type AFMT_GENERIC_LOCK_STATUS;\ + type AFMT_GENERIC_CONFLICT;\ + type AFMT_GENERIC_CONFLICT_CLR;\ + type AFMT_GENERIC0_FRAME_UPDATE_PENDING;\ + type AFMT_GENERIC1_FRAME_UPDATE_PENDING;\ + type AFMT_GENERIC2_FRAME_UPDATE_PENDING;\ + type AFMT_GENERIC3_FRAME_UPDATE_PENDING;\ + type AFMT_GENERIC4_FRAME_UPDATE_PENDING;\ + type AFMT_GENERIC5_FRAME_UPDATE_PENDING;\ + type AFMT_GENERIC6_FRAME_UPDATE_PENDING;\ + type AFMT_GENERIC7_FRAME_UPDATE_PENDING;\ + type AFMT_GENERIC0_FRAME_UPDATE;\ + type AFMT_GENERIC1_FRAME_UPDATE;\ + type AFMT_GENERIC2_FRAME_UPDATE;\ + type AFMT_GENERIC3_FRAME_UPDATE;\ + type AFMT_GENERIC4_FRAME_UPDATE;\ + type AFMT_GENERIC5_FRAME_UPDATE;\ + type AFMT_GENERIC6_FRAME_UPDATE;\ + type AFMT_GENERIC7_FRAME_UPDATE;\ + type HDMI_GENERIC0_CONT;\ + type HDMI_GENERIC0_SEND;\ + type HDMI_GENERIC0_LINE;\ + type HDMI_GENERIC1_CONT;\ + type HDMI_GENERIC1_SEND;\ + type HDMI_GENERIC1_LINE;\ + type HDMI_GENERIC2_CONT;\ + type HDMI_GENERIC2_SEND;\ + type HDMI_GENERIC2_LINE;\ + type HDMI_GENERIC3_CONT;\ + type HDMI_GENERIC3_SEND;\ + type HDMI_GENERIC3_LINE;\ + type HDMI_GENERIC4_CONT;\ + type HDMI_GENERIC4_SEND;\ + type HDMI_GENERIC4_LINE;\ + type HDMI_GENERIC5_CONT;\ + type HDMI_GENERIC5_SEND;\ + type HDMI_GENERIC5_LINE;\ + type HDMI_GENERIC6_CONT;\ + type HDMI_GENERIC6_SEND;\ + type HDMI_GENERIC6_LINE;\ + type HDMI_GENERIC7_CONT;\ + type HDMI_GENERIC7_SEND;\ + type HDMI_GENERIC7_LINE;\ + type DP_PIXEL_ENCODING;\ + type DP_COMPONENT_DEPTH;\ + type HDMI_PACKET_GEN_VERSION;\ + type HDMI_KEEPOUT_MODE;\ + type HDMI_DEEP_COLOR_ENABLE;\ + type HDMI_CLOCK_CHANNEL_RATE;\ + type HDMI_DEEP_COLOR_DEPTH;\ + type HDMI_GC_CONT;\ + type HDMI_GC_SEND;\ + type HDMI_NULL_SEND;\ + type HDMI_DATA_SCRAMBLE_EN;\ + type HDMI_AUDIO_INFO_SEND;\ + type AFMT_AUDIO_INFO_UPDATE;\ + type HDMI_AUDIO_INFO_LINE;\ + type HDMI_GC_AVMUTE;\ + type DP_MSE_RATE_X;\ + type DP_MSE_RATE_Y;\ + type DP_MSE_RATE_UPDATE_PENDING;\ + type DP_SEC_GSP0_ENABLE;\ + type DP_SEC_STREAM_ENABLE;\ + type DP_SEC_GSP1_ENABLE;\ + type DP_SEC_GSP2_ENABLE;\ + type DP_SEC_GSP3_ENABLE;\ + type DP_SEC_GSP4_ENABLE;\ + type 
DP_SEC_GSP5_ENABLE;\ + type DP_SEC_GSP6_ENABLE;\ + type DP_SEC_GSP7_ENABLE;\ + type DP_SEC_MPG_ENABLE;\ + type DP_VID_STREAM_DIS_DEFER;\ + type DP_VID_STREAM_ENABLE;\ + type DP_VID_STREAM_STATUS;\ + type DP_STEER_FIFO_RESET;\ + type DP_VID_M_N_GEN_EN;\ + type DP_VID_N;\ + type DP_VID_M;\ + type DIG_START;\ + type AFMT_AUDIO_SRC_SELECT;\ + type AFMT_AUDIO_CHANNEL_ENABLE;\ + type HDMI_AUDIO_PACKETS_PER_LINE;\ + type HDMI_AUDIO_DELAY_EN;\ + type AFMT_60958_CS_UPDATE;\ + type AFMT_AUDIO_LAYOUT_OVRD;\ + type AFMT_60958_OSF_OVRD;\ + type HDMI_ACR_AUTO_SEND;\ + type HDMI_ACR_SOURCE;\ + type HDMI_ACR_AUDIO_PRIORITY;\ + type HDMI_ACR_CTS_32;\ + type HDMI_ACR_N_32;\ + type HDMI_ACR_CTS_44;\ + type HDMI_ACR_N_44;\ + type HDMI_ACR_CTS_48;\ + type HDMI_ACR_N_48;\ + type AFMT_60958_CS_CHANNEL_NUMBER_L;\ + type AFMT_60958_CS_CLOCK_ACCURACY;\ + type AFMT_60958_CS_CHANNEL_NUMBER_R;\ + type AFMT_60958_CS_CHANNEL_NUMBER_2;\ + type AFMT_60958_CS_CHANNEL_NUMBER_3;\ + type AFMT_60958_CS_CHANNEL_NUMBER_4;\ + type AFMT_60958_CS_CHANNEL_NUMBER_5;\ + type AFMT_60958_CS_CHANNEL_NUMBER_6;\ + type AFMT_60958_CS_CHANNEL_NUMBER_7;\ + type DP_SEC_AUD_N;\ + type DP_SEC_TIMESTAMP_MODE;\ + type DP_SEC_ASP_ENABLE;\ + type DP_SEC_ATP_ENABLE;\ + type DP_SEC_AIP_ENABLE;\ + type DP_SEC_ACM_ENABLE;\ + type AFMT_AUDIO_SAMPLE_SEND;\ + type AFMT_AUDIO_CLOCK_EN;\ + type TMDS_PIXEL_ENCODING;\ + type TMDS_COLOR_FORMAT;\ + type DIG_STEREOSYNC_SELECT;\ + type DIG_STEREOSYNC_GATE_EN;\ + type DP_DB_DISABLE;\ + type DP_MSA_MISC0;\ + type DP_MSA_HTOTAL;\ + type DP_MSA_VTOTAL;\ + type DP_MSA_HSTART;\ + type DP_MSA_VSTART;\ + type DP_MSA_HSYNCWIDTH;\ + type DP_MSA_HSYNCPOLARITY;\ + type DP_MSA_VSYNCWIDTH;\ + type DP_MSA_VSYNCPOLARITY;\ + type DP_MSA_HWIDTH;\ + type DP_MSA_VHEIGHT;\ + type HDMI_DB_DISABLE;\ + type DP_VID_N_MUL;\ + type DP_VID_M_DOUBLE_VALUE_EN + struct dcn10_stream_encoder_shift { - uint8_t AFMT_GENERIC_INDEX; - uint8_t AFMT_GENERIC_HB0; - uint8_t AFMT_GENERIC_HB1; - uint8_t AFMT_GENERIC_HB2; - uint8_t AFMT_GENERIC_HB3; - uint8_t AFMT_GENERIC_LOCK_STATUS; - uint8_t AFMT_GENERIC_CONFLICT; - uint8_t AFMT_GENERIC_CONFLICT_CLR; - uint8_t AFMT_GENERIC0_FRAME_UPDATE_PENDING; - uint8_t AFMT_GENERIC1_FRAME_UPDATE_PENDING; - uint8_t AFMT_GENERIC2_FRAME_UPDATE_PENDING; - uint8_t AFMT_GENERIC3_FRAME_UPDATE_PENDING; - uint8_t AFMT_GENERIC4_FRAME_UPDATE_PENDING; - uint8_t AFMT_GENERIC5_FRAME_UPDATE_PENDING; - uint8_t AFMT_GENERIC6_FRAME_UPDATE_PENDING; - uint8_t AFMT_GENERIC7_FRAME_UPDATE_PENDING; - uint8_t AFMT_GENERIC0_FRAME_UPDATE; - uint8_t AFMT_GENERIC1_FRAME_UPDATE; - uint8_t AFMT_GENERIC2_FRAME_UPDATE; - uint8_t AFMT_GENERIC3_FRAME_UPDATE; - uint8_t AFMT_GENERIC4_FRAME_UPDATE; - uint8_t AFMT_GENERIC5_FRAME_UPDATE; - uint8_t AFMT_GENERIC6_FRAME_UPDATE; - uint8_t AFMT_GENERIC7_FRAME_UPDATE; - uint8_t HDMI_GENERIC0_CONT; - uint8_t HDMI_GENERIC0_SEND; - uint8_t HDMI_GENERIC0_LINE; - uint8_t HDMI_GENERIC1_CONT; - uint8_t HDMI_GENERIC1_SEND; - uint8_t HDMI_GENERIC1_LINE; - uint8_t HDMI_GENERIC2_CONT; - uint8_t HDMI_GENERIC2_SEND; - uint8_t HDMI_GENERIC2_LINE; - uint8_t HDMI_GENERIC3_CONT; - uint8_t HDMI_GENERIC3_SEND; - uint8_t HDMI_GENERIC3_LINE; - uint8_t HDMI_GENERIC4_CONT; - uint8_t HDMI_GENERIC4_SEND; - uint8_t HDMI_GENERIC4_LINE; - uint8_t HDMI_GENERIC5_CONT; - uint8_t HDMI_GENERIC5_SEND; - uint8_t HDMI_GENERIC5_LINE; - uint8_t HDMI_GENERIC6_CONT; - uint8_t HDMI_GENERIC6_SEND; - uint8_t HDMI_GENERIC6_LINE; - uint8_t HDMI_GENERIC7_CONT; - uint8_t HDMI_GENERIC7_SEND; - uint8_t HDMI_GENERIC7_LINE; - uint8_t DP_PIXEL_ENCODING; - uint8_t 
DP_COMPONENT_DEPTH; - uint8_t HDMI_PACKET_GEN_VERSION; - uint8_t HDMI_KEEPOUT_MODE; - uint8_t HDMI_DEEP_COLOR_ENABLE; - uint8_t HDMI_CLOCK_CHANNEL_RATE; - uint8_t HDMI_DEEP_COLOR_DEPTH; - uint8_t HDMI_GC_CONT; - uint8_t HDMI_GC_SEND; - uint8_t HDMI_NULL_SEND; - uint8_t HDMI_DATA_SCRAMBLE_EN; - uint8_t HDMI_AUDIO_INFO_SEND; - uint8_t AFMT_AUDIO_INFO_UPDATE; - uint8_t HDMI_AUDIO_INFO_LINE; - uint8_t HDMI_GC_AVMUTE; - uint8_t DP_MSE_RATE_X; - uint8_t DP_MSE_RATE_Y; - uint8_t DP_MSE_RATE_UPDATE_PENDING; - uint8_t DP_SEC_GSP0_ENABLE; - uint8_t DP_SEC_STREAM_ENABLE; - uint8_t DP_SEC_GSP1_ENABLE; - uint8_t DP_SEC_GSP2_ENABLE; - uint8_t DP_SEC_GSP3_ENABLE; - uint8_t DP_SEC_GSP4_ENABLE; - uint8_t DP_SEC_GSP5_ENABLE; - uint8_t DP_SEC_GSP6_ENABLE; - uint8_t DP_SEC_GSP7_ENABLE; - uint8_t DP_SEC_MPG_ENABLE; - uint8_t DP_VID_STREAM_DIS_DEFER; - uint8_t DP_VID_STREAM_ENABLE; - uint8_t DP_VID_STREAM_STATUS; - uint8_t DP_STEER_FIFO_RESET; - uint8_t DP_VID_M_N_GEN_EN; - uint8_t DP_VID_N; - uint8_t DP_VID_M; - uint8_t DIG_START; - uint8_t AFMT_AUDIO_SRC_SELECT; - uint8_t AFMT_AUDIO_CHANNEL_ENABLE; - uint8_t HDMI_AUDIO_PACKETS_PER_LINE; - uint8_t HDMI_AUDIO_DELAY_EN; - uint8_t AFMT_60958_CS_UPDATE; - uint8_t AFMT_AUDIO_LAYOUT_OVRD; - uint8_t AFMT_60958_OSF_OVRD; - uint8_t HDMI_ACR_AUTO_SEND; - uint8_t HDMI_ACR_SOURCE; - uint8_t HDMI_ACR_AUDIO_PRIORITY; - uint8_t HDMI_ACR_CTS_32; - uint8_t HDMI_ACR_N_32; - uint8_t HDMI_ACR_CTS_44; - uint8_t HDMI_ACR_N_44; - uint8_t HDMI_ACR_CTS_48; - uint8_t HDMI_ACR_N_48; - uint8_t AFMT_60958_CS_CHANNEL_NUMBER_L; - uint8_t AFMT_60958_CS_CLOCK_ACCURACY; - uint8_t AFMT_60958_CS_CHANNEL_NUMBER_R; - uint8_t AFMT_60958_CS_CHANNEL_NUMBER_2; - uint8_t AFMT_60958_CS_CHANNEL_NUMBER_3; - uint8_t AFMT_60958_CS_CHANNEL_NUMBER_4; - uint8_t AFMT_60958_CS_CHANNEL_NUMBER_5; - uint8_t AFMT_60958_CS_CHANNEL_NUMBER_6; - uint8_t AFMT_60958_CS_CHANNEL_NUMBER_7; - uint8_t DP_SEC_AUD_N; - uint8_t DP_SEC_TIMESTAMP_MODE; - uint8_t DP_SEC_ASP_ENABLE; - uint8_t DP_SEC_ATP_ENABLE; - uint8_t DP_SEC_AIP_ENABLE; - uint8_t DP_SEC_ACM_ENABLE; - uint8_t AFMT_AUDIO_SAMPLE_SEND; - uint8_t AFMT_AUDIO_CLOCK_EN; - uint8_t TMDS_PIXEL_ENCODING; - uint8_t TMDS_COLOR_FORMAT; - uint8_t DIG_STEREOSYNC_SELECT; - uint8_t DIG_STEREOSYNC_GATE_EN; - uint8_t DP_DB_DISABLE; - uint8_t DP_MSA_MISC0; - uint8_t DP_MSA_HTOTAL; - uint8_t DP_MSA_VTOTAL; - uint8_t DP_MSA_HSTART; - uint8_t DP_MSA_VSTART; - uint8_t DP_MSA_HSYNCWIDTH; - uint8_t DP_MSA_HSYNCPOLARITY; - uint8_t DP_MSA_VSYNCWIDTH; - uint8_t DP_MSA_VSYNCPOLARITY; - uint8_t DP_MSA_HWIDTH; - uint8_t DP_MSA_VHEIGHT; - uint8_t HDMI_DB_DISABLE; - uint8_t DP_VID_N_MUL; - uint8_t DP_VID_M_DOUBLE_VALUE_EN; + SE_REG_FIELD_LIST_DCN1_0(uint8_t); }; struct dcn10_stream_encoder_mask { - uint32_t AFMT_GENERIC_INDEX; - uint32_t AFMT_GENERIC_HB0; - uint32_t AFMT_GENERIC_HB1; - uint32_t AFMT_GENERIC_HB2; - uint32_t AFMT_GENERIC_HB3; - uint32_t AFMT_GENERIC_LOCK_STATUS; - uint32_t AFMT_GENERIC_CONFLICT; - uint32_t AFMT_GENERIC_CONFLICT_CLR; - uint32_t AFMT_GENERIC0_FRAME_UPDATE_PENDING; - uint32_t AFMT_GENERIC1_FRAME_UPDATE_PENDING; - uint32_t AFMT_GENERIC2_FRAME_UPDATE_PENDING; - uint32_t AFMT_GENERIC3_FRAME_UPDATE_PENDING; - uint32_t AFMT_GENERIC4_FRAME_UPDATE_PENDING; - uint32_t AFMT_GENERIC5_FRAME_UPDATE_PENDING; - uint32_t AFMT_GENERIC6_FRAME_UPDATE_PENDING; - uint32_t AFMT_GENERIC7_FRAME_UPDATE_PENDING; - uint32_t AFMT_GENERIC0_FRAME_UPDATE; - uint32_t AFMT_GENERIC1_FRAME_UPDATE; - uint32_t AFMT_GENERIC2_FRAME_UPDATE; - uint32_t AFMT_GENERIC3_FRAME_UPDATE; - uint32_t 
AFMT_GENERIC4_FRAME_UPDATE; - uint32_t AFMT_GENERIC5_FRAME_UPDATE; - uint32_t AFMT_GENERIC6_FRAME_UPDATE; - uint32_t AFMT_GENERIC7_FRAME_UPDATE; - uint32_t HDMI_GENERIC0_CONT; - uint32_t HDMI_GENERIC0_SEND; - uint32_t HDMI_GENERIC0_LINE; - uint32_t HDMI_GENERIC1_CONT; - uint32_t HDMI_GENERIC1_SEND; - uint32_t HDMI_GENERIC1_LINE; - uint32_t HDMI_GENERIC2_CONT; - uint32_t HDMI_GENERIC2_SEND; - uint32_t HDMI_GENERIC2_LINE; - uint32_t HDMI_GENERIC3_CONT; - uint32_t HDMI_GENERIC3_SEND; - uint32_t HDMI_GENERIC3_LINE; - uint32_t HDMI_GENERIC4_CONT; - uint32_t HDMI_GENERIC4_SEND; - uint32_t HDMI_GENERIC4_LINE; - uint32_t HDMI_GENERIC5_CONT; - uint32_t HDMI_GENERIC5_SEND; - uint32_t HDMI_GENERIC5_LINE; - uint32_t HDMI_GENERIC6_CONT; - uint32_t HDMI_GENERIC6_SEND; - uint32_t HDMI_GENERIC6_LINE; - uint32_t HDMI_GENERIC7_CONT; - uint32_t HDMI_GENERIC7_SEND; - uint32_t HDMI_GENERIC7_LINE; - uint32_t DP_PIXEL_ENCODING; - uint32_t DP_COMPONENT_DEPTH; - uint32_t HDMI_PACKET_GEN_VERSION; - uint32_t HDMI_KEEPOUT_MODE; - uint32_t HDMI_DEEP_COLOR_ENABLE; - uint32_t HDMI_CLOCK_CHANNEL_RATE; - uint32_t HDMI_DEEP_COLOR_DEPTH; - uint32_t HDMI_GC_CONT; - uint32_t HDMI_GC_SEND; - uint32_t HDMI_NULL_SEND; - uint32_t HDMI_DATA_SCRAMBLE_EN; - uint32_t HDMI_AUDIO_INFO_SEND; - uint32_t AFMT_AUDIO_INFO_UPDATE; - uint32_t HDMI_AUDIO_INFO_LINE; - uint32_t HDMI_GC_AVMUTE; - uint32_t DP_MSE_RATE_X; - uint32_t DP_MSE_RATE_Y; - uint32_t DP_MSE_RATE_UPDATE_PENDING; - uint32_t DP_SEC_GSP0_ENABLE; - uint32_t DP_SEC_STREAM_ENABLE; - uint32_t DP_SEC_GSP1_ENABLE; - uint32_t DP_SEC_GSP2_ENABLE; - uint32_t DP_SEC_GSP3_ENABLE; - uint32_t DP_SEC_GSP4_ENABLE; - uint32_t DP_SEC_GSP5_ENABLE; - uint32_t DP_SEC_GSP6_ENABLE; - uint32_t DP_SEC_GSP7_ENABLE; - uint32_t DP_SEC_MPG_ENABLE; - uint32_t DP_VID_STREAM_DIS_DEFER; - uint32_t DP_VID_STREAM_ENABLE; - uint32_t DP_VID_STREAM_STATUS; - uint32_t DP_STEER_FIFO_RESET; - uint32_t DP_VID_M_N_GEN_EN; - uint32_t DP_VID_N; - uint32_t DP_VID_M; - uint32_t DIG_START; - uint32_t AFMT_AUDIO_SRC_SELECT; - uint32_t AFMT_AUDIO_CHANNEL_ENABLE; - uint32_t HDMI_AUDIO_PACKETS_PER_LINE; - uint32_t HDMI_AUDIO_DELAY_EN; - uint32_t AFMT_60958_CS_UPDATE; - uint32_t AFMT_AUDIO_LAYOUT_OVRD; - uint32_t AFMT_60958_OSF_OVRD; - uint32_t HDMI_ACR_AUTO_SEND; - uint32_t HDMI_ACR_SOURCE; - uint32_t HDMI_ACR_AUDIO_PRIORITY; - uint32_t HDMI_ACR_CTS_32; - uint32_t HDMI_ACR_N_32; - uint32_t HDMI_ACR_CTS_44; - uint32_t HDMI_ACR_N_44; - uint32_t HDMI_ACR_CTS_48; - uint32_t HDMI_ACR_N_48; - uint32_t AFMT_60958_CS_CHANNEL_NUMBER_L; - uint32_t AFMT_60958_CS_CLOCK_ACCURACY; - uint32_t AFMT_60958_CS_CHANNEL_NUMBER_R; - uint32_t AFMT_60958_CS_CHANNEL_NUMBER_2; - uint32_t AFMT_60958_CS_CHANNEL_NUMBER_3; - uint32_t AFMT_60958_CS_CHANNEL_NUMBER_4; - uint32_t AFMT_60958_CS_CHANNEL_NUMBER_5; - uint32_t AFMT_60958_CS_CHANNEL_NUMBER_6; - uint32_t AFMT_60958_CS_CHANNEL_NUMBER_7; - uint32_t DP_SEC_AUD_N; - uint32_t DP_SEC_TIMESTAMP_MODE; - uint32_t DP_SEC_ASP_ENABLE; - uint32_t DP_SEC_ATP_ENABLE; - uint32_t DP_SEC_AIP_ENABLE; - uint32_t DP_SEC_ACM_ENABLE; - uint32_t AFMT_AUDIO_SAMPLE_SEND; - uint32_t AFMT_AUDIO_CLOCK_EN; - uint32_t TMDS_PIXEL_ENCODING; - uint32_t DIG_STEREOSYNC_SELECT; - uint32_t DIG_STEREOSYNC_GATE_EN; - uint32_t TMDS_COLOR_FORMAT; - uint32_t DP_DB_DISABLE; - uint32_t DP_MSA_MISC0; - uint32_t DP_MSA_HTOTAL; - uint32_t DP_MSA_VTOTAL; - uint32_t DP_MSA_HSTART; - uint32_t DP_MSA_VSTART; - uint32_t DP_MSA_HSYNCWIDTH; - uint32_t DP_MSA_HSYNCPOLARITY; - uint32_t DP_MSA_VSYNCWIDTH; - uint32_t DP_MSA_VSYNCPOLARITY; - uint32_t 
DP_MSA_HWIDTH; - uint32_t DP_MSA_VHEIGHT; - uint32_t HDMI_DB_DISABLE; - uint32_t DP_VID_N_MUL; - uint32_t DP_VID_M_DOUBLE_VALUE_EN; -}; - -struct dcn10_stream_enc_registers { - uint32_t AFMT_CNTL; - uint32_t AFMT_AVI_INFO0; - uint32_t AFMT_AVI_INFO1; - uint32_t AFMT_AVI_INFO2; - uint32_t AFMT_AVI_INFO3; - uint32_t AFMT_GENERIC_0; - uint32_t AFMT_GENERIC_1; - uint32_t AFMT_GENERIC_2; - uint32_t AFMT_GENERIC_3; - uint32_t AFMT_GENERIC_4; - uint32_t AFMT_GENERIC_5; - uint32_t AFMT_GENERIC_6; - uint32_t AFMT_GENERIC_7; - uint32_t AFMT_GENERIC_HDR; - uint32_t AFMT_INFOFRAME_CONTROL0; - uint32_t AFMT_VBI_PACKET_CONTROL; - uint32_t AFMT_VBI_PACKET_CONTROL1; - uint32_t AFMT_AUDIO_PACKET_CONTROL; - uint32_t AFMT_AUDIO_PACKET_CONTROL2; - uint32_t AFMT_AUDIO_SRC_CONTROL; - uint32_t AFMT_60958_0; - uint32_t AFMT_60958_1; - uint32_t AFMT_60958_2; - uint32_t DIG_FE_CNTL; - uint32_t DP_MSE_RATE_CNTL; - uint32_t DP_MSE_RATE_UPDATE; - uint32_t DP_PIXEL_FORMAT; - uint32_t DP_SEC_CNTL; - uint32_t DP_STEER_FIFO; - uint32_t DP_VID_M; - uint32_t DP_VID_N; - uint32_t DP_VID_STREAM_CNTL; - uint32_t DP_VID_TIMING; - uint32_t DP_SEC_AUD_N; - uint32_t DP_SEC_TIMESTAMP; - uint32_t HDMI_CONTROL; - uint32_t HDMI_GC; - uint32_t HDMI_GENERIC_PACKET_CONTROL0; - uint32_t HDMI_GENERIC_PACKET_CONTROL1; - uint32_t HDMI_GENERIC_PACKET_CONTROL2; - uint32_t HDMI_GENERIC_PACKET_CONTROL3; - uint32_t HDMI_GENERIC_PACKET_CONTROL4; - uint32_t HDMI_GENERIC_PACKET_CONTROL5; - uint32_t HDMI_INFOFRAME_CONTROL0; - uint32_t HDMI_INFOFRAME_CONTROL1; - uint32_t HDMI_VBI_PACKET_CONTROL; - uint32_t HDMI_AUDIO_PACKET_CONTROL; - uint32_t HDMI_ACR_PACKET_CONTROL; - uint32_t HDMI_ACR_32_0; - uint32_t HDMI_ACR_32_1; - uint32_t HDMI_ACR_44_0; - uint32_t HDMI_ACR_44_1; - uint32_t HDMI_ACR_48_0; - uint32_t HDMI_ACR_48_1; - uint32_t TMDS_CNTL; - uint32_t DP_DB_CNTL; - uint32_t DP_MSA_MISC; - uint32_t DP_MSA_COLORIMETRY; - uint32_t DP_MSA_TIMING_PARAM1; - uint32_t DP_MSA_TIMING_PARAM2; - uint32_t DP_MSA_TIMING_PARAM3; - uint32_t DP_MSA_TIMING_PARAM4; - uint32_t HDMI_DB_CONTROL; + SE_REG_FIELD_LIST_DCN1_0(uint32_t); }; struct dcn10_stream_encoder { @@ -581,4 +450,75 @@ void dcn10_stream_encoder_construct( const struct dcn10_stream_encoder_shift *se_shift, const struct dcn10_stream_encoder_mask *se_mask); +void enc1_update_generic_info_packet( + struct dcn10_stream_encoder *enc1, + uint32_t packet_index, + const struct dc_info_packet *info_packet); + +void enc1_stream_encoder_dp_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + enum dc_color_space output_color_space); + +void enc1_stream_encoder_hdmi_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + int actual_pix_clk_khz, + bool enable_audio); + +void enc1_stream_encoder_dvi_set_stream_attribute( + struct stream_encoder *enc, + struct dc_crtc_timing *crtc_timing, + bool is_dual_link); + +void enc1_stream_encoder_set_mst_bandwidth( + struct stream_encoder *enc, + struct fixed31_32 avg_time_slots_per_mtp); + +void enc1_stream_encoder_update_dp_info_packets( + struct stream_encoder *enc, + const struct encoder_info_frame *info_frame); + +void enc1_stream_encoder_stop_dp_info_packets( + struct stream_encoder *enc); + +void enc1_stream_encoder_dp_blank( + struct stream_encoder *enc); + +void enc1_stream_encoder_dp_unblank( + struct stream_encoder *enc, + const struct encoder_unblank_param *param); + +void enc1_setup_stereo_sync( + struct stream_encoder *enc, + int tg_inst, bool enable); + +void enc1_stream_encoder_set_avmute( 
+ struct stream_encoder *enc, + bool enable); + +void enc1_se_audio_mute_control( + struct stream_encoder *enc, + bool mute); + +void enc1_se_dp_audio_setup( + struct stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info); + +void enc1_se_dp_audio_enable( + struct stream_encoder *enc); + +void enc1_se_dp_audio_disable( + struct stream_encoder *enc); + +void enc1_se_hdmi_audio_setup( + struct stream_encoder *enc, + unsigned int az_inst, + struct audio_info *info, + struct audio_crtc_info *audio_crtc_info); + +void enc1_se_hdmi_audio_disable( + struct stream_encoder *enc); + #endif /* __DC_STREAM_ENCODER_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 015e209e58bc..93da44527d2e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -191,6 +191,23 @@ enum controller_dp_test_pattern { CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA }; +enum dp_pixel_encoding_type { + DP_PIXEL_ENCODING_TYPE_RGB444 = 0x00000000, + DP_PIXEL_ENCODING_TYPE_YCBCR422 = 0x00000001, + DP_PIXEL_ENCODING_TYPE_YCBCR444 = 0x00000002, + DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT = 0x00000003, + DP_PIXEL_ENCODING_TYPE_Y_ONLY = 0x00000004, + DP_PIXEL_ENCODING_TYPE_YCBCR420 = 0x00000005 +}; + +enum dp_component_depth { + DP_COMPONENT_PIXEL_DEPTH_6BPC = 0x00000000, + DP_COMPONENT_PIXEL_DEPTH_8BPC = 0x00000001, + DP_COMPONENT_PIXEL_DEPTH_10BPC = 0x00000002, + DP_COMPONENT_PIXEL_DEPTH_12BPC = 0x00000003, + DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004 +}; + enum dc_lut_mode { LUT_BYPASS, LUT_RAM_A, -- cgit v1.2.1 From 35ad2254cb7d0a46f135eb57990ca6618f79510b Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 29 Mar 2018 11:23:37 -0400 Subject: drm/amd/display: csc updates require FULL update Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc.c | 12 ++++++++++-- drivers/gpu/drm/amd/display/dc/dc.h | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index b331d9e78cdb..8f09f3ab0c29 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1154,12 +1154,20 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->input_csc_color_matrix) update_flags->bits.input_csc_change = 1; - if (update_flags->bits.in_transfer_func_change - || update_flags->bits.input_csc_change) { + if (u->coeff_reduction_factor) + update_flags->bits.coeff_reduction_change = 1; + + if (update_flags->bits.in_transfer_func_change) { type = UPDATE_TYPE_MED; elevate_update_type(&overall_type, type); } + if (update_flags->bits.input_csc_change + || update_flags->bits.coeff_reduction_change) { + type = UPDATE_TYPE_FULL; + elevate_update_type(&overall_type, type); + } + return overall_type; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 63817ed56c11..7d1a3c5d1b10 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -429,6 +429,7 @@ union surface_update_flags { uint32_t position_change:1; uint32_t in_transfer_func_change:1; uint32_t input_csc_change:1; + uint32_t coeff_reduction_change:1; uint32_t output_tf_change:1; uint32_t pixel_format_change:1; -- cgit v1.2.1 From bb33b1842c3f2592a9be4e80c9d4afe6251a5da6 Mon Sep 17 00:00:00 
2001 From: Roman Li Date: Thu, 29 Mar 2018 11:14:25 -0400 Subject: drm/amd/display: Fix FBC text console corruption Signed-off-by: Roman Li Reviewed-by: Charlene Liu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dce110/dce110_compressor.c | 67 +++++++++++++++++----- 1 file changed, 54 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c index 775d3bf0bd39..9150d2694450 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c @@ -102,6 +102,43 @@ static uint32_t align_to_chunks_number_per_line(uint32_t pixels) return 256 * ((pixels + 255) / 256); } +static void reset_lb_on_vblank(struct dc_context *ctx) +{ + uint32_t value, frame_count; + uint32_t retry = 0; + uint32_t status_pos = + dm_read_reg(ctx, mmCRTC_STATUS_POSITION); + + + /* Only if CRTC is enabled and counter is moving we wait for one frame. */ + if (status_pos != dm_read_reg(ctx, mmCRTC_STATUS_POSITION)) { + /* Resetting LB on VBlank */ + value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL); + set_reg_field_value(value, 3, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL); + set_reg_field_value(value, 1, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2); + dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value); + + frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT); + + + for (retry = 100; retry > 0; retry--) { + if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT)) + break; + msleep(1); + } + if (!retry) + dm_error("Frame count did not increase for 100ms.\n"); + + /* Resetting LB on VBlank */ + value = dm_read_reg(ctx, mmLB_SYNC_RESET_SEL); + set_reg_field_value(value, 2, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL); + set_reg_field_value(value, 0, LB_SYNC_RESET_SEL, LB_SYNC_RESET_SEL2); + dm_write_reg(ctx, mmLB_SYNC_RESET_SEL, value); + + } + +} + static void wait_for_fbc_state_changed( struct dce110_compressor *cp110, bool enabled) @@ -232,19 +269,23 @@ void dce110_compressor_disable_fbc(struct compressor *compressor) { struct dce110_compressor *cp110 = TO_DCE110_COMPRESSOR(compressor); - if (compressor->options.bits.FBC_SUPPORT && - dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) { - uint32_t reg_data; - /* Turn off compression */ - reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL); - set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN); - dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data); - - /* Reset enum controller_id to undefined */ - compressor->attached_inst = 0; - compressor->is_enabled = false; - - wait_for_fbc_state_changed(cp110, false); + if (compressor->options.bits.FBC_SUPPORT) { + if (dce110_compressor_is_fbc_enabled_in_hw(compressor, NULL)) { + uint32_t reg_data; + /* Turn off compression */ + reg_data = dm_read_reg(compressor->ctx, mmFBC_CNTL); + set_reg_field_value(reg_data, 0, FBC_CNTL, FBC_GRPH_COMP_EN); + dm_write_reg(compressor->ctx, mmFBC_CNTL, reg_data); + + /* Reset enum controller_id to undefined */ + compressor->attached_inst = 0; + compressor->is_enabled = false; + + wait_for_fbc_state_changed(cp110, false); + } + + /* Sync line buffer - dce100/110 only*/ + reset_lb_on_vblank(compressor->ctx); } } -- cgit v1.2.1 From 144de8944805aef45964a904fdfd537486b1ce82 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Thu, 29 Mar 2018 13:11:10 -0400 Subject: drm/amd/display: dal 3.1.41 Signed-off-by: Yongqiang Sun Reviewed-by: Yongqiang Sun Acked-by: Harry Wentland 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7d1a3c5d1b10..23349148c7a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.40" +#define DC_VER "3.1.41" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- cgit v1.2.1 From 8feaccf71dd61f2201493068055e0d1d699014df Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 24 Apr 2018 16:35:49 +0300 Subject: drm/amdkfd: Integer overflows in ioctl args->n_devices is a u32 that comes from the user. The multiplication could overflow on 32 bit systems possibly leading to privilege escalation. Fixes: 5ec7e02854b3 ("drm/amdkfd: Add ioctls for GPUVM memory management") Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com> Reviewed-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 5694fbead9a5..ce15baf68d4c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1303,8 +1303,8 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, return -EINVAL; } - devices_arr = kmalloc(args->n_devices * sizeof(*devices_arr), - GFP_KERNEL); + devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), + GFP_KERNEL); if (!devices_arr) return -ENOMEM; @@ -1412,8 +1412,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, return -EINVAL; } - devices_arr = kmalloc(args->n_devices * sizeof(*devices_arr), - GFP_KERNEL); + devices_arr = kmalloc_array(args->n_devices, sizeof(*devices_arr), + GFP_KERNEL); if (!devices_arr) return -ENOMEM; -- cgit v1.2.1 From 24f48a42038f5baaae49b181b64782ecfb703a9c Mon Sep 17 00:00:00 2001 From: Oak Zeng Date: Tue, 1 May 2018 17:56:01 -0400 Subject: drm/amdkfd: Dump HQD of HIQ Signed-off-by: Oak Zeng Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 9af94b1f9074..668ad07ebe1f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1713,6 +1713,18 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) int pipe, queue; int r = 0; + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, + KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); + if (!r) { + seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n", + KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, + KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), + KFD_CIK_HIQ_QUEUE); + seq_reg_dump(m, dump, n_regs); + + kfree(dump); + } + for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { int pipe_offset = pipe * get_queues_per_pipe(dqm); -- cgit v1.2.1 From 87e6d4e0777daf774ed9aa59ed25b6ebaaad7052 Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Tue, 1 May 2018 17:56:02 -0400 Subject: drm/amdkfd: Reduce priority of context-saving waves before spin-wait Synchronization between context-saving 
wavefronts is achieved by sending a SAVEWAVE message to the SPI and then spin-waiting for a response. These spin-waiting wavefronts may inhibit the progress of other wavefronts in the context save handler, leading to the synchronization condition never being achieved. Before spin-waiting reduce the priority of each wavefront to guarantee forward progress in the others. Signed-off-by: Jay Cornwall Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 10 ++++++++-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 8 +++++++- 2 files changed, 15 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm index 997a383dcb8b..34eabcdd27a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm @@ -98,6 +98,7 @@ var SWIZZLE_EN = 0 //whether we use swi /**************************************************************************/ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23 var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000 +var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1 var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12 @@ -319,6 +320,10 @@ end s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC end + // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for. + s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT) + s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp + L_SLEEP: s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0 @@ -1132,7 +1137,7 @@ end #endif static const uint32_t cwsr_trap_gfx8_hex[] = { - 0xbf820001, 0xbf820123, + 0xbf820001, 0xbf820125, 0xb8f4f802, 0x89748674, 0xb8f5f803, 0x8675ff75, 0x00000400, 0xbf850011, @@ -1158,7 +1163,8 @@ static const uint32_t cwsr_trap_gfx8_hex[] = { 0x867aff7a, 0x00007fff, 0xb97af807, 0xbef2007e, 0xbef3007f, 0xbefe0180, - 0xbf900004, 0xbf8e0002, + 0xbf900004, 0x877a8474, + 0xb97af802, 0xbf8e0002, 0xbf88fffe, 0xbef8007e, 0x8679ff7f, 0x0000ffff, 0x8779ff79, 0x00040000, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index 033580c997ea..cac8d4992e04 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -97,6 +97,7 @@ var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing /**************************************************************************/ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23 var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000 +var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1 var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006 var SQ_WAVE_STATUS_HALT_MASK = 0x2000 @@ -362,6 +363,10 @@ end s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC end + // Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for. 
+ s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT) + s_setreg_b32 hwreg(HW_REG_STATUS), s_save_tmp + L_SLEEP: s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD, which will cause SQ hang, since the 7,8th wave could not get arbit to exec inst, while other waves are stuck into the sleep-loop and waiting for wrexec!=0 @@ -1210,7 +1215,7 @@ end #endif static const uint32_t cwsr_trap_gfx9_hex[] = { - 0xbf820001, 0xbf820158, + 0xbf820001, 0xbf82015a, 0xb8f8f802, 0x89788678, 0xb8f1f803, 0x866eff71, 0x00000400, 0xbf850034, @@ -1249,6 +1254,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0x00007fff, 0xb970f807, 0xbeee007e, 0xbeef007f, 0xbefe0180, 0xbf900004, + 0x87708478, 0xb970f802, 0xbf8e0002, 0xbf88fffe, 0xb8f02a05, 0x80708170, 0x8e708a70, 0xb8f11605, -- cgit v1.2.1 From 2774c63ef3dbb6052dd1d224b38a9decf89be61c Mon Sep 17 00:00:00 2001 From: Jay Cornwall Date: Tue, 1 May 2018 17:56:03 -0400 Subject: drm/amdkfd: Use volatile MTYPE in default/alternate apertures MTYPE_NC_NV (0) marks scalar/vector L1 cache lines as non-volatile. Cache lines loaded through these apertures are intended to be invalidated before (and sometimes during) a dispatch. The non-volatile qualifier prevents these cache lines from being distinguished from those loaded through the private aperture. Use MTYPE_NC (1) instead on both Gfx7 and Gfx8. This allows the compiler to use the BUFFER_WBINVL1_VOL instruction and is a precursor to automatic per-dispatch scalar/vector L1 volatile invalidation. Signed-off-by: Jay Cornwall Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/cik_regs.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h index 48769d12dd7b..37ce6dd65391 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_regs.h +++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h @@ -33,7 +33,8 @@ #define APE1_MTYPE(x) ((x) << 7) /* valid for both DEFAULT_MTYPE and APE1_MTYPE */ -#define MTYPE_CACHED 0 +#define MTYPE_CACHED_NV 0 +#define MTYPE_CACHED 1 #define MTYPE_NONCACHED 3 #define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8) -- cgit v1.2.1 From fa7e65147e5dcafdf8d6c3787e5b22ec5f6bcbdc Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Tue, 1 May 2018 17:56:04 -0400 Subject: drm/amdkfd: use %px to print user space address instead of %p Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index ce15baf68d4c..beaa613c22f4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -233,7 +233,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, pr_debug("Queue Size: 0x%llX, %u\n", q_properties->queue_size, args->ring_size); - pr_debug("Queue r/w Pointers: %p, %p\n", + pr_debug("Queue r/w Pointers: %px, %px\n", q_properties->read_ptr, q_properties->write_ptr); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c index a5315d4f1c95..6dcd621e5b71 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c @@ -36,8 +36,8 @@ void 
print_queue_properties(struct queue_properties *q) pr_debug("Queue Address: 0x%llX\n", q->queue_address); pr_debug("Queue Id: %u\n", q->queue_id); pr_debug("Queue Process Vmid: %u\n", q->vmid); - pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr); - pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr); + pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr); + pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr); pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr); pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off); } @@ -53,8 +53,8 @@ void print_queue(struct queue *q) pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address); pr_debug("Queue Id: %u\n", q->properties.queue_id); pr_debug("Queue Process Vmid: %u\n", q->properties.vmid); - pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr); - pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr); + pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr); + pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr); pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr); pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off); pr_debug("Queue MQD Address: 0x%p\n", q->mqd); -- cgit v1.2.1 From a2e94158b83185c9dac430cb53bff26737a786ef Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 1 May 2018 17:56:05 -0400 Subject: drm/amdkfd: Remove redundant include of amd-iommu.h Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index dd6c7535b6b4..c1d9e2772cbc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -20,9 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2) -#include -#endif #include #include #include -- cgit v1.2.1 From 0db54b24ad676c3f2d0cf5291c9d170e3e15f213 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 1 May 2018 17:56:06 -0400 Subject: drm/amdkfd: Separate trap handler assembly code and its hex values Since the assembly code is inside "#if 0", it is ineffective. Despite that, during debugging, we need to change the assembly code, extract it into a separate file and compile the new file into hex values using sp3. That process also requires us to remove "#if 0" and modify lines starting with "#", so that sp3 can successfully compile the new file. With this change, all of the above chores are no longer needed, and cwsr_trap_handler_gfx*.asm can be directly used by sp3 to generate its hex values. 
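For context on the layout this patch creates: the generated cwsr_trap_handler.h reduces to plain arrays of dwords that the driver links in and later uploads. A minimal, self-contained C sketch of how such arrays might be consumed; the cwsr_select_isa() helper and the is_gfx9 flag are illustrative only, not part of this series, and the stand-in array contents below are just the first two dwords quoted from the real tables:

#include <stddef.h>
#include <stdint.h>

/* Stand-ins for the tables emitted by sp3 into cwsr_trap_handler.h. */
static const uint32_t cwsr_trap_gfx8_hex[] = { 0xbf820001, 0xbf820125 };
static const uint32_t cwsr_trap_gfx9_hex[] = { 0xbf820001, 0xbf82015a };

/* Hypothetical consumer: pick the pre-assembled trap handler image for the
 * target ASIC and report its size so it can later be copied into the CWSR
 * region.
 */
void cwsr_select_isa(int is_gfx9, const uint32_t **isa, size_t *isa_size)
{
	if (is_gfx9) {
		*isa = cwsr_trap_gfx9_hex;
		*isa_size = sizeof(cwsr_trap_gfx9_hex);
	} else {
		*isa = cwsr_trap_gfx8_hex;
		*isa_size = sizeof(cwsr_trap_gfx8_hex);
	}
}
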
Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 560 +++++++++++++++++++++ .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 267 +--------- .../gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 300 +---------- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 +- 4 files changed, 575 insertions(+), 555 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h new file mode 100644 index 000000000000..a546a219d025 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -0,0 +1,560 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +static const uint32_t cwsr_trap_gfx8_hex[] = { + 0xbf820001, 0xbf820125, + 0xb8f4f802, 0x89748674, + 0xb8f5f803, 0x8675ff75, + 0x00000400, 0xbf850011, + 0xc00a1e37, 0x00000000, + 0xbf8c007f, 0x87777978, + 0xbf840002, 0xb974f802, + 0xbe801d78, 0xb8f5f803, + 0x8675ff75, 0x000001ff, + 0xbf850002, 0x80708470, + 0x82718071, 0x8671ff71, + 0x0000ffff, 0xb974f802, + 0xbe801f70, 0xb8f5f803, + 0x8675ff75, 0x00000100, + 0xbf840006, 0xbefa0080, + 0xb97a0203, 0x8671ff71, + 0x0000ffff, 0x80f08870, + 0x82f18071, 0xbefa0080, + 0xb97a0283, 0xbef60068, + 0xbef70069, 0xb8fa1c07, + 0x8e7a9c7a, 0x87717a71, + 0xb8fa03c7, 0x8e7a9b7a, + 0x87717a71, 0xb8faf807, + 0x867aff7a, 0x00007fff, + 0xb97af807, 0xbef2007e, + 0xbef3007f, 0xbefe0180, + 0xbf900004, 0x877a8474, + 0xb97af802, 0xbf8e0002, + 0xbf88fffe, 0xbef8007e, + 0x8679ff7f, 0x0000ffff, + 0x8779ff79, 0x00040000, + 0xbefa0080, 0xbefb00ff, + 0x00807fac, 0x867aff7f, + 0x08000000, 0x8f7a837a, + 0x877b7a7b, 0x867aff7f, + 0x70000000, 0x8f7a817a, + 0x877b7a7b, 0xbeef007c, + 0xbeee0080, 0xb8ee2a05, + 0x806e816e, 0x8e6e8a6e, + 0xb8fa1605, 0x807a817a, + 0x8e7a867a, 0x806e7a6e, + 0xbefa0084, 0xbefa00ff, + 0x01000000, 0xbefe007c, + 0xbefc006e, 0xc0611bfc, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0xbefe007c, + 0xbefc006e, 0xc0611c3c, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0xbefe007c, + 0xbefc006e, 0xc0611c7c, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0xbefe007c, + 0xbefc006e, 0xc0611cbc, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0xbefe007c, + 0xbefc006e, 0xc0611cfc, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0xbefe007c, + 0xbefc006e, 0xc0611d3c, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0xb8f5f803, + 0xbefe007c, 0xbefc006e, + 0xc0611d7c, 0x0000007c, + 0x806e846e, 0xbefc007e, + 0xbefe007c, 0xbefc006e, + 0xc0611dbc, 0x0000007c, + 0x806e846e, 0xbefc007e, + 0xbefe007c, 0xbefc006e, + 0xc0611dfc, 0x0000007c, + 0x806e846e, 0xbefc007e, + 0xb8eff801, 0xbefe007c, + 0xbefc006e, 0xc0611bfc, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0xbefe007c, + 0xbefc006e, 0xc0611b3c, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0xbefe007c, + 0xbefc006e, 0xc0611b7c, + 0x0000007c, 0x806e846e, + 0xbefc007e, 0x867aff7f, + 0x04000000, 0xbef30080, + 0x8773737a, 0xb8ee2a05, + 0x806e816e, 0x8e6e8a6e, + 0xb8f51605, 0x80758175, + 0x8e758475, 0x8e7a8275, + 0xbefa00ff, 0x01000000, + 0xbef60178, 0x80786e78, + 0x82798079, 0xbefc0080, + 0xbe802b00, 0xbe822b02, + 0xbe842b04, 0xbe862b06, + 0xbe882b08, 0xbe8a2b0a, + 0xbe8c2b0c, 0xbe8e2b0e, + 0xc06b003c, 0x00000000, + 0xc06b013c, 0x00000010, + 0xc06b023c, 0x00000020, + 0xc06b033c, 0x00000030, + 0x8078c078, 0x82798079, + 0x807c907c, 0xbf0a757c, + 0xbf85ffeb, 0xbef80176, + 0xbeee0080, 0xbefe00c1, + 0xbeff00c1, 0xbefa00ff, + 0x01000000, 0xe0724000, + 0x6e1e0000, 0xe0724100, + 0x6e1e0100, 0xe0724200, + 0x6e1e0200, 0xe0724300, + 0x6e1e0300, 0xbefe00c1, + 0xbeff00c1, 0xb8f54306, + 0x8675c175, 0xbf84002c, + 0xbf8a0000, 0x867aff73, + 0x04000000, 0xbf840028, + 0x8e758675, 0x8e758275, + 0xbefa0075, 0xb8ee2a05, + 0x806e816e, 0x8e6e8a6e, + 0xb8fa1605, 0x807a817a, + 0x8e7a867a, 0x806e7a6e, + 0x806eff6e, 0x00000080, + 0xbefa00ff, 0x01000000, + 0xbefc0080, 0xd28c0002, + 0x000100c1, 0xd28d0003, + 0x000204c1, 0xd1060002, + 0x00011103, 0x7e0602ff, + 0x00000200, 0xbefc00ff, + 0x00010000, 0xbe80007b, + 0x867bff7b, 0xff7fffff, + 0x877bff7b, 0x00058000, + 0xd8ec0000, 0x00000002, + 0xbf8c007f, 0xe0765000, + 0x6e1e0002, 0x32040702, + 0xd0c9006a, 0x0000eb02, + 0xbf87fff7, 0xbefb0000, + 0xbeee00ff, 0x00000400, + 0xbefe00c1, 0xbeff00c1, + 0xb8f52a05, 0x80758175, + 0x8e758275, 0x8e7a8875, + 0xbefa00ff, 
0x01000000, + 0xbefc0084, 0xbf0a757c, + 0xbf840015, 0xbf11017c, + 0x8075ff75, 0x00001000, + 0x7e000300, 0x7e020301, + 0x7e040302, 0x7e060303, + 0xe0724000, 0x6e1e0000, + 0xe0724100, 0x6e1e0100, + 0xe0724200, 0x6e1e0200, + 0xe0724300, 0x6e1e0300, + 0x807c847c, 0x806eff6e, + 0x00000400, 0xbf0a757c, + 0xbf85ffef, 0xbf9c0000, + 0xbf8200ca, 0xbef8007e, + 0x8679ff7f, 0x0000ffff, + 0x8779ff79, 0x00040000, + 0xbefa0080, 0xbefb00ff, + 0x00807fac, 0x8676ff7f, + 0x08000000, 0x8f768376, + 0x877b767b, 0x8676ff7f, + 0x70000000, 0x8f768176, + 0x877b767b, 0x8676ff7f, + 0x04000000, 0xbf84001e, + 0xbefe00c1, 0xbeff00c1, + 0xb8f34306, 0x8673c173, + 0xbf840019, 0x8e738673, + 0x8e738273, 0xbefa0073, + 0xb8f22a05, 0x80728172, + 0x8e728a72, 0xb8f61605, + 0x80768176, 0x8e768676, + 0x80727672, 0x8072ff72, + 0x00000080, 0xbefa00ff, + 0x01000000, 0xbefc0080, + 0xe0510000, 0x721e0000, + 0xe0510100, 0x721e0000, + 0x807cff7c, 0x00000200, + 0x8072ff72, 0x00000200, + 0xbf0a737c, 0xbf85fff6, + 0xbef20080, 0xbefe00c1, + 0xbeff00c1, 0xb8f32a05, + 0x80738173, 0x8e738273, + 0x8e7a8873, 0xbefa00ff, + 0x01000000, 0xbef60072, + 0x8072ff72, 0x00000400, + 0xbefc0084, 0xbf11087c, + 0x8073ff73, 0x00008000, + 0xe0524000, 0x721e0000, + 0xe0524100, 0x721e0100, + 0xe0524200, 0x721e0200, + 0xe0524300, 0x721e0300, + 0xbf8c0f70, 0x7e000300, + 0x7e020301, 0x7e040302, + 0x7e060303, 0x807c847c, + 0x8072ff72, 0x00000400, + 0xbf0a737c, 0xbf85ffee, + 0xbf9c0000, 0xe0524000, + 0x761e0000, 0xe0524100, + 0x761e0100, 0xe0524200, + 0x761e0200, 0xe0524300, + 0x761e0300, 0xb8f22a05, + 0x80728172, 0x8e728a72, + 0xb8f61605, 0x80768176, + 0x8e768676, 0x80727672, + 0x80f2c072, 0xb8f31605, + 0x80738173, 0x8e738473, + 0x8e7a8273, 0xbefa00ff, + 0x01000000, 0xbefc0073, + 0xc031003c, 0x00000072, + 0x80f2c072, 0xbf8c007f, + 0x80fc907c, 0xbe802d00, + 0xbe822d02, 0xbe842d04, + 0xbe862d06, 0xbe882d08, + 0xbe8a2d0a, 0xbe8c2d0c, + 0xbe8e2d0e, 0xbf06807c, + 0xbf84fff1, 0xb8f22a05, + 0x80728172, 0x8e728a72, + 0xb8f61605, 0x80768176, + 0x8e768676, 0x80727672, + 0xbefa0084, 0xbefa00ff, + 0x01000000, 0xc0211cfc, + 0x00000072, 0x80728472, + 0xc0211c3c, 0x00000072, + 0x80728472, 0xc0211c7c, + 0x00000072, 0x80728472, + 0xc0211bbc, 0x00000072, + 0x80728472, 0xc0211bfc, + 0x00000072, 0x80728472, + 0xc0211d3c, 0x00000072, + 0x80728472, 0xc0211d7c, + 0x00000072, 0x80728472, + 0xc0211a3c, 0x00000072, + 0x80728472, 0xc0211a7c, + 0x00000072, 0x80728472, + 0xc0211dfc, 0x00000072, + 0x80728472, 0xc0211b3c, + 0x00000072, 0x80728472, + 0xc0211b7c, 0x00000072, + 0x80728472, 0xbf8c007f, + 0x8671ff71, 0x0000ffff, + 0xbefc0073, 0xbefe006e, + 0xbeff006f, 0x867375ff, + 0x000003ff, 0xb9734803, + 0x867375ff, 0xfffff800, + 0x8f738b73, 0xb973a2c3, + 0xb977f801, 0x8673ff71, + 0xf0000000, 0x8f739c73, + 0x8e739073, 0xbef60080, + 0x87767376, 0x8673ff71, + 0x08000000, 0x8f739b73, + 0x8e738f73, 0x87767376, + 0x8673ff74, 0x00800000, + 0x8f739773, 0xb976f807, + 0x86fe7e7e, 0x86ea6a6a, + 0xb974f802, 0xbf8a0000, + 0x95807370, 0xbf810000, +}; + + +static const uint32_t cwsr_trap_gfx9_hex[] = { + 0xbf820001, 0xbf82015a, + 0xb8f8f802, 0x89788678, + 0xb8f1f803, 0x866eff71, + 0x00000400, 0xbf850034, + 0x866eff71, 0x00000800, + 0xbf850003, 0x866eff71, + 0x00000100, 0xbf840008, + 0x866eff78, 0x00002000, + 0xbf840001, 0xbf810000, + 0x8778ff78, 0x00002000, + 0x80ec886c, 0x82ed806d, + 0xb8eef807, 0x866fff6e, + 0x001f8000, 0x8e6f8b6f, + 0x8977ff77, 0xfc000000, + 0x87776f77, 0x896eff6e, + 0x001f8000, 0xb96ef807, + 0xb8f0f812, 0xb8f1f813, + 0x8ef08870, 0xc0071bb8, + 0x00000000, 0xbf8cc07f, + 0xc0071c38, 0x00000008, + 
0xbf8cc07f, 0x86ee6e6e, + 0xbf840001, 0xbe801d6e, + 0xb8f1f803, 0x8671ff71, + 0x000001ff, 0xbf850002, + 0x806c846c, 0x826d806d, + 0x866dff6d, 0x0000ffff, + 0x8f6e8b77, 0x866eff6e, + 0x001f8000, 0xb96ef807, + 0x86fe7e7e, 0x86ea6a6a, + 0xb978f802, 0xbe801f6c, + 0x866dff6d, 0x0000ffff, + 0xbef00080, 0xb9700283, + 0xb8f02407, 0x8e709c70, + 0x876d706d, 0xb8f003c7, + 0x8e709b70, 0x876d706d, + 0xb8f0f807, 0x8670ff70, + 0x00007fff, 0xb970f807, + 0xbeee007e, 0xbeef007f, + 0xbefe0180, 0xbf900004, + 0x87708478, 0xb970f802, + 0xbf8e0002, 0xbf88fffe, + 0xb8f02a05, 0x80708170, + 0x8e708a70, 0xb8f11605, + 0x80718171, 0x8e718671, + 0x80707170, 0x80707e70, + 0x8271807f, 0x8671ff71, + 0x0000ffff, 0xc0471cb8, + 0x00000040, 0xbf8cc07f, + 0xc04b1d38, 0x00000048, + 0xbf8cc07f, 0xc0431e78, + 0x00000058, 0xbf8cc07f, + 0xc0471eb8, 0x0000005c, + 0xbf8cc07f, 0xbef4007e, + 0x8675ff7f, 0x0000ffff, + 0x8775ff75, 0x00040000, + 0xbef60080, 0xbef700ff, + 0x00807fac, 0x8670ff7f, + 0x08000000, 0x8f708370, + 0x87777077, 0x8670ff7f, + 0x70000000, 0x8f708170, + 0x87777077, 0xbefb007c, + 0xbefa0080, 0xb8fa2a05, + 0x807a817a, 0x8e7a8a7a, + 0xb8f01605, 0x80708170, + 0x8e708670, 0x807a707a, + 0xbef60084, 0xbef600ff, + 0x01000000, 0xbefe007c, + 0xbefc007a, 0xc0611efa, + 0x0000007c, 0xbf8cc07f, + 0x807a847a, 0xbefc007e, + 0xbefe007c, 0xbefc007a, + 0xc0611b3a, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xbefe007c, + 0xbefc007a, 0xc0611b7a, + 0x0000007c, 0xbf8cc07f, + 0x807a847a, 0xbefc007e, + 0xbefe007c, 0xbefc007a, + 0xc0611bba, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xbefe007c, + 0xbefc007a, 0xc0611bfa, + 0x0000007c, 0xbf8cc07f, + 0x807a847a, 0xbefc007e, + 0xbefe007c, 0xbefc007a, + 0xc0611e3a, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xb8f1f803, + 0xbefe007c, 0xbefc007a, + 0xc0611c7a, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xbefe007c, + 0xbefc007a, 0xc0611a3a, + 0x0000007c, 0xbf8cc07f, + 0x807a847a, 0xbefc007e, + 0xbefe007c, 0xbefc007a, + 0xc0611a7a, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0xb8fbf801, + 0xbefe007c, 0xbefc007a, + 0xc0611efa, 0x0000007c, + 0xbf8cc07f, 0x807a847a, + 0xbefc007e, 0x8670ff7f, + 0x04000000, 0xbeef0080, + 0x876f6f70, 0xb8fa2a05, + 0x807a817a, 0x8e7a8a7a, + 0xb8f11605, 0x80718171, + 0x8e718471, 0x8e768271, + 0xbef600ff, 0x01000000, + 0xbef20174, 0x80747a74, + 0x82758075, 0xbefc0080, + 0xbf800000, 0xbe802b00, + 0xbe822b02, 0xbe842b04, + 0xbe862b06, 0xbe882b08, + 0xbe8a2b0a, 0xbe8c2b0c, + 0xbe8e2b0e, 0xc06b003a, + 0x00000000, 0xbf8cc07f, + 0xc06b013a, 0x00000010, + 0xbf8cc07f, 0xc06b023a, + 0x00000020, 0xbf8cc07f, + 0xc06b033a, 0x00000030, + 0xbf8cc07f, 0x8074c074, + 0x82758075, 0x807c907c, + 0xbf0a717c, 0xbf85ffe7, + 0xbef40172, 0xbefa0080, + 0xbefe00c1, 0xbeff00c1, + 0xbee80080, 0xbee90080, + 0xbef600ff, 0x01000000, + 0xe0724000, 0x7a1d0000, + 0xe0724100, 0x7a1d0100, + 0xe0724200, 0x7a1d0200, + 0xe0724300, 0x7a1d0300, + 0xbefe00c1, 0xbeff00c1, + 0xb8f14306, 0x8671c171, + 0xbf84002c, 0xbf8a0000, + 0x8670ff6f, 0x04000000, + 0xbf840028, 0x8e718671, + 0x8e718271, 0xbef60071, + 0xb8fa2a05, 0x807a817a, + 0x8e7a8a7a, 0xb8f01605, + 0x80708170, 0x8e708670, + 0x807a707a, 0x807aff7a, + 0x00000080, 0xbef600ff, + 0x01000000, 0xbefc0080, + 0xd28c0002, 0x000100c1, + 0xd28d0003, 0x000204c1, + 0xd1060002, 0x00011103, + 0x7e0602ff, 0x00000200, + 0xbefc00ff, 0x00010000, + 0xbe800077, 0x8677ff77, + 0xff7fffff, 0x8777ff77, + 0x00058000, 0xd8ec0000, + 0x00000002, 0xbf8cc07f, + 0xe0765000, 0x7a1d0002, + 0x68040702, 0xd0c9006a, + 0x0000e302, 0xbf87fff7, + 0xbef70000, 
0xbefa00ff, + 0x00000400, 0xbefe00c1, + 0xbeff00c1, 0xb8f12a05, + 0x80718171, 0x8e718271, + 0x8e768871, 0xbef600ff, + 0x01000000, 0xbefc0084, + 0xbf0a717c, 0xbf840015, + 0xbf11017c, 0x8071ff71, + 0x00001000, 0x7e000300, + 0x7e020301, 0x7e040302, + 0x7e060303, 0xe0724000, + 0x7a1d0000, 0xe0724100, + 0x7a1d0100, 0xe0724200, + 0x7a1d0200, 0xe0724300, + 0x7a1d0300, 0x807c847c, + 0x807aff7a, 0x00000400, + 0xbf0a717c, 0xbf85ffef, + 0xbf9c0000, 0xbf8200d9, + 0xbef4007e, 0x8675ff7f, + 0x0000ffff, 0x8775ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x00807fac, + 0x866eff7f, 0x08000000, + 0x8f6e836e, 0x87776e77, + 0x866eff7f, 0x70000000, + 0x8f6e816e, 0x87776e77, + 0x866eff7f, 0x04000000, + 0xbf84001e, 0xbefe00c1, + 0xbeff00c1, 0xb8ef4306, + 0x866fc16f, 0xbf840019, + 0x8e6f866f, 0x8e6f826f, + 0xbef6006f, 0xb8f82a05, + 0x80788178, 0x8e788a78, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0x8078ff78, 0x00000080, + 0xbef600ff, 0x01000000, + 0xbefc0080, 0xe0510000, + 0x781d0000, 0xe0510100, + 0x781d0000, 0x807cff7c, + 0x00000200, 0x8078ff78, + 0x00000200, 0xbf0a6f7c, + 0xbf85fff6, 0xbef80080, + 0xbefe00c1, 0xbeff00c1, + 0xb8ef2a05, 0x806f816f, + 0x8e6f826f, 0x8e76886f, + 0xbef600ff, 0x01000000, + 0xbeee0078, 0x8078ff78, + 0x00000400, 0xbefc0084, + 0xbf11087c, 0x806fff6f, + 0x00008000, 0xe0524000, + 0x781d0000, 0xe0524100, + 0x781d0100, 0xe0524200, + 0x781d0200, 0xe0524300, + 0x781d0300, 0xbf8c0f70, + 0x7e000300, 0x7e020301, + 0x7e040302, 0x7e060303, + 0x807c847c, 0x8078ff78, + 0x00000400, 0xbf0a6f7c, + 0xbf85ffee, 0xbf9c0000, + 0xe0524000, 0x6e1d0000, + 0xe0524100, 0x6e1d0100, + 0xe0524200, 0x6e1d0200, + 0xe0524300, 0x6e1d0300, + 0xb8f82a05, 0x80788178, + 0x8e788a78, 0xb8ee1605, + 0x806e816e, 0x8e6e866e, + 0x80786e78, 0x80f8c078, + 0xb8ef1605, 0x806f816f, + 0x8e6f846f, 0x8e76826f, + 0xbef600ff, 0x01000000, + 0xbefc006f, 0xc031003a, + 0x00000078, 0x80f8c078, + 0xbf8cc07f, 0x80fc907c, + 0xbf800000, 0xbe802d00, + 0xbe822d02, 0xbe842d04, + 0xbe862d06, 0xbe882d08, + 0xbe8a2d0a, 0xbe8c2d0c, + 0xbe8e2d0e, 0xbf06807c, + 0xbf84fff0, 0xb8f82a05, + 0x80788178, 0x8e788a78, + 0xb8ee1605, 0x806e816e, + 0x8e6e866e, 0x80786e78, + 0xbef60084, 0xbef600ff, + 0x01000000, 0xc0211bfa, + 0x00000078, 0x80788478, + 0xc0211b3a, 0x00000078, + 0x80788478, 0xc0211b7a, + 0x00000078, 0x80788478, + 0xc0211eba, 0x00000078, + 0x80788478, 0xc0211efa, + 0x00000078, 0x80788478, + 0xc0211c3a, 0x00000078, + 0x80788478, 0xc0211c7a, + 0x00000078, 0x80788478, + 0xc0211a3a, 0x00000078, + 0x80788478, 0xc0211a7a, + 0x00000078, 0x80788478, + 0xc0211cfa, 0x00000078, + 0x80788478, 0xbf8cc07f, + 0x866dff6d, 0x0000ffff, + 0xbefc006f, 0xbefe007a, + 0xbeff007b, 0x866f71ff, + 0x000003ff, 0xb96f4803, + 0x866f71ff, 0xfffff800, + 0x8f6f8b6f, 0xb96fa2c3, + 0xb973f801, 0xb8ee2a05, + 0x806e816e, 0x8e6e8a6e, + 0xb8ef1605, 0x806f816f, + 0x8e6f866f, 0x806e6f6e, + 0x806e746e, 0x826f8075, + 0x866fff6f, 0x0000ffff, + 0xc0071cb7, 0x00000040, + 0xc00b1d37, 0x00000048, + 0xc0031e77, 0x00000058, + 0xc0071eb7, 0x0000005c, + 0xbf8cc07f, 0x866fff6d, + 0xf0000000, 0x8f6f9c6f, + 0x8e6f906f, 0xbeee0080, + 0x876e6f6e, 0x866fff6d, + 0x08000000, 0x8f6f9b6f, + 0x8e6f8f6f, 0x876e6f6e, + 0x866fff70, 0x00800000, + 0x8f6f976f, 0xb96ef807, + 0x86fe7e7e, 0x86ea6a6a, + 0xb970f802, 0xbf8a0000, + 0x95806f6c, 0xbf810000, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm index 34eabcdd27a0..658a4c6be8e4 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm +++ 
b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm @@ -20,9 +20,12 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#if 0 -HW (VI) source code for CWSR trap handler -#Version 18 + multiple trap handler +/* To compile this assembly code: + * PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex + */ + +/* HW (VI) source code for CWSR trap handler */ +/* Version 18 + multiple trap handler */ // this performance-optimal version was originally from Seven Xu at SRDC @@ -150,7 +153,7 @@ var s_save_spi_init_lo = exec_lo var s_save_spi_init_hi = exec_hi //tba_lo and tba_hi need to be saved/restored -var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3??h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]} +var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]} var s_save_pc_hi = ttmp1 var s_save_exec_lo = ttmp2 var s_save_exec_hi = ttmp3 @@ -1132,259 +1135,3 @@ end function get_hwreg_size_bytes return 128 //HWREG size 128 bytes end - - -#endif - -static const uint32_t cwsr_trap_gfx8_hex[] = { - 0xbf820001, 0xbf820125, - 0xb8f4f802, 0x89748674, - 0xb8f5f803, 0x8675ff75, - 0x00000400, 0xbf850011, - 0xc00a1e37, 0x00000000, - 0xbf8c007f, 0x87777978, - 0xbf840002, 0xb974f802, - 0xbe801d78, 0xb8f5f803, - 0x8675ff75, 0x000001ff, - 0xbf850002, 0x80708470, - 0x82718071, 0x8671ff71, - 0x0000ffff, 0xb974f802, - 0xbe801f70, 0xb8f5f803, - 0x8675ff75, 0x00000100, - 0xbf840006, 0xbefa0080, - 0xb97a0203, 0x8671ff71, - 0x0000ffff, 0x80f08870, - 0x82f18071, 0xbefa0080, - 0xb97a0283, 0xbef60068, - 0xbef70069, 0xb8fa1c07, - 0x8e7a9c7a, 0x87717a71, - 0xb8fa03c7, 0x8e7a9b7a, - 0x87717a71, 0xb8faf807, - 0x867aff7a, 0x00007fff, - 0xb97af807, 0xbef2007e, - 0xbef3007f, 0xbefe0180, - 0xbf900004, 0x877a8474, - 0xb97af802, 0xbf8e0002, - 0xbf88fffe, 0xbef8007e, - 0x8679ff7f, 0x0000ffff, - 0x8779ff79, 0x00040000, - 0xbefa0080, 0xbefb00ff, - 0x00807fac, 0x867aff7f, - 0x08000000, 0x8f7a837a, - 0x877b7a7b, 0x867aff7f, - 0x70000000, 0x8f7a817a, - 0x877b7a7b, 0xbeef007c, - 0xbeee0080, 0xb8ee2a05, - 0x806e816e, 0x8e6e8a6e, - 0xb8fa1605, 0x807a817a, - 0x8e7a867a, 0x806e7a6e, - 0xbefa0084, 0xbefa00ff, - 0x01000000, 0xbefe007c, - 0xbefc006e, 0xc0611bfc, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0xbefe007c, - 0xbefc006e, 0xc0611c3c, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0xbefe007c, - 0xbefc006e, 0xc0611c7c, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0xbefe007c, - 0xbefc006e, 0xc0611cbc, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0xbefe007c, - 0xbefc006e, 0xc0611cfc, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0xbefe007c, - 0xbefc006e, 0xc0611d3c, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0xb8f5f803, - 0xbefe007c, 0xbefc006e, - 0xc0611d7c, 0x0000007c, - 0x806e846e, 0xbefc007e, - 0xbefe007c, 0xbefc006e, - 0xc0611dbc, 0x0000007c, - 0x806e846e, 0xbefc007e, - 0xbefe007c, 0xbefc006e, - 0xc0611dfc, 0x0000007c, - 0x806e846e, 0xbefc007e, - 0xb8eff801, 0xbefe007c, - 0xbefc006e, 0xc0611bfc, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0xbefe007c, - 0xbefc006e, 0xc0611b3c, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0xbefe007c, - 0xbefc006e, 0xc0611b7c, - 0x0000007c, 0x806e846e, - 0xbefc007e, 0x867aff7f, - 0x04000000, 0xbef30080, - 0x8773737a, 0xb8ee2a05, - 0x806e816e, 0x8e6e8a6e, - 0xb8f51605, 0x80758175, - 0x8e758475, 0x8e7a8275, - 0xbefa00ff, 0x01000000, - 0xbef60178, 0x80786e78, - 0x82798079, 0xbefc0080, - 0xbe802b00, 0xbe822b02, - 0xbe842b04, 0xbe862b06, - 0xbe882b08, 0xbe8a2b0a, - 0xbe8c2b0c, 0xbe8e2b0e, - 0xc06b003c, 0x00000000, - 0xc06b013c, 0x00000010, - 0xc06b023c, 0x00000020, - 0xc06b033c, 0x00000030, - 0x8078c078, 0x82798079, - 
0x807c907c, 0xbf0a757c, - 0xbf85ffeb, 0xbef80176, - 0xbeee0080, 0xbefe00c1, - 0xbeff00c1, 0xbefa00ff, - 0x01000000, 0xe0724000, - 0x6e1e0000, 0xe0724100, - 0x6e1e0100, 0xe0724200, - 0x6e1e0200, 0xe0724300, - 0x6e1e0300, 0xbefe00c1, - 0xbeff00c1, 0xb8f54306, - 0x8675c175, 0xbf84002c, - 0xbf8a0000, 0x867aff73, - 0x04000000, 0xbf840028, - 0x8e758675, 0x8e758275, - 0xbefa0075, 0xb8ee2a05, - 0x806e816e, 0x8e6e8a6e, - 0xb8fa1605, 0x807a817a, - 0x8e7a867a, 0x806e7a6e, - 0x806eff6e, 0x00000080, - 0xbefa00ff, 0x01000000, - 0xbefc0080, 0xd28c0002, - 0x000100c1, 0xd28d0003, - 0x000204c1, 0xd1060002, - 0x00011103, 0x7e0602ff, - 0x00000200, 0xbefc00ff, - 0x00010000, 0xbe80007b, - 0x867bff7b, 0xff7fffff, - 0x877bff7b, 0x00058000, - 0xd8ec0000, 0x00000002, - 0xbf8c007f, 0xe0765000, - 0x6e1e0002, 0x32040702, - 0xd0c9006a, 0x0000eb02, - 0xbf87fff7, 0xbefb0000, - 0xbeee00ff, 0x00000400, - 0xbefe00c1, 0xbeff00c1, - 0xb8f52a05, 0x80758175, - 0x8e758275, 0x8e7a8875, - 0xbefa00ff, 0x01000000, - 0xbefc0084, 0xbf0a757c, - 0xbf840015, 0xbf11017c, - 0x8075ff75, 0x00001000, - 0x7e000300, 0x7e020301, - 0x7e040302, 0x7e060303, - 0xe0724000, 0x6e1e0000, - 0xe0724100, 0x6e1e0100, - 0xe0724200, 0x6e1e0200, - 0xe0724300, 0x6e1e0300, - 0x807c847c, 0x806eff6e, - 0x00000400, 0xbf0a757c, - 0xbf85ffef, 0xbf9c0000, - 0xbf8200ca, 0xbef8007e, - 0x8679ff7f, 0x0000ffff, - 0x8779ff79, 0x00040000, - 0xbefa0080, 0xbefb00ff, - 0x00807fac, 0x8676ff7f, - 0x08000000, 0x8f768376, - 0x877b767b, 0x8676ff7f, - 0x70000000, 0x8f768176, - 0x877b767b, 0x8676ff7f, - 0x04000000, 0xbf84001e, - 0xbefe00c1, 0xbeff00c1, - 0xb8f34306, 0x8673c173, - 0xbf840019, 0x8e738673, - 0x8e738273, 0xbefa0073, - 0xb8f22a05, 0x80728172, - 0x8e728a72, 0xb8f61605, - 0x80768176, 0x8e768676, - 0x80727672, 0x8072ff72, - 0x00000080, 0xbefa00ff, - 0x01000000, 0xbefc0080, - 0xe0510000, 0x721e0000, - 0xe0510100, 0x721e0000, - 0x807cff7c, 0x00000200, - 0x8072ff72, 0x00000200, - 0xbf0a737c, 0xbf85fff6, - 0xbef20080, 0xbefe00c1, - 0xbeff00c1, 0xb8f32a05, - 0x80738173, 0x8e738273, - 0x8e7a8873, 0xbefa00ff, - 0x01000000, 0xbef60072, - 0x8072ff72, 0x00000400, - 0xbefc0084, 0xbf11087c, - 0x8073ff73, 0x00008000, - 0xe0524000, 0x721e0000, - 0xe0524100, 0x721e0100, - 0xe0524200, 0x721e0200, - 0xe0524300, 0x721e0300, - 0xbf8c0f70, 0x7e000300, - 0x7e020301, 0x7e040302, - 0x7e060303, 0x807c847c, - 0x8072ff72, 0x00000400, - 0xbf0a737c, 0xbf85ffee, - 0xbf9c0000, 0xe0524000, - 0x761e0000, 0xe0524100, - 0x761e0100, 0xe0524200, - 0x761e0200, 0xe0524300, - 0x761e0300, 0xb8f22a05, - 0x80728172, 0x8e728a72, - 0xb8f61605, 0x80768176, - 0x8e768676, 0x80727672, - 0x80f2c072, 0xb8f31605, - 0x80738173, 0x8e738473, - 0x8e7a8273, 0xbefa00ff, - 0x01000000, 0xbefc0073, - 0xc031003c, 0x00000072, - 0x80f2c072, 0xbf8c007f, - 0x80fc907c, 0xbe802d00, - 0xbe822d02, 0xbe842d04, - 0xbe862d06, 0xbe882d08, - 0xbe8a2d0a, 0xbe8c2d0c, - 0xbe8e2d0e, 0xbf06807c, - 0xbf84fff1, 0xb8f22a05, - 0x80728172, 0x8e728a72, - 0xb8f61605, 0x80768176, - 0x8e768676, 0x80727672, - 0xbefa0084, 0xbefa00ff, - 0x01000000, 0xc0211cfc, - 0x00000072, 0x80728472, - 0xc0211c3c, 0x00000072, - 0x80728472, 0xc0211c7c, - 0x00000072, 0x80728472, - 0xc0211bbc, 0x00000072, - 0x80728472, 0xc0211bfc, - 0x00000072, 0x80728472, - 0xc0211d3c, 0x00000072, - 0x80728472, 0xc0211d7c, - 0x00000072, 0x80728472, - 0xc0211a3c, 0x00000072, - 0x80728472, 0xc0211a7c, - 0x00000072, 0x80728472, - 0xc0211dfc, 0x00000072, - 0x80728472, 0xc0211b3c, - 0x00000072, 0x80728472, - 0xc0211b7c, 0x00000072, - 0x80728472, 0xbf8c007f, - 0x8671ff71, 0x0000ffff, - 0xbefc0073, 
0xbefe006e, - 0xbeff006f, 0x867375ff, - 0x000003ff, 0xb9734803, - 0x867375ff, 0xfffff800, - 0x8f738b73, 0xb973a2c3, - 0xb977f801, 0x8673ff71, - 0xf0000000, 0x8f739c73, - 0x8e739073, 0xbef60080, - 0x87767376, 0x8673ff71, - 0x08000000, 0x8f739b73, - 0x8e738f73, 0x87767376, - 0x8673ff74, 0x00800000, - 0x8f739773, 0xb976f807, - 0x86fe7e7e, 0x86ea6a6a, - 0xb974f802, 0xbf8a0000, - 0x95807370, 0xbf810000, -}; - diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index cac8d4992e04..065f55ae9e41 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -20,9 +20,12 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#if 0 -HW (GFX9) source code for CWSR trap handler -#Version 18 + multiple trap handler +/* To compile this assembly code: + * PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex + */ + +/* HW (GFX9) source code for CWSR trap handler */ +/* Version 18 + multiple trap handler */ // this performance-optimal version was originally from Seven Xu at SRDC @@ -151,7 +154,7 @@ var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME var s_save_spi_init_lo = exec_lo var s_save_spi_init_hi = exec_hi -var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3??h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]} +var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]} var s_save_pc_hi = ttmp1 var s_save_exec_lo = ttmp2 var s_save_exec_hi = ttmp3 @@ -1210,292 +1213,3 @@ function ack_sqc_store_workaround s_waitcnt lgkmcnt(0) end end - - -#endif - -static const uint32_t cwsr_trap_gfx9_hex[] = { - 0xbf820001, 0xbf82015a, - 0xb8f8f802, 0x89788678, - 0xb8f1f803, 0x866eff71, - 0x00000400, 0xbf850034, - 0x866eff71, 0x00000800, - 0xbf850003, 0x866eff71, - 0x00000100, 0xbf840008, - 0x866eff78, 0x00002000, - 0xbf840001, 0xbf810000, - 0x8778ff78, 0x00002000, - 0x80ec886c, 0x82ed806d, - 0xb8eef807, 0x866fff6e, - 0x001f8000, 0x8e6f8b6f, - 0x8977ff77, 0xfc000000, - 0x87776f77, 0x896eff6e, - 0x001f8000, 0xb96ef807, - 0xb8f0f812, 0xb8f1f813, - 0x8ef08870, 0xc0071bb8, - 0x00000000, 0xbf8cc07f, - 0xc0071c38, 0x00000008, - 0xbf8cc07f, 0x86ee6e6e, - 0xbf840001, 0xbe801d6e, - 0xb8f1f803, 0x8671ff71, - 0x000001ff, 0xbf850002, - 0x806c846c, 0x826d806d, - 0x866dff6d, 0x0000ffff, - 0x8f6e8b77, 0x866eff6e, - 0x001f8000, 0xb96ef807, - 0x86fe7e7e, 0x86ea6a6a, - 0xb978f802, 0xbe801f6c, - 0x866dff6d, 0x0000ffff, - 0xbef00080, 0xb9700283, - 0xb8f02407, 0x8e709c70, - 0x876d706d, 0xb8f003c7, - 0x8e709b70, 0x876d706d, - 0xb8f0f807, 0x8670ff70, - 0x00007fff, 0xb970f807, - 0xbeee007e, 0xbeef007f, - 0xbefe0180, 0xbf900004, - 0x87708478, 0xb970f802, - 0xbf8e0002, 0xbf88fffe, - 0xb8f02a05, 0x80708170, - 0x8e708a70, 0xb8f11605, - 0x80718171, 0x8e718671, - 0x80707170, 0x80707e70, - 0x8271807f, 0x8671ff71, - 0x0000ffff, 0xc0471cb8, - 0x00000040, 0xbf8cc07f, - 0xc04b1d38, 0x00000048, - 0xbf8cc07f, 0xc0431e78, - 0x00000058, 0xbf8cc07f, - 0xc0471eb8, 0x0000005c, - 0xbf8cc07f, 0xbef4007e, - 0x8675ff7f, 0x0000ffff, - 0x8775ff75, 0x00040000, - 0xbef60080, 0xbef700ff, - 0x00807fac, 0x8670ff7f, - 0x08000000, 0x8f708370, - 0x87777077, 0x8670ff7f, - 0x70000000, 0x8f708170, - 0x87777077, 0xbefb007c, - 0xbefa0080, 0xb8fa2a05, - 0x807a817a, 0x8e7a8a7a, - 0xb8f01605, 0x80708170, - 0x8e708670, 0x807a707a, - 0xbef60084, 0xbef600ff, - 0x01000000, 0xbefe007c, - 0xbefc007a, 0xc0611efa, - 0x0000007c, 0xbf8cc07f, - 0x807a847a, 0xbefc007e, - 0xbefe007c, 0xbefc007a, - 0xc0611b3a, 0x0000007c, - 
0xbf8cc07f, 0x807a847a, - 0xbefc007e, 0xbefe007c, - 0xbefc007a, 0xc0611b7a, - 0x0000007c, 0xbf8cc07f, - 0x807a847a, 0xbefc007e, - 0xbefe007c, 0xbefc007a, - 0xc0611bba, 0x0000007c, - 0xbf8cc07f, 0x807a847a, - 0xbefc007e, 0xbefe007c, - 0xbefc007a, 0xc0611bfa, - 0x0000007c, 0xbf8cc07f, - 0x807a847a, 0xbefc007e, - 0xbefe007c, 0xbefc007a, - 0xc0611e3a, 0x0000007c, - 0xbf8cc07f, 0x807a847a, - 0xbefc007e, 0xb8f1f803, - 0xbefe007c, 0xbefc007a, - 0xc0611c7a, 0x0000007c, - 0xbf8cc07f, 0x807a847a, - 0xbefc007e, 0xbefe007c, - 0xbefc007a, 0xc0611a3a, - 0x0000007c, 0xbf8cc07f, - 0x807a847a, 0xbefc007e, - 0xbefe007c, 0xbefc007a, - 0xc0611a7a, 0x0000007c, - 0xbf8cc07f, 0x807a847a, - 0xbefc007e, 0xb8fbf801, - 0xbefe007c, 0xbefc007a, - 0xc0611efa, 0x0000007c, - 0xbf8cc07f, 0x807a847a, - 0xbefc007e, 0x8670ff7f, - 0x04000000, 0xbeef0080, - 0x876f6f70, 0xb8fa2a05, - 0x807a817a, 0x8e7a8a7a, - 0xb8f11605, 0x80718171, - 0x8e718471, 0x8e768271, - 0xbef600ff, 0x01000000, - 0xbef20174, 0x80747a74, - 0x82758075, 0xbefc0080, - 0xbf800000, 0xbe802b00, - 0xbe822b02, 0xbe842b04, - 0xbe862b06, 0xbe882b08, - 0xbe8a2b0a, 0xbe8c2b0c, - 0xbe8e2b0e, 0xc06b003a, - 0x00000000, 0xbf8cc07f, - 0xc06b013a, 0x00000010, - 0xbf8cc07f, 0xc06b023a, - 0x00000020, 0xbf8cc07f, - 0xc06b033a, 0x00000030, - 0xbf8cc07f, 0x8074c074, - 0x82758075, 0x807c907c, - 0xbf0a717c, 0xbf85ffe7, - 0xbef40172, 0xbefa0080, - 0xbefe00c1, 0xbeff00c1, - 0xbee80080, 0xbee90080, - 0xbef600ff, 0x01000000, - 0xe0724000, 0x7a1d0000, - 0xe0724100, 0x7a1d0100, - 0xe0724200, 0x7a1d0200, - 0xe0724300, 0x7a1d0300, - 0xbefe00c1, 0xbeff00c1, - 0xb8f14306, 0x8671c171, - 0xbf84002c, 0xbf8a0000, - 0x8670ff6f, 0x04000000, - 0xbf840028, 0x8e718671, - 0x8e718271, 0xbef60071, - 0xb8fa2a05, 0x807a817a, - 0x8e7a8a7a, 0xb8f01605, - 0x80708170, 0x8e708670, - 0x807a707a, 0x807aff7a, - 0x00000080, 0xbef600ff, - 0x01000000, 0xbefc0080, - 0xd28c0002, 0x000100c1, - 0xd28d0003, 0x000204c1, - 0xd1060002, 0x00011103, - 0x7e0602ff, 0x00000200, - 0xbefc00ff, 0x00010000, - 0xbe800077, 0x8677ff77, - 0xff7fffff, 0x8777ff77, - 0x00058000, 0xd8ec0000, - 0x00000002, 0xbf8cc07f, - 0xe0765000, 0x7a1d0002, - 0x68040702, 0xd0c9006a, - 0x0000e302, 0xbf87fff7, - 0xbef70000, 0xbefa00ff, - 0x00000400, 0xbefe00c1, - 0xbeff00c1, 0xb8f12a05, - 0x80718171, 0x8e718271, - 0x8e768871, 0xbef600ff, - 0x01000000, 0xbefc0084, - 0xbf0a717c, 0xbf840015, - 0xbf11017c, 0x8071ff71, - 0x00001000, 0x7e000300, - 0x7e020301, 0x7e040302, - 0x7e060303, 0xe0724000, - 0x7a1d0000, 0xe0724100, - 0x7a1d0100, 0xe0724200, - 0x7a1d0200, 0xe0724300, - 0x7a1d0300, 0x807c847c, - 0x807aff7a, 0x00000400, - 0xbf0a717c, 0xbf85ffef, - 0xbf9c0000, 0xbf8200d9, - 0xbef4007e, 0x8675ff7f, - 0x0000ffff, 0x8775ff75, - 0x00040000, 0xbef60080, - 0xbef700ff, 0x00807fac, - 0x866eff7f, 0x08000000, - 0x8f6e836e, 0x87776e77, - 0x866eff7f, 0x70000000, - 0x8f6e816e, 0x87776e77, - 0x866eff7f, 0x04000000, - 0xbf84001e, 0xbefe00c1, - 0xbeff00c1, 0xb8ef4306, - 0x866fc16f, 0xbf840019, - 0x8e6f866f, 0x8e6f826f, - 0xbef6006f, 0xb8f82a05, - 0x80788178, 0x8e788a78, - 0xb8ee1605, 0x806e816e, - 0x8e6e866e, 0x80786e78, - 0x8078ff78, 0x00000080, - 0xbef600ff, 0x01000000, - 0xbefc0080, 0xe0510000, - 0x781d0000, 0xe0510100, - 0x781d0000, 0x807cff7c, - 0x00000200, 0x8078ff78, - 0x00000200, 0xbf0a6f7c, - 0xbf85fff6, 0xbef80080, - 0xbefe00c1, 0xbeff00c1, - 0xb8ef2a05, 0x806f816f, - 0x8e6f826f, 0x8e76886f, - 0xbef600ff, 0x01000000, - 0xbeee0078, 0x8078ff78, - 0x00000400, 0xbefc0084, - 0xbf11087c, 0x806fff6f, - 0x00008000, 0xe0524000, - 0x781d0000, 0xe0524100, - 0x781d0100, 
0xe0524200, - 0x781d0200, 0xe0524300, - 0x781d0300, 0xbf8c0f70, - 0x7e000300, 0x7e020301, - 0x7e040302, 0x7e060303, - 0x807c847c, 0x8078ff78, - 0x00000400, 0xbf0a6f7c, - 0xbf85ffee, 0xbf9c0000, - 0xe0524000, 0x6e1d0000, - 0xe0524100, 0x6e1d0100, - 0xe0524200, 0x6e1d0200, - 0xe0524300, 0x6e1d0300, - 0xb8f82a05, 0x80788178, - 0x8e788a78, 0xb8ee1605, - 0x806e816e, 0x8e6e866e, - 0x80786e78, 0x80f8c078, - 0xb8ef1605, 0x806f816f, - 0x8e6f846f, 0x8e76826f, - 0xbef600ff, 0x01000000, - 0xbefc006f, 0xc031003a, - 0x00000078, 0x80f8c078, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe802d00, - 0xbe822d02, 0xbe842d04, - 0xbe862d06, 0xbe882d08, - 0xbe8a2d0a, 0xbe8c2d0c, - 0xbe8e2d0e, 0xbf06807c, - 0xbf84fff0, 0xb8f82a05, - 0x80788178, 0x8e788a78, - 0xb8ee1605, 0x806e816e, - 0x8e6e866e, 0x80786e78, - 0xbef60084, 0xbef600ff, - 0x01000000, 0xc0211bfa, - 0x00000078, 0x80788478, - 0xc0211b3a, 0x00000078, - 0x80788478, 0xc0211b7a, - 0x00000078, 0x80788478, - 0xc0211eba, 0x00000078, - 0x80788478, 0xc0211efa, - 0x00000078, 0x80788478, - 0xc0211c3a, 0x00000078, - 0x80788478, 0xc0211c7a, - 0x00000078, 0x80788478, - 0xc0211a3a, 0x00000078, - 0x80788478, 0xc0211a7a, - 0x00000078, 0x80788478, - 0xc0211cfa, 0x00000078, - 0x80788478, 0xbf8cc07f, - 0x866dff6d, 0x0000ffff, - 0xbefc006f, 0xbefe007a, - 0xbeff007b, 0x866f71ff, - 0x000003ff, 0xb96f4803, - 0x866f71ff, 0xfffff800, - 0x8f6f8b6f, 0xb96fa2c3, - 0xb973f801, 0xb8ee2a05, - 0x806e816e, 0x8e6e8a6e, - 0xb8ef1605, 0x806f816f, - 0x8e6f866f, 0x806e6f6e, - 0x806e746e, 0x826f8075, - 0x866fff6f, 0x0000ffff, - 0xc0071cb7, 0x00000040, - 0xc00b1d37, 0x00000048, - 0xc0031e77, 0x00000058, - 0xc0071eb7, 0x0000005c, - 0xbf8cc07f, 0x866fff6d, - 0xf0000000, 0x8f6f9c6f, - 0x8e6f906f, 0xbeee0080, - 0x876e6f6e, 0x866fff6d, - 0x08000000, 0x8f6f9b6f, - 0x8e6f8f6f, 0x876e6f6e, - 0x866fff70, 0x00800000, - 0x8f6f976f, 0xb96ef807, - 0x86fe7e7e, 0x86ea6a6a, - 0xb970f802, 0xbf8a0000, - 0x95806f6c, 0xbf810000, -}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index c1d9e2772cbc..7ee6cec2c060 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -26,8 +26,7 @@ #include "kfd_priv.h" #include "kfd_device_queue_manager.h" #include "kfd_pm4_headers_vi.h" -#include "cwsr_trap_handler_gfx8.asm" -#include "cwsr_trap_handler_gfx9.asm" +#include "cwsr_trap_handler.h" #include "kfd_iommu.h" #define MQD_SIZE_ALIGNED 768 -- cgit v1.2.1 From f8ea72d097965617bba0d6773fd29d44070c5e1a Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Tue, 1 May 2018 17:56:07 -0400 Subject: drm/amdkfd: Fix CP soft hang on APUs The problem happens on Raven and Carrizo. The context save handler should not clear the high bits of PC_HI before extracting the bits of IB_STS. The bug is not relevant to VEGA10 until we enable demand paging. 
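The subtle part is purely an ordering issue, which is easier to see in C terms. A minimal sketch, assuming a hypothetical packing in which the saved IB_STS bits sit in the otherwise-unused upper bits of the saved PC_HI word; the shift and mask values are illustrative, not the handler's real encoding:

#include <stdint.h>

#define PC_HI_MASK    0x0000ffffu /* PC[47:32] lives in the low half */
#define IB_STS_SHIFT  24          /* illustrative position of the packed IB_STS bits */

/* Buggy order: masking PC_HI first discards the packed IB_STS bits. */
uint32_t restore_mask_too_early(uint32_t saved_pc_hi, uint32_t *ib_sts)
{
	saved_pc_hi &= PC_HI_MASK;
	*ib_sts = saved_pc_hi >> IB_STS_SHIFT; /* always reads back 0 */
	return saved_pc_hi;
}

/* Fixed order (what the patch does): extract IB_STS first, mask PC_HI last. */
uint32_t restore_mask_last(uint32_t saved_pc_hi, uint32_t *ib_sts)
{
	*ib_sts = saved_pc_hi >> IB_STS_SHIFT;
	return saved_pc_hi & PC_HI_MASK;
}
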
Signed-off-by: Jay Cornwall Signed-off-by: Yong Zhao Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 4 ++-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 3 +-- drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index a546a219d025..f68aef02fc1f 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -253,7 +253,6 @@ static const uint32_t cwsr_trap_gfx8_hex[] = { 0x00000072, 0x80728472, 0xc0211b7c, 0x00000072, 0x80728472, 0xbf8c007f, - 0x8671ff71, 0x0000ffff, 0xbefc0073, 0xbefe006e, 0xbeff006f, 0x867375ff, 0x000003ff, 0xb9734803, @@ -267,6 +266,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = { 0x8e738f73, 0x87767376, 0x8673ff74, 0x00800000, 0x8f739773, 0xb976f807, + 0x8671ff71, 0x0000ffff, 0x86fe7e7e, 0x86ea6a6a, 0xb974f802, 0xbf8a0000, 0x95807370, 0xbf810000, @@ -530,7 +530,6 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0x00000078, 0x80788478, 0xc0211cfa, 0x00000078, 0x80788478, 0xbf8cc07f, - 0x866dff6d, 0x0000ffff, 0xbefc006f, 0xbefe007a, 0xbeff007b, 0x866f71ff, 0x000003ff, 0xb96f4803, @@ -554,6 +553,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = { 0x8e6f8f6f, 0x876e6f6e, 0x866fff70, 0x00800000, 0x8f6f976f, 0xb96ef807, + 0x866dff6d, 0x0000ffff, 0x86fe7e7e, 0x86ea6a6a, 0xb970f802, 0xbf8a0000, 0x95806f6c, 0xbf810000, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm index 658a4c6be8e4..a2a04bb64096 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm @@ -1015,8 +1015,6 @@ end s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS - s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS - //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise: if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore) @@ -1052,6 +1050,7 @@ end s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp + s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm index 065f55ae9e41..998be96be736 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm @@ -1067,8 +1067,6 @@ end s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS - s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS - //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, 
otherwise: if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore) @@ -1119,6 +1117,7 @@ end s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp + s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32 s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32 s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu -- cgit v1.2.1 From eeb27b7eb3826c23cc5688c47845e7309f20fc32 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 1 May 2018 17:56:08 -0400 Subject: drm/amdkfd: Fix signal handling performance again It turns out that idr_for_each_entry is really slow compared to just iterating over the slots. Based on measurements the difference is estimated to be about a factor 64. That means using idr_for_each_entry is only worth it with very few allocated events. Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_events.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index bccf2f761177..5562e94e786a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -496,7 +496,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id, pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n", partial_id, valid_id_bits); - if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) { + if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) { /* With relatively few events, it's faster to * iterate over the event IDR */ -- cgit v1.2.1 From ccb76b149e1c849c0aee6b5043aed74d41064ad6 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 1 May 2018 17:56:09 -0400 Subject: drm/amdkfd: Remove initialization of cp_hqd_ib_control on CIK The initialization is not necessary. amd-kfd-staging and ROCm releases have worked without it for two years. 
Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 2bc49c62cc8c..06eaa218eba6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -79,10 +79,6 @@ static int init_mqd(struct mqd_manager *mm, void **mqd, m->cp_mqd_base_addr_lo = lower_32_bits(addr); m->cp_mqd_base_addr_hi = upper_32_bits(addr); - m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN; - /* Although WinKFD writes this, I suspect it should not be necessary */ - m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE; - m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS | QUANTUM_DURATION(10); -- cgit v1.2.1 From bfdcbfd25516eba6cd7b9862779a325ec26006ad Mon Sep 17 00:00:00 2001 From: Ben Goz Date: Tue, 1 May 2018 17:56:10 -0400 Subject: drm/amdkfd: Locking PM mutex while allocating IB buffer Signed-off-by: Ben Goz Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 91f0350b6180..c317feb43f69 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -94,12 +94,14 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); + mutex_lock(&pm->lock); + retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, &pm->ib_buffer_obj); if (retval) { pr_err("Failed to allocate runlist IB\n"); - return retval; + goto out; } *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr; @@ -107,6 +109,9 @@ static int pm_allocate_runlist_ib(struct packet_manager *pm, memset(*rl_buffer, 0, *rl_buffer_size); pm->allocated = true; + +out: + mutex_unlock(&pm->lock); return retval; } -- cgit v1.2.1 From 2533f0741e5f7259393d7edecb4bca3106c583c2 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Tue, 1 May 2018 17:56:11 -0400 Subject: drm/amdkfd: Remove queue node when destroy queue failed HWS may hang in the middle of destroy queue, remove the queue from the process queue list so it won't be freed again in the future Signed-off-by: Shaoyun Liu Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 3045aebdc3f7..d65ce0436b31 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -241,7 +241,8 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("DQM create queue failed\n"); + pr_err("Pasid %d DQM create queue %d failed. 
ret %d\n", + pqm->process->pasid, type, retval); goto err_create_queue; } @@ -319,8 +320,11 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) dqm = pqn->q->device->dqm; retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); if (retval) { - pr_debug("Destroy queue failed, returned %d\n", retval); - goto err_destroy_queue; + pr_err("Pasid %d destroy queue %d failed, ret %d\n", + pqm->process->pasid, + pqn->q->properties.queue_id, retval); + if (retval != -ETIME) + goto err_destroy_queue; } uninit_queue(pqn->q); } -- cgit v1.2.1 From c129db1206bd11ab0531a4d91a455a0809acae0e Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 1 May 2018 17:56:12 -0400 Subject: drm/amdkfd: Add sanity checks in IRQ handlers Only accept interrupts from KFD VMIDs. Just checking for a PASID may not be enough because amdgpu started using PASIDs to map VM faults to processes. Warn if an IRQ doesn't have a valid PASID (indicating a firmware bug). Suggested-by: Shaoyun Liu Suggested-by: Oak Zeng Signed-off-by: Felix Kuehling Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 20 +++++++++--- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 40 ++++++++++++++---------- 2 files changed, 39 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 3d5ccb3755d4..49df6c791cfc 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -27,18 +27,28 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, const uint32_t *ih_ring_entry) { - unsigned int pasid; const struct cik_ih_ring_entry *ihre = (const struct cik_ih_ring_entry *)ih_ring_entry; + unsigned int vmid, pasid; + + /* Only handle interrupts from KFD VMIDs */ + vmid = (ihre->ring_id & 0x0000ff00) >> 8; + if (vmid < dev->vm_info.first_vmid_kfd || + vmid > dev->vm_info.last_vmid_kfd) + return 0; + /* If there is no valid PASID, it's likely a firmware bug */ pasid = (ihre->ring_id & 0xffff0000) >> 16; + if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt")) + return 0; - /* Do not process in ISR, just request it to be forwarded to WQ. */ - return (pasid != 0) && - (ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE || + /* Interrupt types we care about: various signals and faults. + * They will be forwarded to a work queue (see below). 
+ */ + return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE || ihre->source_id == CIK_INTSRC_SDMA_TRAP || ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG || - ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE); + ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE; } static void cik_event_interrupt_wq(struct kfd_dev *dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 39d41155581f..37029baa3346 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -29,27 +29,35 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, const uint32_t *ih_ring_entry) { uint16_t source_id, client_id, pasid, vmid; + const uint32_t *data = ih_ring_entry; - source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); - client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); - pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + /* Only handle interrupts from KFD VMIDs */ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + if (vmid < dev->vm_info.first_vmid_kfd || + vmid > dev->vm_info.last_vmid_kfd) + return 0; + + /* If there is no valid PASID, it's likely a firmware bug */ + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + if (WARN_ONCE(pasid == 0, "FW bug: No PASID in KFD interrupt")) + return 0; - if (pasid) { - const uint32_t *data = ih_ring_entry; + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); - pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n", - client_id, source_id, pasid); - pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", - data[0], data[1], data[2], data[3], - data[4], data[5], data[6], data[7]); - } + pr_debug("client id 0x%x, source id %d, pasid 0x%x. raw data:\n", + client_id, source_id, pasid); + pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", + data[0], data[1], data[2], data[3], + data[4], data[5], data[6], data[7]); - return (pasid != 0) && - (source_id == SOC15_INTSRC_CP_END_OF_PIPE || - source_id == SOC15_INTSRC_SDMA_TRAP || - source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || - source_id == SOC15_INTSRC_CP_BAD_OPCODE); + /* Interrupt types we care about: various signals and faults. + * They will be forwarded to a work queue (see below). + */ + return source_id == SOC15_INTSRC_CP_END_OF_PIPE || + source_id == SOC15_INTSRC_SDMA_TRAP || + source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || + source_id == SOC15_INTSRC_CP_BAD_OPCODE; } static void event_interrupt_wq_v9(struct kfd_dev *dev, -- cgit v1.2.1 From af47b390273f1068bdb1d01263a81948c4e2f97a Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 13 Apr 2018 14:24:12 -0700 Subject: drm/amdkfd: Remove vla MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's an ongoing effort to remove VLAs[1] from the kernel to eventually turn on -Wvla. Switch to a constant value that covers all hardware. 
[1] https://lkml.org/lkml/2018/3/7/621 Reviewed-by: Felix Kuehling Acked-by: Christian König Signed-off-by: Laura Abbott Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 8 +++++--- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 ++ 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index 035c351f47c5..db6d9336b80d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -139,10 +139,12 @@ static void interrupt_wq(struct work_struct *work) { struct kfd_dev *dev = container_of(work, struct kfd_dev, interrupt_work); + uint32_t ih_ring_entry[KFD_MAX_RING_ENTRY_SIZE]; - uint32_t ih_ring_entry[DIV_ROUND_UP( - dev->device_info->ih_ring_entry_size, - sizeof(uint32_t))]; + if (dev->device_info->ih_ring_entry_size > sizeof(ih_ring_entry)) { + dev_err_once(kfd_chardev(), "Ring entry too small\n"); + return; + } while (dequeue_ih_ring_entry(dev, ih_ring_entry)) dev->device_info->event_interrupt_class->interrupt_wq(dev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 10d5b5445195..5e3990bb4c4b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -39,6 +39,8 @@ #include "amd_shared.h" +#define KFD_MAX_RING_ENTRY_SIZE 8 + #define KFD_SYSFS_FILE_MODE 0444 /* GPU ID hash width in bits */ -- cgit v1.2.1 From c5191133405ac317d20d23c8510416e18842031d Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 20 Apr 2018 11:05:07 -0400 Subject: drm/amd/display: Add VG12 ASIC IDs Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 9831cb5eaa7c..9b0a04f99ac8 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -113,9 +113,14 @@ #define AI_GREENLAND_P_A0 1 #define AI_GREENLAND_P_A1 2 +#define AI_UNKNOWN 0xFF -#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN) -#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN) +#define AI_VEGA12_P_A0 20 +#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0) +#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0) + +#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) +#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) /* DCN1_0 */ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ -- cgit v1.2.1 From 60a5205fb5f3da3907b8b53561571a790e7b1e70 Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Mon, 5 Mar 2018 14:59:57 -0500 Subject: drm/amd: Add BIOS smu_info v3_3 required struct def. 
Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atomfirmware.h | 170 ++++++++++++++++++++++++++++- 1 file changed, 168 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 0f5ad54d3fd3..de177ce8ca80 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -501,6 +501,32 @@ enum atom_cooling_solution_id{ LIQUID_COOLING = 0x01 }; +struct atom_firmware_info_v3_2 { + struct atom_common_table_header table_header; + uint32_t firmware_revision; + uint32_t bootup_sclk_in10khz; + uint32_t bootup_mclk_in10khz; + uint32_t firmware_capability; // enum atombios_firmware_capability + uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */ + uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address + uint16_t bootup_vddc_mv; + uint16_t bootup_vddci_mv; + uint16_t bootup_mvddc_mv; + uint16_t bootup_vddgfx_mv; + uint8_t mem_module_id; + uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */ + uint8_t reserved1[2]; + uint32_t mc_baseaddr_high; + uint32_t mc_baseaddr_low; + uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def + uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id + uint8_t board_i2c_feature_slave_addr; + uint8_t reserved3; + uint16_t bootup_mvddq_mv; + uint16_t bootup_mvpp_mv; + uint32_t zfbstartaddrin16mb; + uint32_t reserved2[3]; +}; /* *************************************************************************** @@ -1169,7 +1195,29 @@ struct atom_gfx_info_v2_2 uint32_t rlc_gpu_timer_refclk; }; - +struct atom_gfx_info_v2_3 { + struct atom_common_table_header table_header; + uint8_t gfxip_min_ver; + uint8_t gfxip_max_ver; + uint8_t max_shader_engines; + uint8_t max_tile_pipes; + uint8_t max_cu_per_sh; + uint8_t max_sh_per_se; + uint8_t max_backends_per_se; + uint8_t max_texture_channel_caches; + uint32_t regaddr_cp_dma_src_addr; + uint32_t regaddr_cp_dma_src_addr_hi; + uint32_t regaddr_cp_dma_dst_addr; + uint32_t regaddr_cp_dma_dst_addr_hi; + uint32_t regaddr_cp_dma_command; + uint32_t regaddr_cp_status; + uint32_t regaddr_rlc_gpu_clock_32; + uint32_t rlc_gpu_timer_refclk; + uint8_t active_cu_per_sh; + uint8_t active_rb_per_se; + uint16_t gcgoldenoffset; + uint32_t rm21_sram_vmin_value; +}; /* *************************************************************************** @@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1 uint8_t fw_ctf_polarity; // GPIO polarity for CTF }; +struct atom_smu_info_v3_2 { + struct atom_common_table_header table_header; + uint8_t smuip_min_ver; + uint8_t smuip_max_ver; + uint8_t smu_rsd1; + uint8_t gpuclk_ss_mode; + uint16_t sclk_ss_percentage; + uint16_t sclk_ss_rate_10hz; + uint16_t gpuclk_ss_percentage; // in unit of 0.001% + uint16_t gpuclk_ss_rate_10hz; + uint32_t core_refclk_10khz; + uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid + uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching + uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid + uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event + uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid + uint8_t vr1hot_polarity; // GPIO polarity 
for VR1 HOT event + uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid + uint8_t fw_ctf_polarity; // GPIO polarity for CTF + uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid + uint8_t pcc_gpio_polarity; // GPIO polarity for CTF + uint16_t smugoldenoffset; + uint32_t gpupll_vco_freq_10khz; + uint32_t bootup_smnclk_10khz; + uint32_t bootup_socclk_10khz; + uint32_t bootup_mp0clk_10khz; + uint32_t bootup_mp1clk_10khz; + uint32_t bootup_lclk_10khz; + uint32_t bootup_dcefclk_10khz; + uint32_t ctf_threshold_override_value; + uint32_t reserved[5]; +}; + +struct atom_smu_info_v3_3 { + struct atom_common_table_header table_header; + uint8_t smuip_min_ver; + uint8_t smuip_max_ver; + uint8_t smu_rsd1; + uint8_t gpuclk_ss_mode; + uint16_t sclk_ss_percentage; + uint16_t sclk_ss_rate_10hz; + uint16_t gpuclk_ss_percentage; // in unit of 0.001% + uint16_t gpuclk_ss_rate_10hz; + uint32_t core_refclk_10khz; + uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid + uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching + uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid + uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event + uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid + uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event + uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid + uint8_t fw_ctf_polarity; // GPIO polarity for CTF + uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid + uint8_t pcc_gpio_polarity; // GPIO polarity for CTF + uint16_t smugoldenoffset; + uint32_t gpupll_vco_freq_10khz; + uint32_t bootup_smnclk_10khz; + uint32_t bootup_socclk_10khz; + uint32_t bootup_mp0clk_10khz; + uint32_t bootup_mp1clk_10khz; + uint32_t bootup_lclk_10khz; + uint32_t bootup_dcefclk_10khz; + uint32_t ctf_threshold_override_value; + uint32_t syspll3_0_vco_freq_10khz; + uint32_t syspll3_1_vco_freq_10khz; + uint32_t bootup_fclk_10khz; + uint32_t bootup_waflclk_10khz; + uint32_t reserved[3]; +}; + /* *************************************************************************** Data Table smc_dpm_info structure @@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1 uint32_t boardreserved[10]; }; - /* *************************************************************************** Data Table asic_profiling_info structure @@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK }; +enum atom_smu11_syspll_id { + SMU11_SYSPLL0_ID = 0, + SMU11_SYSPLL1_0_ID = 1, + SMU11_SYSPLL1_1_ID = 2, + SMU11_SYSPLL1_2_ID = 3, + SMU11_SYSPLL2_ID = 4, + SMU11_SYSPLL3_0_ID = 5, + SMU11_SYSPLL3_1_ID = 6, +}; + + +enum atom_smu11_syspll0_clock_id { + SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK + SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK + SMU11_SYSPLL0_DCLK_ID = 2, // DCLK + SMU11_SYSPLL0_VCLK_ID = 3, // VCLK + SMU11_SYSPLL0_ECLK_ID = 4, // ECLK + SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK +}; + + +enum atom_smu11_syspll1_0_clock_id { + SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a +}; + +enum atom_smu11_syspll1_1_clock_id { + SMU11_SYSPLL1_0_UCLKB_ID = 0, // UCLK_b +}; + +enum atom_smu11_syspll1_2_clock_id { + SMU11_SYSPLL1_0_FCLK_ID = 0, // FCLK +}; + +enum atom_smu11_syspll2_clock_id { + SMU11_SYSPLL2_GFXCLK_ID = 0, // GFXCLK +}; + +enum 
atom_smu11_syspll3_0_clock_id { + SMU11_SYSPLL3_0_WAFCLK_ID = 0, // WAFCLK + SMU11_SYSPLL3_0_DISPCLK_ID = 1, // DISPCLK + SMU11_SYSPLL3_0_DPREFCLK_ID = 2, // DPREFCLK +}; + +enum atom_smu11_syspll3_1_clock_id { + SMU11_SYSPLL3_1_MP1CLK_ID = 0, // MP1CLK + SMU11_SYSPLL3_1_SMNCLK_ID = 1, // SMNCLK + SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK +}; + struct atom_get_smu_clock_info_output_parameters_v3_1 { union { -- cgit v1.2.1 From 6e65fb862064663ad3a08f964af1e8f3f2abf688 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 20 Apr 2018 10:56:18 -0400 Subject: drm/amd/display: Add get_firmware_info_v3_2 for VG12 Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 86 +++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 985fe8c22875..10a5807a7e8b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1( struct bios_parser *bp, struct dc_firmware_info *info); +static enum bp_result get_firmware_info_v3_2( + struct bios_parser *bp, + struct dc_firmware_info *info); + static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, struct atom_display_object_path_v2 *object); @@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info( case 3: switch (revision.minor) { case 1: - case 2: result = get_firmware_info_v3_1(bp, info); break; + case 2: + result = get_firmware_info_v3_2(bp, info); + break; default: break; } @@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1( return BP_RESULT_OK; } +static enum bp_result get_firmware_info_v3_2( + struct bios_parser *bp, + struct dc_firmware_info *info) +{ + struct atom_firmware_info_v3_2 *firmware_info; + struct atom_display_controller_info_v4_1 *dce_info = NULL; + struct atom_common_table_header *header; + struct atom_data_revision revision; + struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL; + struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL; + + if (!info) + return BP_RESULT_BADINPUT; + + firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2, + DATA_TABLES(firmwareinfo)); + + dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1, + DATA_TABLES(dce_info)); + + if (!firmware_info || !dce_info) + return BP_RESULT_BADBIOSTABLE; + + memset(info, 0, sizeof(*info)); + + header = GET_IMAGE(struct atom_common_table_header, + DATA_TABLES(smu_info)); + get_atom_data_table_revision(header, &revision); + + if (revision.minor == 2) { + /* Vega12 */ + smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, + DATA_TABLES(smu_info)); + + if (!smu_info_v3_2) + return BP_RESULT_BADBIOSTABLE; + + info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; + } else if (revision.minor == 3) { + /* Vega20 */ + smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, + DATA_TABLES(smu_info)); + + if (!smu_info_v3_3) + return BP_RESULT_BADBIOSTABLE; + + info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; + } + + // We need to convert from 10KHz units into KHz units. 
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10; + + /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */ + info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10; + /* Hardcode frequency if BIOS gives no DCE Ref Clk */ + if (info->pll_info.crystal_frequency == 0) { + if (revision.minor == 2) + info->pll_info.crystal_frequency = 27000; + else if (revision.minor == 3) + info->pll_info.crystal_frequency = 100000; + } + /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/ + info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10; + info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10; + + /* Get GPU PLL VCO Clock */ + if (bp->cmd_tbl.get_smu_clock_info != NULL) { + if (revision.minor == 2) + info->smu_gpu_pll_output_freq = + bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10; + else if (revision.minor == 3) + info->smu_gpu_pll_output_freq = + bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10; + } + + return BP_RESULT_OK; +} + static enum bp_result bios_parser_get_encoder_cap_info( struct dc_bios *dcb, struct graphics_object_id object_id, -- cgit v1.2.1 From 018d82e5f02ef3583411bcaa4e00c69786f46f19 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 24 Apr 2018 10:49:20 -0400 Subject: drm/amd/display: Don't return ddc result and read_bytes in same return value The two ranges overlap. Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 20 ++++++++++++-------- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10 +++++++--- drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 5 +++-- 3 files changed, 22 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index ace9ad578ca0..4304d9e408b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? 
I2C_MOT_TRUE : I2C_MOT_FALSE; enum ddc_result res; - ssize_t read_bytes; + uint32_t read_bytes = msg->size; if (WARN_ON(msg->size > 16)) return -E2BIG; switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_NATIVE_READ: - read_bytes = dal_ddc_service_read_dpcd_data( + res = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, false, I2C_MOT_UNDEF, msg->address, msg->buffer, - msg->size); - return read_bytes; + msg->size, + &read_bytes); + break; case DP_AUX_NATIVE_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, msg->size); break; case DP_AUX_I2C_READ: - read_bytes = dal_ddc_service_read_dpcd_data( + res = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, true, mot, msg->address, msg->buffer, - msg->size); - return read_bytes; + msg->size, + &read_bytes); + break; case DP_AUX_I2C_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, r == DDC_RESULT_SUCESSFULL); #endif - return msg->size; + if (res != DDC_RESULT_SUCESSFULL) + return -EIO; + return read_bytes; } static enum drm_connector_status diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 49c2face1e7a..ae48d603ebd6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data( return ret; } -ssize_t dal_ddc_service_read_dpcd_data( +enum ddc_result dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len) + uint32_t len, + uint32_t *read) { struct aux_payload read_payload = { .i2c_over_aux = i2c, @@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data( .mot = mot }; + *read = 0; + if (len > DEFAULT_AUX_MAX_DATA_SIZE) { BREAK_TO_DEBUGGER(); return DDC_RESULT_FAILED_INVALID_OPERATION; @@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data( ddc->ctx->i2caux, ddc->ddc_pin, &command)) { - return (ssize_t)command.payloads->length; + *read = command.payloads->length; + return DDC_RESULT_SUCESSFULL; } return DDC_RESULT_FAILED_OPERATION; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 090b7a8dd67b..30b3a08b91be 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); -ssize_t dal_ddc_service_read_dpcd_data( +enum ddc_result dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len); + uint32_t len, + uint32_t *read); enum ddc_result dal_ddc_service_write_dpcd_data( struct ddc_service *ddc, -- cgit v1.2.1 From bd4caed47a19f25fe8674344ea06d469c27ac314 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 17 Apr 2018 12:25:22 +0200 Subject: drm/amd/display: Use kvzalloc for potentially large allocations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allocating up to 32 physically contiguous pages can easily fail (and has failed for me), and isn't necessary anyway. 
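A minimal sketch of the allocation pattern this change adopts, assuming kernel context; struct demo_entry, demo_alloc() and demo_free() are placeholder names rather than anything from the display code.

#include <linux/mm.h>	/* kvzalloc(), kvfree() */
#include <linux/slab.h>

struct demo_entry {
	unsigned int r, g, b;
};

static struct demo_entry *demo_alloc(unsigned int count)
{
	/* kvzalloc() tries kmalloc() first and falls back to vmalloc()
	 * for larger sizes, so the buffer is zeroed but no longer needs
	 * physically contiguous pages.
	 */
	return kvzalloc(sizeof(struct demo_entry) * count, GFP_KERNEL);
}

static void demo_free(struct demo_entry *entries)
{
	kvfree(entries);	/* handles both kmalloc and vmalloc memory */
}

The detail mirrored throughout the hunks below is that every kvzalloc() is paired with kvfree(), since a buffer that fell back to vmalloc() cannot be released with kfree().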
Reviewed-by: Harry Wentland Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 14 ++--- .../drm/amd/display/modules/color/color_gamma.c | 72 ++++++++++++---------- 2 files changed, 45 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index ade5b8ee9c3c..132eef3826e2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -66,8 +66,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc) { struct dc *core_dc = dc; - struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state), - GFP_KERNEL); + struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), + GFP_KERNEL); if (NULL == plane_state) return NULL; @@ -120,7 +120,7 @@ static void dc_plane_state_free(struct kref *kref) { struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); destruct(plane_state); - kfree(plane_state); + kvfree(plane_state); } void dc_plane_state_release(struct dc_plane_state *plane_state) @@ -136,7 +136,7 @@ void dc_gamma_retain(struct dc_gamma *gamma) static void dc_gamma_free(struct kref *kref) { struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount); - kfree(gamma); + kvfree(gamma); } void dc_gamma_release(struct dc_gamma **gamma) @@ -147,7 +147,7 @@ void dc_gamma_release(struct dc_gamma **gamma) struct dc_gamma *dc_create_gamma(void) { - struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL); + struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL); if (gamma == NULL) goto alloc_fail; @@ -167,7 +167,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf) static void dc_transfer_func_free(struct kref *kref) { struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount); - kfree(tf); + kvfree(tf); } void dc_transfer_func_release(struct dc_transfer_func *tf) @@ -177,7 +177,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf) struct dc_transfer_func *dc_create_transfer_func(void) { - struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL); + struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL); if (tf == NULL) goto alloc_fail; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index e7e374f56864..b3747a019deb 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1093,19 +1093,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), - GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; - axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), - GFP_KERNEL); + axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), + GFP_KERNEL); if (!axix_x) goto axix_x_alloc_fail; - coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); + coeff = kvzalloc(sizeof(*coeff) * 
(MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; @@ -1157,13 +1157,13 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, ret = true; - kfree(coeff); + kvfree(coeff); coeff_alloc_fail: - kfree(axix_x); + kvfree(axix_x); axix_x_alloc_fail: - kfree(rgb_regamma); + kvfree(rgb_regamma); rgb_regamma_alloc_fail: - kfree(rgb_user); + kvfree(rgb_user); rgb_user_alloc_fail: return ret; } @@ -1192,19 +1192,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), - GFP_KERNEL); + curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!curve) goto curve_alloc_fail; - axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!axix_x) goto axix_x_alloc_fail; - coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); + coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; @@ -1246,13 +1246,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, ret = true; - kfree(coeff); + kvfree(coeff); coeff_alloc_fail: - kfree(axix_x); + kvfree(axix_x); axix_x_alloc_fail: - kfree(curve); + kvfree(curve); curve_alloc_fail: - kfree(rgb_user); + kvfree(rgb_user); rgb_user_alloc_fail: return ret; @@ -1281,8 +1281,9 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; points->end_exponent = 7; @@ -1302,11 +1303,12 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_regamma); + kvfree(rgb_regamma); } else if (trans == TRANSFER_FUNCTION_SRGB || trans == TRANSFER_FUNCTION_BT709) { - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; points->end_exponent = 0; @@ -1324,7 +1326,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_regamma); + kvfree(rgb_regamma); } rgb_regamma_alloc_fail: return ret; @@ -1348,8 +1350,9 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_degamma) goto rgb_degamma_alloc_fail; @@ -1364,11 +1367,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_degamma); + kvfree(rgb_degamma); } else if (trans == TRANSFER_FUNCTION_SRGB || trans == TRANSFER_FUNCTION_BT709) { - rgb_degamma = 
kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_degamma) goto rgb_degamma_alloc_fail; @@ -1382,7 +1386,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_degamma); + kvfree(rgb_degamma); } points->end_exponent = 0; points->x_point_at_y1_red = 1; -- cgit v1.2.1 From e6a5b9f9aee145c2f2c24431d84edfbb0d49eea5 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Mon, 30 Apr 2018 10:04:42 -0400 Subject: drm/amdgpu: Switch to interruptable wait to recover from ring hang. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: Use dma_fence_wait instead of dma_fence_wait_timeout(...,MAX_SCHEDULE_TIMEOUT) Avoid printing error message for ERESTARTSYS Originally-by: David Panariti Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 09d35051fdd6..3fabf9f97022 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -419,9 +419,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id) if (other) { signed long r; - r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); + r = dma_fence_wait(other, true); if (r < 0) { - DRM_ERROR("Error (%ld) waiting for fence!\n", r); + if (r != -ERESTARTSYS) + DRM_ERROR("Error (%ld) waiting for fence!\n", r); + return r; } } -- cgit v1.2.1 From 639f790223e62339b9cb7319ea3fae9e02c39bdb Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 7 May 2018 14:23:04 +0800 Subject: drm/amd/pp: Refine the output of pp_power_profile_mode on VI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to keep consist with Vega, the output format of the pp_power_profile_mode would be < “*” for current profile>:"detail settings" and remove the "CURRENT" mode line. 
for example: NUM MODE_NAME SCLK_UP_HYST SCLK_DOWN_HYST SCLK_ACTIVE_LEVEL MCLK_UP_HYST MCLK_DOWN_HYST MCLK_ACTIVE_LEVEL 0 3D_FULL_SCREEN: 0 100 30 0 100 10 1 POWER_SAVING: 10 0 30 - - - 2 VIDEO: - - - 10 16 31 3 VR: 0 11 50 0 100 10 4 COMPUTE: 0 5 30 - - - 5 CUSTOM *: 0 5 30 0 100 10 NUM MODE_NAME SCLK_UP_HYST SCLK_DOWN_HYST SCLK_ACTIVE_LEVEL MCLK_UP_HYST MCLK_DOWN_HYST MCLK_ACTIVE_LEVEL 0 3D_FULL_SCREEN: 0 100 30 0 100 10 1 POWER_SAVING *: 10 0 30 0 100 10 2 VIDEO: - - - 10 16 31 3 VR: 0 11 50 0 100 10 4 COMPUTE: 0 5 30 - - - 5 CUSTOM: - - - - - - Reviewed-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 52 +++++++++++------------- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 1 - 2 files changed, 23 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 26fbeafc3c96..18b5b2ff47fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -79,12 +79,13 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) -static const struct profile_mode_setting smu7_profiling[5] = +static const struct profile_mode_setting smu7_profiling[6] = {{1, 0, 100, 30, 1, 0, 100, 10}, {1, 10, 0, 30, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 10, 16, 31}, {1, 0, 11, 50, 1, 0, 100, 10}, {1, 0, 5, 30, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0}, }; /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ @@ -4864,6 +4865,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting); for (i = 0; i < len; i++) { + if (i == hwmgr->power_profile_mode) { + size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n", + i, profile_name[i], "*", + data->current_profile_setting.sclk_up_hyst, + data->current_profile_setting.sclk_down_hyst, + data->current_profile_setting.sclk_activity, + data->current_profile_setting.mclk_up_hyst, + data->current_profile_setting.mclk_down_hyst, + data->current_profile_setting.mclk_activity); + continue; + } if (smu7_profiling[i].bupdate_sclk) size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ", i, profile_name[i], smu7_profiling[i].sclk_up_hyst, @@ -4883,24 +4895,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) "-", "-", "-"); } - size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n", - i, profile_name[i], - data->custom_profile_setting.sclk_up_hyst, - data->custom_profile_setting.sclk_down_hyst, - data->custom_profile_setting.sclk_activity, - data->custom_profile_setting.mclk_up_hyst, - data->custom_profile_setting.mclk_down_hyst, - data->custom_profile_setting.mclk_activity); - - size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n", - "*", "CURRENT", - data->current_profile_setting.sclk_up_hyst, - data->current_profile_setting.sclk_down_hyst, - data->current_profile_setting.sclk_activity, - data->current_profile_setting.mclk_up_hyst, - data->current_profile_setting.mclk_down_hyst, - data->current_profile_setting.mclk_activity); - return size; } @@ -4939,16 +4933,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint if (size < 8) return -EINVAL; - data->custom_profile_setting.bupdate_sclk = input[0]; - data->custom_profile_setting.sclk_up_hyst = input[1]; - data->custom_profile_setting.sclk_down_hyst = input[2]; - 
data->custom_profile_setting.sclk_activity = input[3]; - data->custom_profile_setting.bupdate_mclk = input[4]; - data->custom_profile_setting.mclk_up_hyst = input[5]; - data->custom_profile_setting.mclk_down_hyst = input[6]; - data->custom_profile_setting.mclk_activity = input[7]; - if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) { - memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting)); + tmp.bupdate_sclk = input[0]; + tmp.sclk_up_hyst = input[1]; + tmp.sclk_down_hyst = input[2]; + tmp.sclk_activity = input[3]; + tmp.bupdate_mclk = input[4]; + tmp.mclk_up_hyst = input[5]; + tmp.mclk_down_hyst = input[6]; + tmp.mclk_activity = input[7]; + if (!smum_update_dpm_settings(hwmgr, &tmp)) { + memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting)); hwmgr->power_profile_mode = mode; } break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index f40179c9ca97..b8d0bb378595 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -325,7 +325,6 @@ struct smu7_hwmgr { uint16_t mem_latency_high; uint16_t mem_latency_low; uint32_t vr_config; - struct profile_mode_setting custom_profile_setting; struct profile_mode_setting current_profile_setting; }; -- cgit v1.2.1 From 7fc6311b174091e3283c28381e58bed3d12b6591 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 10 May 2018 19:51:09 +0800 Subject: drm/amd/pp: Fix performance drop on Fiji The performance drop if the default TDP more than 256 Watt Reviewed-by: Alex Deucher Reviewed-by: Junwei Zhang Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 03bc7453f3b1..d9e92e306535 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -852,12 +852,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - n = (n & 0xff) << 8; - if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PkgPwrSetLimit, n); + PPSMC_MSG_PkgPwrSetLimit, n<<8); return 0; } -- cgit v1.2.1 From 0eeef69022b4ea503106f5f695fd5d8ae2c72706 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Mon, 9 Apr 2018 14:55:17 -0500 Subject: drm/amd/display: Updated HDR Static Metadata to directly take info packet raw Updated HDR Static Metadata to directly take info packet raw Updating Infopacket does not require Passive Signed-off-by: Anthony Koo Reviewed-by: Anthony Koo Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 91 +---------------------- drivers/gpu/drm/amd/display/dc/dc_stream.h | 4 +- 2 files changed, 5 insertions(+), 90 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index eb8f4792198c..e1036e409877 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -2315,97 +2315,12 @@ static void set_hdr_static_info_packet( struct dc_info_packet 
*info_packet, struct dc_stream_state *stream) { - uint16_t i = 0; - enum signal_type signal = stream->signal; - uint32_t data; + /* HDR Static Metadata info packet for HDR10 */ - if (!stream->hdr_static_metadata.hdr_supported) + if (!stream->hdr_static_metadata.valid) return; - if (dc_is_hdmi_signal(signal)) { - info_packet->valid = true; - - info_packet->hb0 = 0x87; - info_packet->hb1 = 0x01; - info_packet->hb2 = 0x1A; - i = 1; - } else if (dc_is_dp_signal(signal)) { - info_packet->valid = true; - - info_packet->hb0 = 0x00; - info_packet->hb1 = 0x87; - info_packet->hb2 = 0x1D; - info_packet->hb3 = (0x13 << 2); - i = 2; - } - - data = stream->hdr_static_metadata.is_hdr; - info_packet->sb[i++] = data ? 0x02 : 0x00; - info_packet->sb[i++] = 0x00; - - data = stream->hdr_static_metadata.chromaticity_green_x / 2; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.chromaticity_green_y / 2; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.chromaticity_blue_x / 2; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.chromaticity_blue_y / 2; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.chromaticity_red_x / 2; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.chromaticity_red_y / 2; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.chromaticity_white_point_x / 2; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.chromaticity_white_point_y / 2; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.max_luminance; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.min_luminance; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.maximum_content_light_level; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - data = stream->hdr_static_metadata.maximum_frame_average_light_level; - info_packet->sb[i++] = data & 0xFF; - info_packet->sb[i++] = (data & 0xFF00) >> 8; - - if (dc_is_hdmi_signal(signal)) { - uint32_t checksum = 0; - - checksum += info_packet->hb0; - checksum += info_packet->hb1; - checksum += info_packet->hb2; - - for (i = 1; i <= info_packet->hb2; i++) - checksum += info_packet->sb[i]; - - info_packet->sb[0] = 0x100 - checksum; - } else if (dc_is_dp_signal(signal)) { - info_packet->sb[0] = 0x01; - info_packet->sb[1] = 0x1A; - } + *info_packet = stream->hdr_static_metadata; } static void set_vsc_info_packet( diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 2971cd07e093..08f1a45ed042 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -58,7 +58,7 @@ struct dc_stream_state { struct freesync_context freesync_ctx; - struct dc_hdr_static_metadata hdr_static_metadata; + struct dc_info_packet hdr_static_metadata; struct dc_transfer_func *out_transfer_func; struct colorspace_transform gamut_remap_matrix; struct csc_transform 
csc_color_matrix; @@ -113,8 +113,8 @@ struct dc_stream_update { struct rect src; struct rect dst; struct dc_transfer_func *out_transfer_func; - struct dc_hdr_static_metadata *hdr_static_metadata; enum color_transfer_func color_output_tf; + struct dc_info_packet *hdr_static_metadata; unsigned int *abm_level; unsigned long long *periodic_fn_vsync_delta; }; -- cgit v1.2.1 From 85b25034608e861ce60b771b988967ea039a06c6 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sun, 25 Mar 2018 16:41:06 -0400 Subject: drm/amd/display: Get rid of unused input_tf Signed-off-by: Anthony Koo Reviewed-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 - drivers/gpu/drm/amd/display/dc/core/dc.c | 3 --- drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 4 ---- drivers/gpu/drm/amd/display/dc/dc.h | 5 ----- 4 files changed, 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2514d7b3b66e..aa8e25a9b09e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4034,7 +4034,6 @@ static bool commit_planes_to_stream( flip_addr[i].address = plane_states[i]->address; flip_addr[i].flip_immediate = plane_states[i]->flip_immediate; plane_info[i].color_space = plane_states[i]->color_space; - plane_info[i].input_tf = plane_states[i]->input_tf; plane_info[i].format = plane_states[i]->format; plane_info[i].plane_size = plane_states[i]->plane_size; plane_info[i].rotation = plane_states[i]->rotation; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 8f09f3ab0c29..e59357724eac 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1018,9 +1018,6 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa if (u->plane_info->color_space != u->surface->color_space) update_flags->bits.color_space_change = 1; - if (u->plane_info->input_tf != u->surface->input_tf) - update_flags->bits.input_tf_change = 1; - if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) update_flags->bits.horizontal_mirror_change = 1; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index a3c87611220d..267c76766dea 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -155,7 +155,6 @@ void pre_surface_trace( "plane_state->tiling_info.gfx8.pipe_config = %d;\n" "plane_state->tiling_info.gfx8.array_mode = %d;\n" "plane_state->color_space = %d;\n" - "plane_state->input_tf = %d;\n" "plane_state->dcc.enable = %d;\n" "plane_state->format = %d;\n" "plane_state->rotation = %d;\n" @@ -163,7 +162,6 @@ void pre_surface_trace( plane_state->tiling_info.gfx8.pipe_config, plane_state->tiling_info.gfx8.array_mode, plane_state->color_space, - plane_state->input_tf, plane_state->dcc.enable, plane_state->format, plane_state->rotation, @@ -203,7 +201,6 @@ void update_surface_trace( if (update->plane_info) { SURFACE_TRACE( "plane_info->color_space = %d;\n" - "plane_info->input_tf = %d;\n" "plane_info->format = %d;\n" "plane_info->plane_size.grph.surface_pitch = %d;\n" "plane_info->plane_size.grph.surface_size.height = %d;\n" @@ -213,7 +210,6 @@ void update_surface_trace( "plane_info->rotation = %d;\n" "plane_info->stereo_format = %d;\n", 
update->plane_info->color_space, - update->plane_info->input_tf, update->plane_info->format, update->plane_info->plane_size.grph.surface_pitch, update->plane_info->plane_size.grph.surface_size.height, diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 23349148c7a4..6a47da30b281 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -333,7 +333,6 @@ enum { TRANSFER_FUNC_POINTS = 1025 }; -// Moved here from color module for linux enum color_transfer_func { transfer_func_unknown, transfer_func_srgb, @@ -420,7 +419,6 @@ union surface_update_flags { /* Medium updates */ uint32_t dcc_change:1; uint32_t color_space_change:1; - uint32_t input_tf_change:1; uint32_t horizontal_mirror_change:1; uint32_t per_pixel_alpha_change:1; uint32_t rotation_change:1; @@ -470,7 +468,6 @@ struct dc_plane_state { struct dc_hdr_static_metadata hdr_static_ctx; enum dc_color_space color_space; - enum color_transfer_func input_tf; enum surface_pixel_format format; enum dc_rotation_angle rotation; @@ -500,7 +497,6 @@ struct dc_plane_info { enum dc_rotation_angle rotation; enum plane_stereo_format stereo_format; enum dc_color_space color_space; - enum color_transfer_func input_tf; unsigned int sdr_white_level; bool horizontal_mirror; bool visible; @@ -527,7 +523,6 @@ struct dc_surface_update { * null means no updates */ struct dc_gamma *gamma; - enum color_transfer_func color_input_tf; struct dc_transfer_func *in_transfer_func; struct csc_transform *input_csc_color_matrix; -- cgit v1.2.1 From 477c000ece26a588752c9d1ed9904097e95de8c9 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sun, 25 Mar 2018 16:55:05 -0400 Subject: drm/amd/display: Remove unused fields Signed-off-by: Anthony Koo Reviewed-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 6a47da30b281..5b81ae5acdf4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -361,9 +361,6 @@ struct dc_hdr_static_metadata { uint32_t max_luminance; uint32_t maximum_content_light_level; uint32_t maximum_frame_average_light_level; - - bool hdr_supported; - bool is_hdr; }; enum dc_transfer_func_type { -- cgit v1.2.1 From 5c6161162a556e2260d3c9f61f2c02bd82ee1ae9 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Mon, 26 Mar 2018 16:14:31 -0400 Subject: drm/amd/display: Do not use os types Signed-off-by: Anthony Koo Reviewed-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 158 ++++++++++----------- drivers/gpu/drm/amd/display/include/fixed31_32.h | 40 +++--- 2 files changed, 98 insertions(+), 100 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c index 8a9bba879207..7191c3213743 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c @@ -26,13 +26,13 @@ #include "dm_services.h" #include "include/fixed31_32.h" -static inline uint64_t abs_i64( - int64_t arg) +static inline unsigned long long abs_i64( + long long arg) { if (arg > 0) - return (uint64_t)arg; + return (unsigned long long)arg; else - return (uint64_t)(-arg); + return (unsigned long 
long)(-arg); } /* @@ -40,12 +40,12 @@ static inline uint64_t abs_i64( * result = dividend / divisor * *remainder = dividend % divisor */ -static inline uint64_t complete_integer_division_u64( - uint64_t dividend, - uint64_t divisor, - uint64_t *remainder) +static inline unsigned long long complete_integer_division_u64( + unsigned long long dividend, + unsigned long long divisor, + unsigned long long *remainder) { - uint64_t result; + unsigned long long result; ASSERT(divisor); @@ -65,29 +65,29 @@ static inline uint64_t complete_integer_division_u64( (FRACTIONAL_PART_MASK & (x)) struct fixed31_32 dal_fixed31_32_from_fraction( - int64_t numerator, - int64_t denominator) + long long numerator, + long long denominator) { struct fixed31_32 res; bool arg1_negative = numerator < 0; bool arg2_negative = denominator < 0; - uint64_t arg1_value = arg1_negative ? -numerator : numerator; - uint64_t arg2_value = arg2_negative ? -denominator : denominator; + unsigned long long arg1_value = arg1_negative ? -numerator : numerator; + unsigned long long arg2_value = arg2_negative ? -denominator : denominator; - uint64_t remainder; + unsigned long long remainder; /* determine integer part */ - uint64_t res_value = complete_integer_division_u64( + unsigned long long res_value = complete_integer_division_u64( arg1_value, arg2_value, &remainder); ASSERT(res_value <= LONG_MAX); /* determine fractional part */ { - uint32_t i = FIXED31_32_BITS_PER_FRACTIONAL_PART; + unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART; do { remainder <<= 1; @@ -103,14 +103,14 @@ struct fixed31_32 dal_fixed31_32_from_fraction( /* round up LSB */ { - uint64_t summand = (remainder << 1) >= arg2_value; + unsigned long long summand = (remainder << 1) >= arg2_value; ASSERT(res_value <= LLONG_MAX - summand); res_value += summand; } - res.value = (int64_t)res_value; + res.value = (long long)res_value; if (arg1_negative ^ arg2_negative) res.value = -res.value; @@ -119,7 +119,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction( } struct fixed31_32 dal_fixed31_32_from_int_nonconst( - int64_t arg) + long long arg) { struct fixed31_32 res; @@ -132,7 +132,7 @@ struct fixed31_32 dal_fixed31_32_from_int_nonconst( struct fixed31_32 dal_fixed31_32_shl( struct fixed31_32 arg, - uint8_t shift) + unsigned char shift) { struct fixed31_32 res; @@ -181,16 +181,16 @@ struct fixed31_32 dal_fixed31_32_mul( bool arg1_negative = arg1.value < 0; bool arg2_negative = arg2.value < 0; - uint64_t arg1_value = arg1_negative ? -arg1.value : arg1.value; - uint64_t arg2_value = arg2_negative ? -arg2.value : arg2.value; + unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value; + unsigned long long arg2_value = arg2_negative ? 
-arg2.value : arg2.value; - uint64_t arg1_int = GET_INTEGER_PART(arg1_value); - uint64_t arg2_int = GET_INTEGER_PART(arg2_value); + unsigned long long arg1_int = GET_INTEGER_PART(arg1_value); + unsigned long long arg2_int = GET_INTEGER_PART(arg2_value); - uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value); - uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value); + unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value); + unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value); - uint64_t tmp; + unsigned long long tmp; res.value = arg1_int * arg2_int; @@ -200,22 +200,22 @@ struct fixed31_32 dal_fixed31_32_mul( tmp = arg1_int * arg2_fra; - ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg2_int * arg1_fra; - ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg1_fra * arg2_fra; tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + - (tmp >= (uint64_t)dal_fixed31_32_half.value); + (tmp >= (unsigned long long)dal_fixed31_32_half.value); - ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -230,13 +230,13 @@ struct fixed31_32 dal_fixed31_32_sqr( { struct fixed31_32 res; - uint64_t arg_value = abs_i64(arg.value); + unsigned long long arg_value = abs_i64(arg.value); - uint64_t arg_int = GET_INTEGER_PART(arg_value); + unsigned long long arg_int = GET_INTEGER_PART(arg_value); - uint64_t arg_fra = GET_FRACTIONAL_PART(arg_value); + unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value); - uint64_t tmp; + unsigned long long tmp; res.value = arg_int * arg_int; @@ -246,20 +246,20 @@ struct fixed31_32 dal_fixed31_32_sqr( tmp = arg_int * arg_fra; - ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; - ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; tmp = arg_fra * arg_fra; tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + - (tmp >= (uint64_t)dal_fixed31_32_half.value); + (tmp >= (unsigned long long)dal_fixed31_32_half.value); - ASSERT(tmp <= (uint64_t)(LLONG_MAX - res.value)); + ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); res.value += tmp; @@ -288,7 +288,7 @@ struct fixed31_32 dal_fixed31_32_sinc( struct fixed31_32 res = dal_fixed31_32_one; - int32_t n = 27; + int n = 27; struct fixed31_32 arg_norm = arg; @@ -299,7 +299,7 @@ struct fixed31_32 dal_fixed31_32_sinc( arg_norm, dal_fixed31_32_mul_int( dal_fixed31_32_two_pi, - (int32_t)div64_s64( + (int)div64_s64( arg_norm.value, dal_fixed31_32_two_pi.value))); } @@ -343,7 +343,7 @@ struct fixed31_32 dal_fixed31_32_cos( struct fixed31_32 res = dal_fixed31_32_one; - int32_t n = 26; + int n = 26; do { res = dal_fixed31_32_sub( @@ -370,7 +370,7 @@ struct fixed31_32 dal_fixed31_32_cos( static struct fixed31_32 fixed31_32_exp_from_taylor_series( struct fixed31_32 arg) { - uint32_t n = 9; + unsigned int n = 9; struct fixed31_32 res = dal_fixed31_32_from_fraction( n + 2, @@ -409,7 +409,7 @@ struct fixed31_32 dal_fixed31_32_exp( if (dal_fixed31_32_le( dal_fixed31_32_ln2_div_2, dal_fixed31_32_abs(arg))) { - int32_t m = dal_fixed31_32_round( + int m = dal_fixed31_32_round( dal_fixed31_32_div( arg, dal_fixed31_32_ln2)); @@ -429,7 +429,7 @@ struct fixed31_32 dal_fixed31_32_exp( if (m > 0) return dal_fixed31_32_shl( 
fixed31_32_exp_from_taylor_series(r), - (uint8_t)m); + (unsigned char)m); else return dal_fixed31_32_div_int( fixed31_32_exp_from_taylor_series(r), @@ -482,50 +482,50 @@ struct fixed31_32 dal_fixed31_32_pow( arg2)); } -int32_t dal_fixed31_32_floor( +int dal_fixed31_32_floor( struct fixed31_32 arg) { - uint64_t arg_value = abs_i64(arg.value); + unsigned long long arg_value = abs_i64(arg.value); if (arg.value >= 0) - return (int32_t)GET_INTEGER_PART(arg_value); + return (int)GET_INTEGER_PART(arg_value); else - return -(int32_t)GET_INTEGER_PART(arg_value); + return -(int)GET_INTEGER_PART(arg_value); } -int32_t dal_fixed31_32_round( +int dal_fixed31_32_round( struct fixed31_32 arg) { - uint64_t arg_value = abs_i64(arg.value); + unsigned long long arg_value = abs_i64(arg.value); - const int64_t summand = dal_fixed31_32_half.value; + const long long summand = dal_fixed31_32_half.value; - ASSERT(LLONG_MAX - (int64_t)arg_value >= summand); + ASSERT(LLONG_MAX - (long long)arg_value >= summand); arg_value += summand; if (arg.value >= 0) - return (int32_t)GET_INTEGER_PART(arg_value); + return (int)GET_INTEGER_PART(arg_value); else - return -(int32_t)GET_INTEGER_PART(arg_value); + return -(int)GET_INTEGER_PART(arg_value); } -int32_t dal_fixed31_32_ceil( +int dal_fixed31_32_ceil( struct fixed31_32 arg) { - uint64_t arg_value = abs_i64(arg.value); + unsigned long long arg_value = abs_i64(arg.value); - const int64_t summand = dal_fixed31_32_one.value - + const long long summand = dal_fixed31_32_one.value - dal_fixed31_32_epsilon.value; - ASSERT(LLONG_MAX - (int64_t)arg_value >= summand); + ASSERT(LLONG_MAX - (long long)arg_value >= summand); arg_value += summand; if (arg.value >= 0) - return (int32_t)GET_INTEGER_PART(arg_value); + return (int)GET_INTEGER_PART(arg_value); else - return -(int32_t)GET_INTEGER_PART(arg_value); + return -(int)GET_INTEGER_PART(arg_value); } /* this function is a generic helper to translate fixed point value to @@ -535,15 +535,15 @@ int32_t dal_fixed31_32_ceil( * part in 32 bits. It is used in hw programming (scaler) */ -static inline uint32_t ux_dy( - int64_t value, - uint32_t integer_bits, - uint32_t fractional_bits) +static inline unsigned int ux_dy( + long long value, + unsigned int integer_bits, + unsigned int fractional_bits) { /* 1. create mask of integer part */ - uint32_t result = (1 << integer_bits) - 1; + unsigned int result = (1 << integer_bits) - 1; /* 2. mask out fractional part */ - uint32_t fractional_part = FRACTIONAL_PART_MASK & value; + unsigned int fractional_part = FRACTIONAL_PART_MASK & value; /* 3. shrink fixed point integer part to be of integer_bits width*/ result &= GET_INTEGER_PART(value); /* 4. 
make space for fractional part to be filled in after integer */ @@ -554,13 +554,13 @@ static inline uint32_t ux_dy( return result | fractional_part; } -static inline uint32_t clamp_ux_dy( - int64_t value, - uint32_t integer_bits, - uint32_t fractional_bits, - uint32_t min_clamp) +static inline unsigned int clamp_ux_dy( + long long value, + unsigned int integer_bits, + unsigned int fractional_bits, + unsigned int min_clamp) { - uint32_t truncated_val = ux_dy(value, integer_bits, fractional_bits); + unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits); if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART))) return (1 << (integer_bits + fractional_bits)) - 1; @@ -570,35 +570,35 @@ static inline uint32_t clamp_ux_dy( return min_clamp; } -uint32_t dal_fixed31_32_u2d19( +unsigned int dal_fixed31_32_u2d19( struct fixed31_32 arg) { return ux_dy(arg.value, 2, 19); } -uint32_t dal_fixed31_32_u0d19( +unsigned int dal_fixed31_32_u0d19( struct fixed31_32 arg) { return ux_dy(arg.value, 0, 19); } -uint32_t dal_fixed31_32_clamp_u0d14( +unsigned int dal_fixed31_32_clamp_u0d14( struct fixed31_32 arg) { return clamp_ux_dy(arg.value, 0, 14, 1); } -uint32_t dal_fixed31_32_clamp_u0d10( +unsigned int dal_fixed31_32_clamp_u0d10( struct fixed31_32 arg) { return clamp_ux_dy(arg.value, 0, 10, 1); } -int32_t dal_fixed31_32_s4d19( +int dal_fixed31_32_s4d19( struct fixed31_32 arg) { if (arg.value < 0) - return -(int32_t)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19); + return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19); else return ux_dy(arg.value, 4, 19); } diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 0de258622c12..16cbdb43d856 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -26,8 +26,6 @@ #ifndef __DAL_FIXED31_32_H__ #define __DAL_FIXED31_32_H__ -#include "os_types.h" - #define FIXED31_32_BITS_PER_FRACTIONAL_PART 32 /* @@ -44,7 +42,7 @@ */ struct fixed31_32 { - int64_t value; + long long value; }; /* @@ -73,15 +71,15 @@ static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL }; * result = numerator / denominator */ struct fixed31_32 dal_fixed31_32_from_fraction( - int64_t numerator, - int64_t denominator); + long long numerator, + long long denominator); /* * @brief * result = arg */ -struct fixed31_32 dal_fixed31_32_from_int_nonconst(int64_t arg); -static inline struct fixed31_32 dal_fixed31_32_from_int(int64_t arg) +struct fixed31_32 dal_fixed31_32_from_int_nonconst(long long arg); +static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg) { if (__builtin_constant_p(arg)) { struct fixed31_32 res; @@ -213,7 +211,7 @@ static inline struct fixed31_32 dal_fixed31_32_clamp( */ struct fixed31_32 dal_fixed31_32_shl( struct fixed31_32 arg, - uint8_t shift); + unsigned char shift); /* * @brief @@ -221,7 +219,7 @@ struct fixed31_32 dal_fixed31_32_shl( */ static inline struct fixed31_32 dal_fixed31_32_shr( struct fixed31_32 arg, - uint8_t shift) + unsigned char shift) { struct fixed31_32 res; res.value = arg.value >> shift; @@ -246,7 +244,7 @@ struct fixed31_32 dal_fixed31_32_add( * result = arg1 + arg2 */ static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1, - int32_t arg2) + int arg2) { return dal_fixed31_32_add(arg1, dal_fixed31_32_from_int(arg2)); @@ -265,7 +263,7 @@ struct fixed31_32 dal_fixed31_32_sub( * result = arg1 - arg2 */ static inline struct fixed31_32 
dal_fixed31_32_sub_int(struct fixed31_32 arg1, - int32_t arg2) + int arg2) { return dal_fixed31_32_sub(arg1, dal_fixed31_32_from_int(arg2)); @@ -291,7 +289,7 @@ struct fixed31_32 dal_fixed31_32_mul( * result = arg1 * arg2 */ static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1, - int32_t arg2) + int arg2) { return dal_fixed31_32_mul(arg1, dal_fixed31_32_from_int(arg2)); @@ -309,7 +307,7 @@ struct fixed31_32 dal_fixed31_32_sqr( * result = arg1 / arg2 */ static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1, - int64_t arg2) + long long arg2) { return dal_fixed31_32_from_fraction(arg1.value, dal_fixed31_32_from_int(arg2).value); @@ -434,21 +432,21 @@ struct fixed31_32 dal_fixed31_32_pow( * @brief * result = floor(arg) := greatest integer lower than or equal to arg */ -int32_t dal_fixed31_32_floor( +int dal_fixed31_32_floor( struct fixed31_32 arg); /* * @brief * result = round(arg) := integer nearest to arg */ -int32_t dal_fixed31_32_round( +int dal_fixed31_32_round( struct fixed31_32 arg); /* * @brief * result = ceil(arg) := lowest integer greater than or equal to arg */ -int32_t dal_fixed31_32_ceil( +int dal_fixed31_32_ceil( struct fixed31_32 arg); /* the following two function are used in scaler hw programming to convert fixed @@ -457,20 +455,20 @@ int32_t dal_fixed31_32_ceil( * fractional */ -uint32_t dal_fixed31_32_u2d19( +unsigned int dal_fixed31_32_u2d19( struct fixed31_32 arg); -uint32_t dal_fixed31_32_u0d19( +unsigned int dal_fixed31_32_u0d19( struct fixed31_32 arg); -uint32_t dal_fixed31_32_clamp_u0d14( +unsigned int dal_fixed31_32_clamp_u0d14( struct fixed31_32 arg); -uint32_t dal_fixed31_32_clamp_u0d10( +unsigned int dal_fixed31_32_clamp_u0d10( struct fixed31_32 arg); -int32_t dal_fixed31_32_s4d19( +int dal_fixed31_32_s4d19( struct fixed31_32 arg); #endif -- cgit v1.2.1 From 586f27a3c2b04e041c2d51c6c2374fbfa3075407 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Mon, 26 Mar 2018 16:19:18 -0400 Subject: drm/amd/display: csc_transform to dc_csc_transform Signed-off-by: Anthony Koo Reviewed-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 4 ++-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 5 +++++ drivers/gpu/drm/amd/display/dc/dc_stream.h | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 5 ----- drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h | 2 +- drivers/gpu/drm/amd/display/dc/inc/hw/transform.h | 2 +- 9 files changed, 13 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 5b81ae5acdf4..588672cbfbf2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -457,7 +457,7 @@ struct dc_plane_state { struct dc_gamma *gamma_correction; struct dc_transfer_func *in_transfer_func; struct dc_bias_and_scale *bias_and_scale; - struct csc_transform input_csc_color_matrix; + struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; uint32_t sdr_white_level; @@ -522,7 +522,7 @@ struct dc_surface_update { struct dc_gamma *gamma; struct dc_transfer_func *in_transfer_func; - struct csc_transform *input_csc_color_matrix; + struct dc_csc_transform *input_csc_color_matrix; struct fixed31_32 *coeff_reduction_factor; }; diff --git 
a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index b83a7dc2f5a9..b1f70579d61b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -423,6 +423,11 @@ enum dc_gamma_type { GAMMA_CS_TFM_1D = 3, }; +struct dc_csc_transform { + uint16_t matrix[12]; + bool enable_adjustment; +}; + struct dc_gamma { struct kref refcount; enum dc_gamma_type type; diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 08f1a45ed042..ed3c39f132fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -61,7 +61,7 @@ struct dc_stream_state { struct dc_info_packet hdr_static_metadata; struct dc_transfer_func *out_transfer_func; struct colorspace_transform gamut_remap_matrix; - struct csc_transform csc_color_matrix; + struct dc_csc_transform csc_color_matrix; enum dc_color_space output_color_space; enum dc_dither_option dither_option; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 5f40a7374c02..c008a71ebc4e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -264,7 +264,7 @@ void dpp1_cnv_setup ( struct dpp *dpp_base, enum surface_pixel_format format, enum expansion_mode mode, - struct csc_transform input_csc_color_matrix, + struct dc_csc_transform input_csc_color_matrix, enum dc_color_space input_color_space) { uint32_t pixel_format; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index 9b5ff76a8027..3fccf9959305 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1451,7 +1451,7 @@ void dpp1_cnv_setup ( struct dpp *dpp_base, enum surface_pixel_format format, enum expansion_mode mode, - struct csc_transform input_csc_color_matrix, + struct dc_csc_transform input_csc_color_matrix, enum dc_color_space input_color_space); void dpp1_full_bypass(struct dpp *dpp_base); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index 99995608b620..bb7af1b1c7b3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -117,7 +117,7 @@ struct dpp_funcs { struct dpp *dpp_base, enum surface_pixel_format format, enum expansion_mode mode, - struct csc_transform input_csc_color_matrix, + struct dc_csc_transform input_csc_color_matrix, enum dc_color_space input_color_space); void (*dpp_full_bypass)(struct dpp *dpp_base); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 93da44527d2e..9fe73028d588 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -140,11 +140,6 @@ enum opp_regamma { OPP_REGAMMA_USER }; -struct csc_transform { - uint16_t matrix[12]; - bool enable_adjustment; -}; - struct dc_bias_and_scale { uint16_t scale_red; uint16_t bias_red; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h index 2109eac20a3d..b2fa4c4cd920 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/ipp.h @@ -87,7 +87,7 @@ struct ipp_funcs { struct input_pixel_processor *ipp, enum surface_pixel_format format, enum expansion_mode mode, - struct 
csc_transform input_csc_color_matrix, + struct dc_csc_transform input_csc_color_matrix, enum dc_color_space input_color_space); /* DCE function to setup IPP. TODO: see if we can consolidate to setup */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h index c5b3623bcbd9..fecc80c47c26 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h @@ -252,7 +252,7 @@ struct transform_funcs { struct transform *xfm_base, enum surface_pixel_format format, enum expansion_mode mode, - struct csc_transform input_csc_color_matrix, + struct dc_csc_transform input_csc_color_matrix, enum dc_color_space input_color_space); void (*ipp_full_bypass)(struct transform *xfm_base); -- cgit v1.2.1 From a2e8f540c4efa7a8f180c910d202469b3ed4f5ba Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Mon, 9 Apr 2018 14:57:47 -0500 Subject: drm/amd/display: Refactor color module Remove some unnecessary TF definitions from update structures Signed-off-by: Anthony Koo Reviewed-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_stream.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index ed3c39f132fd..d7e6d53bb383 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -67,7 +67,6 @@ struct dc_stream_state { enum dc_dither_option dither_option; enum view_3d_format view_format; - enum color_transfer_func output_tf; bool ignore_msa_timing_param; @@ -113,9 +112,9 @@ struct dc_stream_update { struct rect src; struct rect dst; struct dc_transfer_func *out_transfer_func; - enum color_transfer_func color_output_tf; struct dc_info_packet *hdr_static_metadata; unsigned int *abm_level; + unsigned long long *periodic_fn_vsync_delta; }; -- cgit v1.2.1 From 28177772cbf693a6960e92bba1f08a0e78acb048 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Mon, 26 Mar 2018 16:29:51 -0400 Subject: drm/amd/display: move color_transfer_func to color mod Signed-off-by: Anthony Koo Reviewed-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 588672cbfbf2..0f566a1ba35b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -333,19 +333,6 @@ enum { TRANSFER_FUNC_POINTS = 1025 }; -enum color_transfer_func { - transfer_func_unknown, - transfer_func_srgb, - transfer_func_bt709, - transfer_func_pq2084, - transfer_func_pq2084_interim, - transfer_func_linear_0_1, - transfer_func_linear_0_125, - transfer_func_dolbyvision, - transfer_func_gamma_22, - transfer_func_gamma_26 -}; - struct dc_hdr_static_metadata { /* display chromaticities and white point in units of 0.00001 */ unsigned int chromaticity_green_x; @@ -693,6 +680,7 @@ struct dc_cursor { struct dc_cursor_attributes attributes; }; + /******************************************************************************* * Interrupt interfaces ******************************************************************************/ -- cgit v1.2.1 From 754e3673201eb192be68b8a07f2e448d75f69dfe Mon Sep 17 00:00:00 2001 From: 
Anthony Koo Date: Tue, 27 Mar 2018 23:12:21 -0400 Subject: drm/amd/display: Fix structure initialization of hdmi_info_packet Signed-off-by: Anthony Koo Reviewed-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index e1036e409877..8d7bc1fa9ffe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1853,11 +1853,13 @@ static void set_avi_info_frame( unsigned int cn0_cn1_value = 0; uint8_t *check_sum = NULL; uint8_t byte_index = 0; - union hdmi_info_packet hdmi_info = {0}; + union hdmi_info_packet hdmi_info; union display_content_support support = {0}; unsigned int vic = pipe_ctx->stream->timing.vic; enum dc_timing_3d_format format; + memset(&hdmi_info, 0, sizeof(union hdmi_info_packet)); + color_space = pipe_ctx->stream->output_color_space; if (color_space == COLOR_SPACE_UNKNOWN) color_space = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? -- cgit v1.2.1 From e43a432c018a9a2c2641e1f8c08a836cc83982cd Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 27 Mar 2018 16:43:56 -0400 Subject: drm/amd/display: Have DC manage its own allocation of gamma Creating plane will also allocate gamma and input TF Creating stream will also allocate outputTF Fix issue with gamma not applied OS may call SetGamma before surface committed, so need to store in target and apply later. Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++---- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 6 ++++-- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 8 +++++++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 5 ++--- 4 files changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index aa8e25a9b09e..18f221b0349d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2200,7 +2200,6 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, const struct drm_connector *connector) { struct dc_crtc_timing *timing_out = &stream->timing; - struct dc_transfer_func *tf = dc_create_transfer_func(); memset(timing_out, 0, sizeof(struct dc_crtc_timing)); @@ -2244,9 +2243,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream, stream->output_color_space = get_output_color_space(timing_out); - tf->type = TF_TYPE_PREDEFINED; - tf->tf = TRANSFER_FUNCTION_SRGB; - stream->out_transfer_func = tf; + stream->out_transfer_func->type = TF_TYPE_PREDEFINED; + stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; } static void fill_audio_info(struct audio_info *audio_info, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index ce0747ed0f00..3b2ddbd8c054 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -101,14 +101,16 @@ static void construct(struct dc_stream_state *stream, stream->status.link = stream->sink->link; update_stream_signal(stream); + + stream->out_transfer_func 
= dc_create_transfer_func(); + stream->out_transfer_func->type = TF_TYPE_BYPASS; } static void destruct(struct dc_stream_state *stream) { dc_sink_release(stream->sink); if (stream->out_transfer_func != NULL) { - dc_transfer_func_release( - stream->out_transfer_func); + dc_transfer_func_release(stream->out_transfer_func); stream->out_transfer_func = NULL; } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index ade5b8ee9c3c..959387705965 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -38,6 +38,12 @@ static void construct(struct dc_context *ctx, struct dc_plane_state *plane_state) { plane_state->ctx = ctx; + + plane_state->gamma_correction = dc_create_gamma(); + plane_state->gamma_correction->is_identity = true; + + plane_state->in_transfer_func = dc_create_transfer_func(); + plane_state->in_transfer_func->type = TF_TYPE_BYPASS; } static void destruct(struct dc_plane_state *plane_state) @@ -175,7 +181,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf) kref_put(&tf->refcount, dc_transfer_func_free); } -struct dc_transfer_func *dc_create_transfer_func(void) +struct dc_transfer_func *dc_create_transfer_func() { struct dc_transfer_func *tf = kzalloc(sizeof(*tf), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index f3341a2399fa..a6cf9ade9131 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -956,9 +956,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx, tf = plane_state->in_transfer_func; if (plane_state->gamma_correction && - plane_state->gamma_correction->is_identity) - dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS); - else if (plane_state->gamma_correction && dce_use_lut(plane_state->format)) + !plane_state->gamma_correction->is_identity + && dce_use_lut(plane_state->format)) dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction); if (tf == NULL) -- cgit v1.2.1 From 84ffa80123f56f80145dc638f21dfcbedda5610d Mon Sep 17 00:00:00 2001 From: "Leo (Sunpeng) Li" Date: Thu, 29 Mar 2018 17:04:12 -0400 Subject: drm/amd/display: Fix dim display on DCE11 Before programming the input gamma, check that we're not using the identity correction. 
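For illustration only: the guard added in the hunk below keys off the dc_gamma is_identity flag that DC now sets when it allocates the default gamma. A minimal sketch of how such a flag could be derived from a user-supplied ramp follows; the helper name and the flat 16-bit ramp layout are assumptions made for the sketch, not the actual dc_gamma representation.

	/* Hypothetical helper, not part of this patch: returns true when the
	 * ramp is the linear identity map (entry i == i scaled to max).  In
	 * that case programming the input LUT is redundant, and per this fix
	 * the DCE11 path should skip it to avoid the dim-display symptom.
	 */
	static bool ramp_is_identity(const uint16_t *lut, unsigned int n,
				     uint16_t max)
	{
		unsigned int i;

		if (n < 2)
			return true;

		for (i = 0; i < n; i++)
			if (lut[i] != (uint16_t)(((uint32_t)i * max) / (n - 1)))
				return false;

		return true;
	}
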
Signed-off-by: Leo (Sunpeng) Li Reviewed-by: Anthony Koo Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 78bf4fae9e0d..52427ae42e0f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -280,7 +280,9 @@ dce110_set_input_transfer_func(struct pipe_ctx *pipe_ctx, build_prescale_params(&prescale_params, plane_state); ipp->funcs->ipp_program_prescale(ipp, &prescale_params); - if (plane_state->gamma_correction && dce_use_lut(plane_state->format)) + if (plane_state->gamma_correction && + !plane_state->gamma_correction->is_identity && + dce_use_lut(plane_state->format)) ipp->funcs->ipp_program_input_lut(ipp, plane_state->gamma_correction); if (tf == NULL) { -- cgit v1.2.1 From e405c2173e8c1c8e2e823592581ba3f774e7811c Mon Sep 17 00:00:00 2001 From: "Leo (Sunpeng) Li" Date: Wed, 4 Apr 2018 16:01:30 -0400 Subject: drm/amd/display: Fix memleak on input transfer function Input transfer function creation is now done when the plane is created. This is done within the following change: Author: Anthony Koo drm/amd/display: Have DC manage its own allocation of gamma Therefore, we no longer need to create it when filling in the plane attributes. Signed-off-by: Leo (Sunpeng) Li Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 18f221b0349d..265f0166f688 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2011,7 +2011,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev, const struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(plane_state->fb); const struct drm_crtc *crtc = plane_state->crtc; - struct dc_transfer_func *input_tf; int ret = 0; if (!fill_rects_from_plane_state(plane_state, dc_plane_state)) @@ -2025,13 +2024,6 @@ static int fill_plane_attributes(struct amdgpu_device *adev, if (ret) return ret; - input_tf = dc_create_transfer_func(); - - if (input_tf == NULL) - return -ENOMEM; - - dc_plane_state->in_transfer_func = input_tf; - /* * Always set input transfer function, since plane state is refreshed * every time. -- cgit v1.2.1 From 1bb5afd768b950e9ddcb62b3c31bce8bed3ef774 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Sun, 8 Apr 2018 14:39:18 +0800 Subject: drm/amdgpu: fix null pointer panic with direct fw loading on gpu reset When system uses fw direct loading, then psp context structure won't be initiliazed. And it is also unable to execute mode reset. [ 434.601474] amdgpu 0000:0c:00.0: GPU reset begin! 
[ 434.694326] amdgpu 0000:0c:00.0: GPU reset [ 434.743152] BUG: unable to handle kernel NULL pointer dereference at 0000000000000058 [ 434.838474] IP: psp_gpu_reset+0xc/0x30 [amdgpu] [ 434.893532] PGD 406ed9067 [ 434.893533] P4D 406ed9067 [ 434.926376] PUD 400b46067 [ 434.959217] PMD 0 [ 435.033379] Oops: 0000 [#1] SMP [ 435.072573] Modules linked in: amdgpu(OE) chash(OE) gpu_sched(OE) ttm(OE) drm_kms_helper(OE) drm(OE) fb_sys_fops syscopyarea sysfillrect sysimgblt rpcsec_gss_krb5 auth_rpcgss nfsv4 nfs lockd grace fscache snd_hda_codec_realtek snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_codec snd_hda_core snd_hwdep snd_pcm edac_mce_amd snd_seq_midi snd_seq_midi_event kvm_amd snd_rawmidi kvm irqbypass crct10dif_pclmul crc32_pclmul snd_seq ghash_clmulni_intel snd_seq_device pcbc snd_timer eeepc_wmi aesni_intel snd asus_wmi aes_x86_64 sparse_keymap crypto_simd glue_helper joydev soundcore wmi_bmof cryptd video i2c_piix4 shpchp 8250_dw i2c_designware_platform mac_hid i2c_designware_core sunrpc parport_pc ppdev lp parport autofs4 hid_generic igb usbhid dca ptp mxm_wmi pps_core ahci hid i2c_algo_bit [ 435.931754] libahci wmi Signed-off-by: Huang Rui Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 19e71f4a8ac2..c7d43e064fc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -505,6 +505,9 @@ failed: int psp_gpu_reset(struct amdgpu_device *adev) { + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) + return 0; + return psp_mode1_reset(&adev->psp); } -- cgit v1.2.1 From f73f9e35a2a7c8cee3691a4b7313bbc3b95eec6b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 4 Apr 2018 12:36:57 +0800 Subject: drm/amd/pp: Refine pp_atomfwctrl_get_vbios_bootup_values In order to share pp_atomfwctrl_get_vbios_bootup_values on asics with different BIOS_CLKID. Not call function pp_atomfwctrl_get_clk_information_by_clkid in pp_atomfwctrl_get_vbios_bootup_values. 
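For clarity, the calling convention after this refactor, condensed from the vega10 hunk below (return-value checks are omitted here; a different ASIC would pass the clock IDs from its own BIOS_CLKID enum instead of the SMU9 ones):

	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;

	/* shared helper: fills the generic boot-up values and no longer
	 * references any BIOS_CLKID internally
	 */
	pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);

	/* the ASIC-specific hwmgr supplies the clock IDs it knows about */
	pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
			SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk);
	pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
			SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk);
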
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 9 +-------- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h | 2 ++ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 6 ++++++ 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index 0adaf36b6d68..c97b0e5ba43b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -488,7 +488,7 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr, return 0; } -int pp_atomfwctrl__get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency) +int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKID id, uint32_t *frequency) { struct amdgpu_device *adev = hwmgr->adev; struct atom_get_smu_clock_info_parameters_v3_1 parameters; @@ -515,7 +515,6 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, { struct atom_firmware_info_v3_1 *info = NULL; uint16_t ix; - uint32_t frequency = 0; ix = GetIndexIntoMasterDataTable(firmwareinfo); info = (struct atom_firmware_info_v3_1 *) @@ -538,12 +537,6 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, boot_values->ulSocClk = 0; boot_values->ulDCEFClk = 0; - if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) - boot_values->ulSocClk = frequency; - - if (!pp_atomfwctrl__get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) - boot_values->ulDCEFClk = frequency; - return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h index 8df1e84f27c9..fe10aa4db5e6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h @@ -230,6 +230,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_bios_boot_up_values *boot_values); int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, struct pp_atomfwctrl_smc_dpm_parameters *param); +int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, + BIOS_CLKID id, uint32_t *frequency); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index c9fb4b2cf5c6..ba299424f8f6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -2481,6 +2481,12 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) data->vbios_boot_state.mvddc = boot_up_values.usMvddc; data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; + pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, + SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk); + + pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, + SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk); + data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; if (0 != boot_up_values.usVddc) { -- cgit v1.2.1 From e6636ae1b7aab30a1fb4ea7805b5b6b2494eca71 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 10 Apr 2018 12:30:59 +0800 Subject: drm/amdgpu: add MP1 and THM hw ip base reg offset Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- 
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 21272ce74b56..7e5defbfc3b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1391,6 +1391,7 @@ enum amd_hw_ip_block_type { ATHUB_HWIP, NBIO_HWIP, MP0_HWIP, + MP1_HWIP, UVD_HWIP, VCN_HWIP = UVD_HWIP, VCE_HWIP, @@ -1400,6 +1401,7 @@ enum amd_hw_ip_block_type { SMUIO_HWIP, PWR_HWIP, NBIF_HWIP, + THM_HWIP, MAX_HWIP }; diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c index 4c45db7f1157..45aafca7f315 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c @@ -38,6 +38,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); + adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i])); adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i])); adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i])); adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i])); @@ -49,7 +50,7 @@ int vega10_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[PWR_HWIP][i] = (uint32_t *)(&(PWR_BASE.instance[i])); adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIF_BASE.instance[i])); - + adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); } return 0; } -- cgit v1.2.1 From b8a5559112714bb328330dbf2a4a1912e8c7a462 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 10 Apr 2018 12:32:16 +0800 Subject: drm/amd/pp: use soc15 common macros instead of vega10 specific pp_soc15.h is vega10 specific. Update powerplay code to use soc15 common macros defined in soc15_common.h. 
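The conversion is one mechanical pattern applied throughout; it is condensed here for reference from the vega10 thermal hunks below ('reg' and 'mode' are stand-in locals, and the new form relies on a struct amdgpu_device *adev = hwmgr->adev local being in scope):

	/* before: per-ASIC offset lookup via pp_soc15.h plus cgs accessors */
	reg = soc15_get_register_offset(THM_HWID, 0,
			mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2);
	mode = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg),
			CG_FDO_CTRL2, FDO_PWM_MODE);

	/* after: common soc15_common.h macros, keyed by HW IP block name */
	mode = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
			CG_FDO_CTRL2, FDO_PWM_MODE);
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
			REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
					CG_FDO_CTRL2, FDO_PWM_MODE, mode));
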
Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 7 +- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 16 +-- .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 50 ++++------ .../gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 107 ++++++++------------- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 1 - .../gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c | 37 +++---- drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h | 52 ---------- .../gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 37 +++---- .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 50 ++++------ .../gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c | 56 ++++------- 10 files changed, 133 insertions(+), 280 deletions(-) delete mode 100644 drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 055358b95fdf..6ba3b1fa57aa 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -34,7 +34,7 @@ #include "rv_ppsmc.h" #include "smu10_hwmgr.h" #include "power_state.h" -#include "pp_soc15.h" +#include "soc15_common.h" #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */ @@ -947,9 +947,8 @@ static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simpl static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr) { - uint32_t reg_offset = soc15_get_register_offset(THM_HWID, 0, - mmTHM_TCON_CUR_TMP_BASE_IDX, mmTHM_TCON_CUR_TMP); - uint32_t reg_value = cgs_read_register(hwmgr->device, reg_offset); + struct amdgpu_device *adev = hwmgr->adev; + uint32_t reg_value = RREG32_SOC15(THM, 0, mmTHM_TCON_CUR_TMP); int cur_temp = (reg_value & THM_TCON_CUR_TMP__CUR_TEMP_MASK) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index ba299424f8f6..f6427c88f6a7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -36,7 +36,7 @@ #include "smu9.h" #include "smu9_driver_if.h" #include "vega10_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "pppcielanes.h" #include "vega10_hwmgr.h" #include "vega10_processpptables.h" @@ -754,7 +754,6 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) uint32_t config_telemetry = 0; struct pp_atomfwctrl_voltage_table vol_table; struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL); if (data == NULL) @@ -860,10 +859,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) advanceFanControlParameters.usFanPWMMinLimit * hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; - reg = soc15_get_register_offset(DF_HWID, 0, - mmDF_CS_AON0_DramBaseAddress0_BASE_IDX, - mmDF_CS_AON0_DramBaseAddress0); - data->mem_channels = (cgs_read_register(hwmgr->device, reg) & + data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) & DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >> DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number), @@ -3808,11 +3804,12 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { + struct 
amdgpu_device *adev = hwmgr->adev; uint32_t sclk_idx, mclk_idx, activity_percent = 0; struct vega10_hwmgr *data = hwmgr->backend; struct vega10_dpm_table *dpm_table = &data->dpm_table; int ret = 0; - uint32_t reg, val_vid; + uint32_t val_vid; switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: @@ -3862,10 +3859,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, } break; case AMDGPU_PP_SENSOR_VDDGFX: - reg = soc15_get_register_offset(SMUIO_HWID, 0, - mmSMUSVI0_PLANE0_CURRENTVID_BASE_IDX, - mmSMUSVI0_PLANE0_CURRENTVID); - val_vid = (cgs_read_register(hwmgr->device, reg) & + val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) & SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >> SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT; *((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c index 203a6918395b..a9efd8554fbc 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -27,7 +27,7 @@ #include "vega10_ppsmc.h" #include "vega10_inc.h" #include "pp_debug.h" -#include "pp_soc15.h" +#include "soc15_common.h" static const struct vega10_didt_config_reg SEDiDtTuningCtrlConfig_Vega10[] = { @@ -888,36 +888,36 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable) if (PP_CAP(PHM_PlatformCaps_DiDtEDCEnable)) { if (PP_CAP(PHM_PlatformCaps_SQRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_SQ_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_EDC_CTRL, data); } if (PP_CAP(PHM_PlatformCaps_DBRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_DB_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_EDC_CTRL, data); } if (PP_CAP(PHM_PlatformCaps_TDRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_TD_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_EDC_CTRL, data); } if (PP_CAP(PHM_PlatformCaps_TCPRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_TCP_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_EDC_CTRL, data); } if (PP_CAP(PHM_PlatformCaps_DBRRamping)) { data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL); - data = CGS_REG_SET_FIELD(data, 
DIDT_DBR_EDC_CTRL, EDC_EN, en); - data = CGS_REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en); + data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_EN, en); + data = REG_SET_FIELD(data, DIDT_DBR_EDC_CTRL, EDC_SW_RST, ~en); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DBR_EDC_CTRL, data); } } @@ -933,17 +933,15 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - uint32_t reg; num_se = adev->gfx.config.max_shader_engines; adev->gfx.rlc.funcs->enter_safe_mode(adev); mutex_lock(&adev->grbm_idx_mutex); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, reg, data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); @@ -958,7 +956,7 @@ static int vega10_enable_cac_driving_se_didt_config(struct pp_hwmgr *hwmgr) if (0 != result) break; } - cgs_write_register(hwmgr->device, reg, 0xE0000000); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); @@ -986,17 +984,15 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - uint32_t reg; num_se = adev->gfx.config.max_shader_engines; adev->gfx.rlc.funcs->enter_safe_mode(adev); mutex_lock(&adev->grbm_idx_mutex); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, reg, data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result = vega10_program_didt_config_registers(hwmgr, SEDiDtStallCtrlConfig_vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEDiDtStallPatternConfig_vega10, VEGA10_CONFIGREG_DIDT); @@ -1005,7 +1001,7 @@ static int vega10_enable_psm_gc_didt_config(struct pp_hwmgr *hwmgr) if (0 != result) break; } - cgs_write_register(hwmgr->device, reg, 0xE0000000); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); @@ -1049,17 +1045,15 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) struct amdgpu_device *adev = hwmgr->adev; int result; uint32_t num_se = 0, count, data; - uint32_t reg; num_se = adev->gfx.config.max_shader_engines; adev->gfx.rlc.funcs->enter_safe_mode(adev); mutex_lock(&adev->grbm_idx_mutex); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, reg, data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result = vega10_program_didt_config_registers(hwmgr, SEDiDtWeightConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= 
vega10_program_didt_config_registers(hwmgr, SEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, SEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); @@ -1070,7 +1064,7 @@ static int vega10_enable_se_edc_config(struct pp_hwmgr *hwmgr) if (0 != result) break; } - cgs_write_register(hwmgr->device, reg, 0xE0000000); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); @@ -1099,7 +1093,6 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) int result; uint32_t num_se = 0; uint32_t count, data; - uint32_t reg; num_se = adev->gfx.config.max_shader_engines; @@ -1108,10 +1101,9 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) vega10_program_gc_didt_config_registers(hwmgr, AvfsPSMResetConfig_vega10); mutex_lock(&adev->grbm_idx_mutex); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); for (count = 0; count < num_se; count++) { data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK | GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | ( count << GRBM_GFX_INDEX__SE_INDEX__SHIFT); - cgs_write_register(hwmgr->device, reg, data); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCStallDelayConfig_Vega10, VEGA10_CONFIGREG_DIDT); result |= vega10_program_didt_config_registers(hwmgr, PSMSEEDCCtrlResetConfig_Vega10, VEGA10_CONFIGREG_DIDT); @@ -1120,7 +1112,7 @@ static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) if (0 != result) break; } - cgs_write_register(hwmgr->device, reg, 0xE0000000); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); mutex_unlock(&adev->grbm_idx_mutex); vega10_didt_set_mask(hwmgr, true); @@ -1165,14 +1157,12 @@ static int vega10_disable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) static int vega10_enable_se_edc_force_stall_config(struct pp_hwmgr *hwmgr) { struct amdgpu_device *adev = hwmgr->adev; - uint32_t reg; int result; adev->gfx.rlc.funcs->enter_safe_mode(adev); mutex_lock(&adev->grbm_idx_mutex); - reg = soc15_get_register_offset(GC_HWID, 0, mmGRBM_GFX_INDEX_BASE_IDX, mmGRBM_GFX_INDEX); - cgs_write_register(hwmgr->device, reg, 0xE0000000); + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000); mutex_unlock(&adev->grbm_idx_mutex); result = vega10_program_didt_config_registers(hwmgr, SEEDCForceStallPatternConfig_Vega10, VEGA10_CONFIGREG_DIDT); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index 9f18226a56ea..aa044c1955fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -25,7 +25,7 @@ #include "vega10_hwmgr.h" #include "vega10_ppsmc.h" #include "vega10_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "pp_debug.h" static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) @@ -89,6 +89,7 @@ int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) { + struct amdgpu_device *adev = hwmgr->adev; struct vega10_hwmgr *data = hwmgr->backend; uint32_t tach_period; uint32_t crystal_clock_freq; @@ -100,10 +101,8 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) if 
(data->smu_features[GNLD_FAN_CONTROL].supported) { result = vega10_get_current_rpm(hwmgr, speed); } else { - uint32_t reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); tach_period = - CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS), CG_TACH_STATUS, TACH_PERIOD); @@ -127,26 +126,23 @@ int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) */ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) { - uint32_t reg; - - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + struct amdgpu_device *adev = hwmgr->adev; if (hwmgr->fan_ctrl_is_in_default_mode) { hwmgr->fan_ctrl_default_mode = - CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, FDO_PWM_MODE); hwmgr->tmin = - CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, TMIN); hwmgr->fan_ctrl_is_in_default_mode = false; } - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, TMIN, 0)); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, FDO_PWM_MODE, mode)); return 0; @@ -159,18 +155,15 @@ int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) */ int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) { - uint32_t reg; - - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + struct amdgpu_device *adev = hwmgr->adev; if (!hwmgr->fan_ctrl_is_in_default_mode) { - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, FDO_PWM_MODE, hwmgr->fan_ctrl_default_mode)); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, TMIN, hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); hwmgr->fan_ctrl_is_in_default_mode = true; @@ -257,10 +250,10 @@ int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, uint32_t speed) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t duty100; uint32_t duty; uint64_t tmp64; - uint32_t reg; if (hwmgr->thermal_controller.fanInfo.bNoFan) return 0; @@ -271,10 +264,7 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl)) vega10_fan_ctrl_stop_smc_fan_control(hwmgr); - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1); - - duty100 = CGS_REG_GET_FIELD(cgs_read_register(hwmgr->device, reg), + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), CG_FDO_CTRL1, FMAX_DUTY100); if (duty100 == 0) @@ -284,10 +274,8 @@ int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, do_div(tmp64, 100); duty = (uint32_t)tmp64; - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0); - cgs_write_register(hwmgr->device, reg, - 
CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0), CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); @@ -317,10 +305,10 @@ int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) */ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t tach_period; uint32_t crystal_clock_freq; int result = 0; - uint32_t reg; if (hwmgr->thermal_controller.fanInfo.bNoFan || (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || @@ -333,10 +321,8 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) if (!result) { crystal_clock_freq = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_TACH_STATUS, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_STATUS), CG_TACH_STATUS, TACH_PERIOD, tach_period)); } @@ -350,13 +336,10 @@ int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) */ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int temp; - uint32_t reg; - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); - - temp = cgs_read_register(hwmgr->device, reg); + temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS); temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; @@ -379,11 +362,12 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct amdgpu_device *adev = hwmgr->adev; int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t val, reg; + uint32_t val; if (low < range->min) low = range->min; @@ -393,20 +377,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (low > high) return -EINVAL; - reg = soc15_get_register_offset(THM_HWID, 0, - mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL); - - val = cgs_read_register(hwmgr->device, reg); + val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) & (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK); - cgs_write_register(hwmgr->device, 
reg, val); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); return 0; } @@ -418,21 +399,17 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, */ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_TACH_CTRL, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL), CG_TACH_CTRL, EDGE_PER_REV, hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution - 1)); } - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); - cgs_write_register(hwmgr->device, reg, - CGS_REG_SET_FIELD(cgs_read_register(hwmgr->device, reg), + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2), CG_FDO_CTRL2, TACH_PWM_RESP_RATE, 0x28)); return 0; @@ -445,9 +422,9 @@ static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) */ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; struct vega10_hwmgr *data = hwmgr->backend; uint32_t val = 0; - uint32_t reg; if (data->smu_features[GNLD_FW_CTF].supported) { if (data->smu_features[GNLD_FW_CTF].enabled) @@ -465,8 +442,7 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); - reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); - cgs_write_register(hwmgr->device, reg, val); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val); return 0; } @@ -477,8 +453,8 @@ static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) */ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; struct vega10_hwmgr *data = hwmgr->backend; - uint32_t reg; if (data->smu_features[GNLD_FW_CTF].supported) { if (!data->smu_features[GNLD_FW_CTF].enabled) @@ -493,8 +469,7 @@ int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) data->smu_features[GNLD_FW_CTF].enabled = false; } - reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 6a85238ae20f..7dca75cdf722 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -34,7 +34,6 @@ #include "atomfirmware.h" #include "cgs_common.h" #include "vega12_inc.h" -#include "pp_soc15.h" #include "pppcielanes.h" #include "vega12_hwmgr.h" #include "vega12_processpptables.h" diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c index df0fa815cd6e..cfd9e6ccb790 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c @@ -26,7 +26,7 @@ #include "vega12_smumgr.h" #include "vega12_ppsmc.h" #include "vega12_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "pp_debug.h" static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) 
@@ -147,13 +147,10 @@ int vega12_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) */ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; int temp = 0; - uint32_t reg; - reg = soc15_get_register_offset(THM_HWID, 0, - mmCG_MULT_THERMAL_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); - - temp = cgs_read_register(hwmgr->device, reg); + temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS); temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; @@ -175,11 +172,12 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr) static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range) { + struct amdgpu_device *adev = hwmgr->adev; int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP * PP_TEMPERATURE_UNITS_PER_CENTIGRADES; - uint32_t val, reg; + uint32_t val; if (low < range->min) low = range->min; @@ -189,18 +187,15 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, if (low > high) return -EINVAL; - reg = soc15_get_register_offset(THM_HWID, 0, - mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL); - - val = cgs_read_register(hwmgr->device, reg); + val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)); val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK); - cgs_write_register(hwmgr->device, reg, val); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val); return 0; } @@ -212,15 +207,14 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, */ static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t val = 0; - uint32_t reg; val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT); val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT); val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT); - reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); - cgs_write_register(hwmgr->device, reg, val); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val); return 0; } @@ -231,10 +225,9 @@ static int vega12_thermal_enable_alert(struct pp_hwmgr *hwmgr) */ int vega12_thermal_disable_alert(struct pp_hwmgr *hwmgr) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; - reg = soc15_get_register_offset(THM_HWID, 0, mmTHM_THERMAL_INT_ENA_BASE_IDX, mmTHM_THERMAL_INT_ENA); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h deleted file mode 100644 index 214f370c5efd..000000000000 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h +++ /dev/null 
@@ -1,52 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef PP_SOC15_H -#define PP_SOC15_H - -#include "soc15_hw_ip.h" -#include "vega10_ip_offset.h" - -inline static uint32_t soc15_get_register_offset( - uint32_t hw_id, - uint32_t inst, - uint32_t segment, - uint32_t offset) -{ - uint32_t reg = 0; - - if (hw_id == THM_HWID) - reg = THM_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == NBIF_HWID) - reg = NBIF_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == MP1_HWID) - reg = MP1_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == DF_HWID) - reg = DF_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == GC_HWID) - reg = GC_BASE.instance[inst].segment[segment] + offset; - else if (hw_id == SMUIO_HWID) - reg = SMUIO_BASE.instance[inst].segment[segment] + offset; - return reg; -} - -#endif diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index bc53f2beda30..9adea7263774 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -23,7 +23,7 @@ #include "smumgr.h" #include "smu10_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "smu10_smumgr.h" #include "ppatomctrl.h" #include "rv_ppsmc.h" @@ -49,48 +49,41 @@ static uint32_t smu10_wait_for_response(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(hwmgr->device, reg); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); } static int smu10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(hwmgr->device, reg, msg); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); return 0; } static int smu10_read_arg_from_smc(struct pp_hwmgr *hwmgr) { - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + struct amdgpu_device *adev = hwmgr->adev; - return cgs_read_register(hwmgr->device, reg); + 
return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); } static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; smu10_wait_for_response(hwmgr); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); smu10_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -104,17 +97,13 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; smu10_wait_for_response(hwmgr); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(hwmgr->device, reg, parameter); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); smu10_send_msg_to_smc_without_waiting(hwmgr, msg); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 4aafb043bcb0..14ac6d15c7a7 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -23,7 +23,7 @@ #include "smumgr.h" #include "vega10_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "vega10_smumgr.h" #include "vega10_hwmgr.h" #include "vega10_ppsmc.h" @@ -54,18 +54,13 @@ static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr) { - uint32_t mp1_fw_flags, reg; - - reg = soc15_get_register_offset(NBIF_HWID, 0, - mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); + struct amdgpu_device *adev = hwmgr->adev; + uint32_t mp1_fw_flags; - cgs_write_register(hwmgr->device, reg, + WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - reg = soc15_get_register_offset(NBIF_HWID, 0, - mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - - mp1_fw_flags = cgs_read_register(hwmgr->device, reg); + mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) return true; @@ -81,11 +76,11 @@ static bool vega10_is_smc_ram_running(struct pp_hwmgr *hwmgr) */ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; uint32_t ret; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); ret = phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); @@ -93,7 +88,7 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) if (ret) pr_err("No response from smu\n"); - return cgs_read_register(hwmgr->device, reg); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); } /* @@ -105,11 +100,9 @@ static uint32_t vega10_wait_for_response(struct pp_hwmgr *hwmgr) static int vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(hwmgr->device, reg, msg); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); return 0; } @@ -122,14 +115,12 @@ static int 
vega10_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, */ static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; uint32_t ret; vega10_wait_for_response(hwmgr); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); vega10_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -150,18 +141,14 @@ static int vega10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; uint32_t ret; vega10_wait_for_response(hwmgr); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(hwmgr->device, reg, parameter); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); vega10_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -174,12 +161,9 @@ static int vega10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, static int vega10_get_argument(struct pp_hwmgr *hwmgr) { - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + struct amdgpu_device *adev = hwmgr->adev; - return cgs_read_register(hwmgr->device, reg); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); } static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index 651a3f28734b..7d9b40e8b1bf 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -23,7 +23,7 @@ #include "smumgr.h" #include "vega12_inc.h" -#include "pp_soc15.h" +#include "soc15_common.h" #include "vega12_smumgr.h" #include "vega12_ppsmc.h" #include "vega12/smu9_driver_if.h" @@ -44,18 +44,13 @@ static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr) { - uint32_t mp1_fw_flags, reg; + struct amdgpu_device *adev = hwmgr->adev; + uint32_t mp1_fw_flags; - reg = soc15_get_register_offset(NBIF_HWID, 0, - mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); - - cgs_write_register(hwmgr->device, reg, + WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); - reg = soc15_get_register_offset(NBIF_HWID, 0, - mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); - - mp1_fw_flags = cgs_read_register(hwmgr->device, reg); + mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2); if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) @@ -72,15 +67,15 @@ static bool vega12_is_smc_ram_running(struct pp_hwmgr *hwmgr) */ static uint32_t vega12_wait_for_response(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev = hwmgr->adev; uint32_t reg; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); phm_wait_for_register_unequal(hwmgr, reg, 0, MP1_C2PMSG_90__CONTENT_MASK); - return cgs_read_register(hwmgr->device, reg); + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); } /* @@ -92,11 +87,9 @@ static uint32_t vega12_wait_for_response(struct 
pp_hwmgr *hwmgr) int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(hwmgr->device, reg, msg); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); return 0; } @@ -109,13 +102,11 @@ int vega12_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, */ int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; vega12_wait_for_response(hwmgr); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); vega12_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -135,17 +126,13 @@ int vega12_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; vega12_wait_for_response(hwmgr); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); - cgs_write_register(hwmgr->device, reg, 0); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); - cgs_write_register(hwmgr->device, reg, parameter); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); vega12_send_msg_to_smc_without_waiting(hwmgr, msg); @@ -166,11 +153,9 @@ int vega12_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, int vega12_send_msg_to_smc_with_parameter_without_waiting( struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); - cgs_write_register(hwmgr->device, reg, parameter); + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, parameter); return vega12_send_msg_to_smc_without_waiting(hwmgr, msg); } @@ -183,12 +168,9 @@ int vega12_send_msg_to_smc_with_parameter_without_waiting( */ int vega12_read_arg_from_smc(struct pp_hwmgr *hwmgr, uint32_t *arg) { - uint32_t reg; - - reg = soc15_get_register_offset(MP1_HWID, 0, - mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + struct amdgpu_device *adev = hwmgr->adev; - *arg = cgs_read_register(hwmgr->device, reg); + *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); return 0; } -- cgit v1.2.1 From c11d8afe10228e4621acfcb8f302255ea8567a1e Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 10 Apr 2018 13:05:49 +0800 Subject: drm/amd/pp: fix the wrong readout engine clock in deep sleep Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 13 ++++--------- drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h | 1 + 2 files changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index f6427c88f6a7..c90502bcc2b2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3805,7 +3805,7 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, void *value, int *size) { struct amdgpu_device *adev = hwmgr->adev; - uint32_t sclk_idx, mclk_idx, activity_percent = 0; + 
uint32_t sclk_mhz, mclk_idx, activity_percent = 0; struct vega10_hwmgr *data = hwmgr->backend; struct vega10_dpm_table *dpm_table = &data->dpm_table; int ret = 0; @@ -3813,14 +3813,9 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, switch (idx) { case AMDGPU_PP_SENSOR_GFX_SCLK: - smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); - sclk_idx = smum_get_argument(hwmgr); - if (sclk_idx < dpm_table->gfx_table.count) { - *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value; - *size = 4; - } else { - ret = -EINVAL; - } + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency); + sclk_mhz = smum_get_argument(hwmgr); + *((uint32_t *)value) = sclk_mhz * 100; break; case AMDGPU_PP_SENSOR_GFX_MCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex); diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h index c3ed737ab951..715b5a168831 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega10_ppsmc.h @@ -131,6 +131,7 @@ typedef uint16_t PPSMC_Result; #define PPSMC_MSG_RunAcgInOpenLoop 0x5E #define PPSMC_MSG_InitializeAcg 0x5F #define PPSMC_MSG_GetCurrPkgPwr 0x61 +#define PPSMC_MSG_GetAverageGfxclkActualFrequency 0x63 #define PPSMC_MSG_SetPccThrottleLevel 0x67 #define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68 #define PPSMC_Message_Count 0x69 -- cgit v1.2.1 From f1018f50d48395b4a189bf8ea9af1e4441209cfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 5 Apr 2018 14:46:41 +0200 Subject: drm/amdgpu: use ctx bytes_moved MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of the global (inaccurate) counter. Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d7d7ce1507ec..de69ab12bb55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -412,7 +412,6 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, struct amdgpu_bo_list_entry *candidate = p->evictable; struct amdgpu_bo *bo = candidate->robj; struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u64 initial_bytes_moved, bytes_moved; bool update_bytes_moved_vis; uint32_t other; @@ -436,18 +435,15 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, continue; /* Good we can try to move this BO somewhere else */ - amdgpu_ttm_placement_from_domain(bo, other); update_bytes_moved_vis = adev->gmc.visible_vram_size < adev->gmc.real_vram_size && bo->tbo.mem.mem_type == TTM_PL_VRAM && bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT; - initial_bytes_moved = atomic64_read(&adev->num_bytes_moved); + amdgpu_ttm_placement_from_domain(bo, other); r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - bytes_moved = atomic64_read(&adev->num_bytes_moved) - - initial_bytes_moved; - p->bytes_moved += bytes_moved; + p->bytes_moved += ctx.bytes_moved; if (update_bytes_moved_vis) - p->bytes_moved_vis += bytes_moved; + p->bytes_moved_vis += ctx.bytes_moved; if (unlikely(r)) break; -- cgit v1.2.1 From 5422a28fe86f9f77480471385e0a416c27a9ca72 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 5 Apr 2018 16:42:03 +0200 Subject: 
drm/amdgpu: fix and cleanup cpu visible VRAM handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The detection if a BO was placed in CPU visible VRAM was incorrect. Fix it and merge it with the correct detection in amdgpu_ttm.c Signed-off-by: Christian König Reviewed-by: Michel Dänzer Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 ++---- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 21 +++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 19 +++---------------- 3 files changed, 26 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index de69ab12bb55..68af2f878bc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -382,8 +382,7 @@ retry: p->bytes_moved += ctx.bytes_moved; if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && - bo->tbo.mem.mem_type == TTM_PL_VRAM && - bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT) + amdgpu_bo_in_cpu_visible_vram(bo)) p->bytes_moved_vis += ctx.bytes_moved; if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains && @@ -437,8 +436,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, /* Good we can try to move this BO somewhere else */ update_bytes_moved_vis = adev->gmc.visible_vram_size < adev->gmc.real_vram_size && - bo->tbo.mem.mem_type == TTM_PL_VRAM && - bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT; + amdgpu_bo_in_cpu_visible_vram(bo); amdgpu_ttm_placement_from_domain(bo, other); r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); p->bytes_moved += ctx.bytes_moved; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 546f77cb7882..3bee13344065 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -195,6 +195,27 @@ static inline bool amdgpu_bo_gpu_accessible(struct amdgpu_bo *bo) } } +/** + * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM + */ +static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; + struct drm_mm_node *node = bo->tbo.mem.mm_node; + unsigned long pages_left; + + if (bo->tbo.mem.mem_type != TTM_PL_VRAM) + return false; + + for (pages_left = bo->tbo.mem.num_pages; pages_left; + pages_left -= node->size, node++) + if (node->start < fpfn) + return true; + + return false; +} + /** * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 205da3ff9cd0..ab73300e6c7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -223,20 +223,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, if (!adev->mman.buffer_funcs_enabled) { amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && - !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) { - unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; - struct drm_mm_node *node = bo->mem.mm_node; - unsigned long pages_left; - - for (pages_left = bo->mem.num_pages; - pages_left; - pages_left -= node->size, node++) { - if (node->start < fpfn) - break; - } - - if 
(!pages_left) - goto gtt; + !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && + amdgpu_bo_in_cpu_visible_vram(abo)) { /* Try evicting to the CPU inaccessible part of VRAM * first, but only set GTT as busy placement, so this @@ -245,12 +233,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, */ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT); - abo->placements[0].fpfn = fpfn; + abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; abo->placements[0].lpfn = 0; abo->placement.busy_placement = &abo->placements[1]; abo->placement.num_busy_placement = 1; } else { -gtt: amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); } break; -- cgit v1.2.1 From 45a2d58e84e6d28c2d9ae8e68bd815d9a98ad52e Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 10 Apr 2018 16:08:44 -0400 Subject: drm/amd/display: Fix 64-bit division in hwss_edp_power_control Signed-off-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 52427ae42e0f..68a182ce53c7 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -857,7 +857,7 @@ void hwss_edp_power_control( dm_get_elapse_time_in_ns( ctx, current_ts, - link->link_trace.time_stamp.edp_poweroff) / 1000000; + div64_u64(link->link_trace.time_stamp.edp_poweroff, 1000000)); unsigned long long wait_time_ms = 0; /* max 500ms from LCDVDD off to on */ -- cgit v1.2.1 From 2fa417324abd635294c298f9f3119743055bf5b9 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Mon, 9 Apr 2018 14:27:46 -0400 Subject: drm/amd/display: Remove PRE_VEGA flag We enabled this upstream by default now and no longer need the flag. Signed-off-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 --- drivers/gpu/drm/amd/display/Kconfig | 8 -------- 2 files changed, 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index abc33464959e..62d6505ade84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2093,9 +2093,6 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_POLARIS12: case CHIP_TONGA: case CHIP_FIJI: -#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA) - return amdgpu_dc != 0; -#endif case CHIP_VEGA10: case CHIP_VEGA12: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 5b124a67404c..d5d4586e6176 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -9,14 +9,6 @@ config DRM_AMD_DC support for AMDGPU. This adds required support for Vega and Raven ASICs. -config DRM_AMD_DC_PRE_VEGA - bool "DC support for Polaris and older ASICs" - default y - help - Choose this option to enable the new DC support for older asics - by default. This includes Polaris, Carrizo, Tonga, Bonaire, - and Hawaii. 
- config DRM_AMD_DC_FBC bool "AMD FBC - Enable Frame Buffer Compression" depends on DRM_AMD_DC -- cgit v1.2.1 From db4b37975888cf22e39f2cabc6590167faabaeaa Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Mon, 9 Apr 2018 14:04:56 -0400 Subject: drm/amd/display: Don't spam debug messages Signed-off-by: Harry Wentland Reviewed-by: Leo (Sunpeng) Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/logger_types.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 4f332e80cecc..b608a0830801 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -32,7 +32,7 @@ #define DC_LOG_ERROR(...) DRM_ERROR(__VA_ARGS__) #define DC_LOG_WARNING(...) DRM_WARN(__VA_ARGS__) -#define DC_LOG_DEBUG(...) DRM_INFO(__VA_ARGS__) +#define DC_LOG_DEBUG(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_DC(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_DTN(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_SURFACE(...) pr_debug("[SURFACE]:"__VA_ARGS__) -- cgit v1.2.1 From d6014e776ceb5da2d86ce405f692522f2b0370f2 Mon Sep 17 00:00:00 2001 From: Shirish S Date: Wed, 28 Mar 2018 12:22:22 +0530 Subject: drm/amd/display: remove dummy is_blanked() to optimise boot time is_blanked() hook is a dummy one for underlay pipe, hence when called, it loops for ~300ms at boot. This patch removes this dummy call and adds missing checks. Signed-off-by: Shirish S Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 3 +++ drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c | 3 ++- drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c | 7 ------- 3 files changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index ebc96b720083..481f6928a9c0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -230,6 +230,9 @@ bool hwss_wait_for_blank_complete( { int counter; + /* Not applicable if the pipe is not primary, save 300ms of boot time */ + if (!tg->funcs->is_blanked) + return true; for (counter = 0; counter < 100; counter++) { if (tg->funcs->is_blanked(tg)) break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c index 487724345d9d..0275d6d60da4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.c @@ -53,7 +53,8 @@ void dce_pipe_control_lock(struct dc *dc, struct dce_hwseq *hws = dc->hwseq; /* Not lock pipe when blank */ - if (lock && pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg)) + if (lock && pipe->stream_res.tg->funcs->is_blanked && + pipe->stream_res.tg->funcs->is_blanked(pipe->stream_res.tg)) return; val = REG_GET_4(BLND_V_UPDATE_LOCK[pipe->stream_res.tg->inst], diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c index 8ad04816e7d3..a3cef60380ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator_v.c @@ -648,12 +648,6 @@ static void dce110_timing_generator_v_disable_vga( return; } -static bool 
dce110_tg_v_is_blanked(struct timing_generator *tg) -{ - /* Signal comes from the primary pipe, underlay is never blanked. */ - return false; -} - /** ******************************************************************************************** * * DCE11 Timing Generator Constructor / Destructor @@ -670,7 +664,6 @@ static const struct timing_generator_funcs dce110_tg_v_funcs = { .set_early_control = dce110_timing_generator_v_set_early_control, .wait_for_state = dce110_timing_generator_v_wait_for_state, .set_blank = dce110_timing_generator_v_set_blank, - .is_blanked = dce110_tg_v_is_blanked, .set_colors = dce110_timing_generator_v_set_colors, .set_overscan_blank_color = dce110_timing_generator_v_set_overscan_color_black, -- cgit v1.2.1 From 45313e5f1c72962a21df58af52a421e4c076b2d4 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Apr 2018 10:58:43 +0800 Subject: drm/amd/pp: Move same macro definitions to hwmgr.h Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4 ---- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 4 ---- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 ++ drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 3 --- drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | 3 --- drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c | 3 --- drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 2 -- drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c | 2 -- drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | 4 ---- drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 2 -- 10 files changed, 2 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 14332159227e..21c021ba0f49 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -61,10 +61,6 @@ #define SMC_CG_IND_START 0xc0030000 #define SMC_CG_IND_END 0xc0040000 -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - #define MEM_FREQ_LOW_LATENCY 25000 #define MEM_FREQ_HIGH_LATENCY 80000 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index c90502bcc2b2..26c56025d56c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -51,10 +51,6 @@ #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - #define HBM_MEMORY_CHANNEL_WIDTH 128 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index e450ec74d6ed..9b3dd7dce4e2 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -38,6 +38,8 @@ struct phm_fan_speed_info; struct pp_atomctrl_voltage_table; #define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 enum DISPLAY_GAP { DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. 
*/ diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index c28b95fd1c85..2a93f3a8e4f0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -61,9 +61,6 @@ #define SMC_RAM_END 0x40000 -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define CISLAND_MINIMUM_ENGINE_CLOCK 800 #define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index dae3422366b3..53df9405f43a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -53,10 +53,7 @@ #define FIJI_SMC_SIZE 0x20000 -#define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define VDDC_VDDCI_DELTA 300 #define MC_CG_ARB_FREQ_F1 0x0b diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c index bc05e355012d..415f691c3fa9 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c @@ -60,10 +60,7 @@ #define ICELAND_SMC_SIZE 0x20000 -#define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define MC_CG_ARB_FREQ_F1 0x0b #define VDDC_VDDCI_DELTA 200 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index d9192286099d..a8c6524f07e4 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -52,8 +52,6 @@ #include "dce/dce_10_0_sh_mask.h" #define POLARIS10_SMC_SIZE 0x20000 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define POWERTUNE_DEFAULT_SET_MAX 1 #define VDDC_VDDCI_DELTA 200 #define MC_CG_ARB_FREQ_F1 0x0b diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index 9adea7263774..0a563f6fe9ea 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -33,8 +33,6 @@ #include "pp_debug.h" -#define VOLTAGE_SCALE 4 - #define BUFFER_SIZE 80000 #define MAX_STRING_SIZE 15 #define BUFFER_SIZETWO 131072 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 94ba304ff52e..782b19fc2e70 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -55,11 +55,7 @@ #include "dce/dce_10_0_d.h" #include "dce/dce_10_0_sh_mask.h" - -#define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 #define MC_CG_ARB_FREQ_F1 0x0b #define VDDC_VDDCI_DELTA 200 diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 14ac6d15c7a7..e84669c448a3 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -35,8 +35,6 @@ #define AVFS_EN_MSB 1568 #define AVFS_EN_LSB 1568 -#define VOLTAGE_SCALE 4 - /* Microcode file is stored in this buffer */ #define BUFFER_SIZE 80000 #define 
MAX_STRING_SIZE 15 -- cgit v1.2.1 From 29ae1118d85e8435b12fca512410dbd39920cce9 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Apr 2018 10:58:43 +0800 Subject: drm/amd/pp: Remove unnecessary forward declaration Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 84 +++++++++++----------- 1 file changed, 41 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 26c56025d56c..127c550e8bb1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -75,8 +75,6 @@ static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L -static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask); static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); @@ -4095,6 +4093,47 @@ static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) } } +static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct vega10_hwmgr *data = hwmgr->backend; + + switch (type) { + case PP_SCLK: + data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0; + data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0; + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to lowest!", + return -EINVAL); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to highest!", + return -EINVAL); + break; + + case PP_MCLK: + data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0; + data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0; + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to lowest!", + return -EINVAL); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to highest!", + return -EINVAL); + + break; + + case PP_PCIE: + default: + break; + } + + return 0; +} + static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { @@ -4381,47 +4420,6 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, return result; } -static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, - enum pp_clock_type type, uint32_t mask) -{ - struct vega10_hwmgr *data = hwmgr->backend; - - switch (type) { - case PP_SCLK: - data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0; - - PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), - "Failed to upload boot level to lowest!", - return -EINVAL); - - PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), - "Failed to upload dpm max level to highest!", - return -EINVAL); - break; - - case PP_MCLK: - data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0; - data->smc_state_table.mem_max_level = mask ? 
(fls(mask) - 1) : 0; - - PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), - "Failed to upload boot level to lowest!", - return -EINVAL); - - PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), - "Failed to upload dpm max level to highest!", - return -EINVAL); - - break; - - case PP_PCIE: - default: - break; - } - - return 0; -} - static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf) { -- cgit v1.2.1 From 819a23f83e3b2513cffbef418458a47ca02c36b3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Apr 2018 17:17:22 +0800 Subject: drm/amdgpu: Add APU support in vi_set_uvd_clocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix the issue set uvd clock failed on CZ/ST which lead 1s delay when boot up. Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Acked-by: Christian König Acked-by: Shirish S Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/vi.c | 46 +++++++++++++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 1b4ee249b95a..51acd7c3d2a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -728,33 +728,57 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, return r; tmp = RREG32_SMC(cntl_reg); - tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | - CG_DCLK_CNTL__DCLK_DIVIDER_MASK); + + if (adev->flags & AMD_IS_APU) + tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK; + else + tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK | + CG_DCLK_CNTL__DCLK_DIVIDER_MASK); tmp |= dividers.post_divider; WREG32_SMC(cntl_reg, tmp); for (i = 0; i < 100; i++) { - if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK) - break; + tmp = RREG32_SMC(status_reg); + if (adev->flags & AMD_IS_APU) { + if (tmp & 0x10000) + break; + } else { + if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK) + break; + } mdelay(10); } if (i == 100) return -ETIMEDOUT; - return 0; } +#define ixGNB_CLK1_DFS_CNTL 0xD82200F0 +#define ixGNB_CLK1_STATUS 0xD822010C +#define ixGNB_CLK2_DFS_CNTL 0xD8220110 +#define ixGNB_CLK2_STATUS 0xD822012C + static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) { int r; - r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); - if (r) - return r; + if (adev->flags & AMD_IS_APU) { + r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS); + if (r) + return r; - r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); - if (r) - return r; + r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS); + if (r) + return r; + } else { + r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); + if (r) + return r; + + r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); + if (r) + return r; + } return 0; } -- cgit v1.2.1 From 08ebb6e9f4fd7098c28e0ebbb42847cf0488ebb8 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 10 Apr 2018 17:49:56 +0800 Subject: drm/amdgpu: Add APU support in vi_set_vce_clocks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. fix set vce clocks failed on Cz/St which lead 1s delay when boot up. 2. 
remove the workaround in vce_v3_0.c Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Acked-by: Christian König Acked-by: Shirish S Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/vi.c | 31 +++++++++++++++++++++++++------ 2 files changed, 27 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index 428d1928e44e..ac9617269a2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -467,8 +467,8 @@ static int vce_v3_0_hw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; vce_v3_0_override_vce_clock_gating(adev, true); - if (!(adev->flags & AMD_IS_APU)) - amdgpu_asic_set_vce_clocks(adev, 10000, 10000); + + amdgpu_asic_set_vce_clocks(adev, 10000, 10000); for (i = 0; i < adev->vce.num_rings; i++) adev->vce.ring[i].ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 51acd7c3d2a9..4034a2863226 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -757,6 +757,8 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, #define ixGNB_CLK1_STATUS 0xD822010C #define ixGNB_CLK2_DFS_CNTL 0xD8220110 #define ixGNB_CLK2_STATUS 0xD822012C +#define ixGNB_CLK3_DFS_CNTL 0xD8220130 +#define ixGNB_CLK3_STATUS 0xD822014C static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) { @@ -788,6 +790,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) int r, i; struct atom_clock_dividers dividers; u32 tmp; + u32 reg_ctrl; + u32 reg_status; + u32 status_mask; + u32 reg_mask; + + if (adev->flags & AMD_IS_APU) { + reg_ctrl = ixGNB_CLK3_DFS_CNTL; + reg_status = ixGNB_CLK3_STATUS; + status_mask = 0x00010000; + reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK; + } else { + reg_ctrl = ixCG_ECLK_CNTL; + reg_status = ixCG_ECLK_STATUS; + status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK; + reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK; + } r = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, @@ -796,24 +814,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) return r; for (i = 0; i < 100; i++) { - if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) + if (RREG32_SMC(reg_status) & status_mask) break; mdelay(10); } + if (i == 100) return -ETIMEDOUT; - tmp = RREG32_SMC(ixCG_ECLK_CNTL); - tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | - CG_ECLK_CNTL__ECLK_DIVIDER_MASK); + tmp = RREG32_SMC(reg_ctrl); + tmp &= ~reg_mask; tmp |= dividers.post_divider; - WREG32_SMC(ixCG_ECLK_CNTL, tmp); + WREG32_SMC(reg_ctrl, tmp); for (i = 0; i < 100; i++) { - if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK) + if (RREG32_SMC(reg_status) & status_mask) break; mdelay(10); } + if (i == 100) return -ETIMEDOUT; -- cgit v1.2.1 From 61279073b1d35ea29bf546c7751bda09610ab5ef Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Mon, 9 Apr 2018 14:53:51 +0800 Subject: amd/powerplay: implement the vega12_force_clock_level interface pp_dpm_sclk/pp_dpm_mclk in sysfs implemented to force gfxclk/uclk dpm level for Vega12 Signed-off-by: Kenneth Feng Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 42 +++++++++++++++++++++- 1 file changed, 41 insertions(+), 1 deletion(-) 
(limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 7dca75cdf722..df234db6485e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -991,15 +991,55 @@ static uint32_t vega12_find_highest_dpm_level( static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr) { + struct vega12_hwmgr *data = hwmgr->backend; + if (data->smc_state_table.gfx_boot_level != + data->dpm_table.gfx_table.dpm_state.soft_min_level) { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMinByFreq, + PPCLK_GFXCLK<<16 | data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_boot_level].value); + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->smc_state_table.gfx_boot_level; + } + + if (data->smc_state_table.mem_boot_level != + data->dpm_table.mem_table.dpm_state.soft_min_level) { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMinByFreq, + PPCLK_UCLK<<16 | data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_boot_level].value); + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->smc_state_table.mem_boot_level; + } + return 0; + } static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr) { + struct vega12_hwmgr *data = hwmgr->backend; + if (data->smc_state_table.gfx_max_level != + data->dpm_table.gfx_table.dpm_state.soft_max_level) { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMaxByFreq, + /* plus the vale by 1 to align the resolution */ + PPCLK_GFXCLK<<16 | (data->dpm_table.gfx_table.dpm_levels[data->smc_state_table.gfx_max_level].value + 1)); + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->smc_state_table.gfx_max_level; + } + + if (data->smc_state_table.mem_max_level != + data->dpm_table.mem_table.dpm_state.soft_max_level) { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMaxByFreq, + /* plus the vale by 1 to align the resolution */ + PPCLK_UCLK<<16 | (data->dpm_table.mem_table.dpm_levels[data->smc_state_table.mem_max_level].value + 1)); + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->smc_state_table.mem_max_level; + } + return 0; } - int vega12_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) { struct vega12_hwmgr *data = -- cgit v1.2.1 From 564be2fc2b1ddb6cbef2bd77f83e91c9e4a1063f Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Wed, 4 Apr 2018 15:17:22 +0800 Subject: drm/amd/powerplay: Get more than 8 level gfxclk states To apply on Vega12 for more than 8 gfx dpm levels Signed-off-by: Kenneth Feng Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h index bc98b1df3b65..e81ded1ec198 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h @@ -33,7 +33,7 @@ #define WaterMarksExist 1 #define WaterMarksLoaded 2 -#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 8 +#define VG12_PSUEDO_NUM_GFXCLK_DPM_LEVELS 16 #define VG12_PSUEDO_NUM_SOCCLK_DPM_LEVELS 8 #define VG12_PSUEDO_NUM_DCEFCLK_DPM_LEVELS 8 #define VG12_PSUEDO_NUM_UCLK_DPM_LEVELS 4 -- cgit v1.2.1 From 0bc8f3d29b188b273e92cd895da3b5c31e86434f Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Tue, 10 Apr 2018 17:05:36 +0800 Subject: 
drm/amd/powerplay: initialzie the dpm intial enabled state To expose the right dpm levels to the sysfs Signed-off-by: Kenneth Feng Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index df234db6485e..3e1ed0aca29c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -545,6 +545,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -564,6 +565,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -584,6 +586,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -604,6 +607,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -624,6 +628,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -644,6 +649,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -665,6 +671,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -685,6 +692,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -705,6 +713,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); @@ -725,6 +734,7 @@ static int vega12_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) return -EINVAL); dpm_table->dpm_levels[i].value = clock; + dpm_table->dpm_levels[i].enabled = true; } vega12_init_dpm_state(&(dpm_table->dpm_state)); -- cgit v1.2.1 From 18081c2003915dadc3507b79cf6453f997948ded Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 4 Apr 2018 18:33:15 +0800 Subject: drm/amd/pp: Remove dead function in smu7_smumgr.c Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 10 ---------- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h | 1 - 2 files changed, 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 
41fab2df994e..8b9518a64121 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -231,16 +231,6 @@ int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr) return 0; } -int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr) -{ - if (!smu7_is_smc_ram_running(hwmgr)) - return -EINVAL; - - PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, SMC_SYSCON_CLOCK_CNTL_0, cken, 0); - return 0; -} - - enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type) { enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h index 126d300259ba..39c9bfda0ab4 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h @@ -67,7 +67,6 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter); int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr); -int smu7_wait_for_smc_inactive(struct pp_hwmgr *hwmgr); enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type); int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, -- cgit v1.2.1 From 89a111476676add9ded0286fc7606508b5efb101 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 4 Apr 2018 18:41:08 +0800 Subject: drm/amd/pp: Remove useless smu7 running state check Only check smc running state before start smu. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 8b9518a64121..fb32a3fcc278 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -167,10 +167,6 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int ret; - if (!smu7_is_smc_ram_running(hwmgr)) - return -EINVAL; - - PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); @@ -199,10 +195,6 @@ int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg) int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter) { - if (!smu7_is_smc_ram_running(hwmgr)) { - return -EINVAL; - } - PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter); -- cgit v1.2.1 From 5eeae247d227c448d4db8f60ce184ddb0e0feca0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 10 Apr 2018 10:15:26 -0500 Subject: drm/amdgpu/gfx9: cache DB_DEBUG2 and make it available to userspace MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Userspace needs to query this value to work around a hw bug in certain cases. 
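For illustration only, not part of this patch: once mmDB_DEBUG2 is on the allowed-register list, user space can read it through the existing indexed register query. A minimal sketch using libdrm's amdgpu_read_mm_registers(); the offset macro below is a placeholder, the real dword offset is what SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2) evaluates to in the gfx9 headers.

#include <stdint.h>
#include <amdgpu.h>

/* Placeholder only; substitute the real SOC15 dword offset of mmDB_DEBUG2. */
#define ASSUMED_DB_DEBUG2_OFFSET 0

static int query_db_debug2(amdgpu_device_handle dev, uint32_t *value)
{
        /* instance 0xffffffff selects the default instance, no flags */
        return amdgpu_read_mm_registers(dev, ASSUMED_DB_DEBUG2_OFFSET, 1,
                                        0xffffffff, 0, value);
}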
Acked-by: Nicolai Hähnle Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + drivers/gpu/drm/amd/amdgpu/soc15.c | 3 +++ 3 files changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 7e5defbfc3b9..7eb0e4846a76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -867,6 +867,8 @@ struct amdgpu_gfx_config { /* gfx configure feature */ uint32_t double_offchip_lds_buf; + /* cached value of DB_DEBUG2 */ + uint32_t db_debug2; }; struct amdgpu_cu_info { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9d39fd5b1822..66bd6c1c82c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1600,6 +1600,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev) gfx_v9_0_setup_rb(adev); gfx_v9_0_get_cu_info(adev, &adev->gfx.cu_info); + adev->gfx.config.db_debug2 = RREG32_SOC15(GC, 0, mmDB_DEBUG2); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 2e9ebe8db5cc..65e781f05c24 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -287,6 +287,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, + { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)}, }; static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num, @@ -315,6 +316,8 @@ static uint32_t soc15_get_register_value(struct amdgpu_device *adev, } else { if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)) return adev->gfx.config.gb_addr_config; + else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2)) + return adev->gfx.config.db_debug2; return RREG32(reg_offset); } } -- cgit v1.2.1 From 642ad57058baaa2c105925a75c153bb486877513 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 12 Apr 2018 10:51:51 -0400 Subject: Revert "drm/amd/display: fix dereferencing possible ERR_PTR()" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit cd2d6c92a8e39d7e50a5af9fcc67d07e6a89e91d. 
Cc: Shirish S Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Michel Dänzer Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 265f0166f688..0c29f3b97398 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4941,9 +4941,6 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state, return -EDEADLK; crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - if (crtc->primary == plane && crtc_state->active) { if (!plane_state->fb) return -EINVAL; -- cgit v1.2.1 From 23b9ad21b262b9a85e9b85813e4adfcfb0dd96b3 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 12 Apr 2018 10:51:52 -0400 Subject: Revert "drm/amd/display: disable CRTCs with NULL FB on their primary plane (V2)" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This seems to cause flickering and lock-ups for a wide range of users. Revert until we've found a proper fix for the flickering and lock-ups. This reverts commit 36cc549d59864b7161f0e23d710c1c4d1b9cf022. Cc: Shirish S Cc: Alex Deucher Cc: stable@vger.kernel.org Reviewed-by: Michel Dänzer Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 28 ----------------------- 1 file changed, 28 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0c29f3b97398..2368ade4bae0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4925,30 +4925,6 @@ static int dm_update_planes_state(struct dc *dc, return ret; } -static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state, - struct drm_crtc *crtc) -{ - struct drm_plane *plane; - struct drm_crtc_state *crtc_state; - - WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc)); - - drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { - struct drm_plane_state *plane_state = - drm_atomic_get_plane_state(state, plane); - - if (IS_ERR(plane_state)) - return -EDEADLK; - - crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); - if (crtc->primary == plane && crtc_state->active) { - if (!plane_state->fb) - return -EINVAL; - } - } - return 0; -} - static int amdgpu_dm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { @@ -4972,10 +4948,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, goto fail; for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - ret = dm_atomic_check_plane_state_fb(state, crtc); - if (ret) - goto fail; - if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->color_mgmt_changed) continue; -- cgit v1.2.1 From c73a3626619018adfa2bb0fa1e64310be8e73152 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 11 Apr 2018 17:57:13 -0500 Subject: drm/amdgpu/powerplay: fix smu7_get_memory_type for fiji Fiji uses a different register than other smu7 asics, but we already have this info in the base driver so just use that. 
Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 21c021ba0f49..97b7c2333f19 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4150,13 +4150,9 @@ static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr) static int smu7_get_memory_type(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - uint32_t temp; - - temp = cgs_read_register(hwmgr->device, mmMC_SEQ_MISC0); + struct amdgpu_device *adev = hwmgr->adev; - data->is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == - ((temp & MC_SEQ_MISC0_GDDR5_MASK) >> - MC_SEQ_MISC0_GDDR5_SHIFT)); + data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5); return 0; } -- cgit v1.2.1 From 9da00630188da6e8ad1596c2b58809c833b16154 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 11 Apr 2018 18:09:39 -0500 Subject: drm/amdgpu/powerplay: rename smu7_upload_mc_firmware It doesn't actually upload any firmware; it just checks the version. The actual upload happens in the gmc modules. Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 97b7c2333f19..ed43dd39b5d6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4071,7 +4071,7 @@ static int smu7_check_states_equal(struct pp_hwmgr *hwmgr, return 0; } -static int smu7_upload_mc_firmware(struct pp_hwmgr *hwmgr) +static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -4200,7 +4200,7 @@ static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; - smu7_upload_mc_firmware(hwmgr); + smu7_check_mc_firmware(hwmgr); tmp_result = smu7_read_clock_registers(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), -- cgit v1.2.1 From 828536385ab0d25b5ddd7153347df04ea3a6961d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Mar 2018 11:58:14 -0500 Subject: drm/amdgpu: add emit_reg_write_reg_wait ring callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This callback writes a value to a register and then reads back another register and waits for a value in a single operation. Provide a helper function using two operations for engines that don't support this operation.
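As a rough illustration (not from this patch): a caller like the GMC9 TLB flush, which is converted later in this series, goes from two packets to one callback; engines without a combined packet fall back to the helper, which expands into the same two calls.

/* before: two packets, a world switch can land between them */
amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req);
amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng,
                          1 << vmid, 1 << vmid);

/* after: a single callback, emitted as one packet where supported */
amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
                                    hub->vm_inv_eng0_ack + eng,
                                    req, 1 << vmid);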
Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 20 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 7 +++++++ 3 files changed, 28 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 7eb0e4846a76..c25ee750c362 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1806,6 +1806,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d)) #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) +#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m)) #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b)) #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index d5f526f38e50..49cad08b5c16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -459,6 +459,26 @@ void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring) spin_unlock(&adev->ring_lru_list_lock); } +/** + * amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper + * + * @adev: amdgpu_device pointer + * @reg0: register to write + * @reg1: register to wait on + * @ref: reference value to write/wait on + * @mask: mask to wait on + * + * Helper for rings that don't support write and wait in a + * single oneshot packet. 
+ */ +void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + amdgpu_ring_emit_wreg(ring, reg0, ref); + amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask); +} + /* * Debugfs info */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 1a5911882657..08fcdf6f7b53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -154,6 +154,9 @@ struct amdgpu_ring_funcs { void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask); + void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask); void (*emit_tmz)(struct amdgpu_ring *ring, bool start); /* priority functions */ void (*set_priority) (struct amdgpu_ring *ring, @@ -228,6 +231,10 @@ int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type, int *blacklist, int num_blacklist, bool lru_pipe_order, struct amdgpu_ring **ring); void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring); +void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t val0, + uint32_t reg1, uint32_t val1); + static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) { int i = 0; -- cgit v1.2.1 From 10ed3c3190d38f189ed6857cecca1a2eb6de33a3 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Mar 2018 15:07:50 -0500 Subject: drm/amdgpu/gfx9: add emit_reg_write_reg_wait ring callback (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for writing and reading back in a single oneshot packet. This is needed to send a tlb invalidation and wait for ack in a single operation. 
v2: squash the gfx ring stall fix Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Emily Deng --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 66bd6c1c82c0..583f6f616dd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4138,6 +4138,15 @@ static void gfx_v9_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, gfx_v9_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); } +static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, + uint32_t reg0, uint32_t reg1, + uint32_t ref, uint32_t mask) +{ + int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); + + gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, ref, mask, 0x20); +} + static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { @@ -4459,6 +4468,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .emit_tmz = gfx_v9_0_ring_emit_tmz, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { @@ -4493,6 +4503,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .set_priority = gfx_v9_0_ring_set_priority_compute, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { @@ -4523,6 +4534,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { .emit_rreg = gfx_v9_0_ring_emit_rreg, .emit_wreg = gfx_v9_0_ring_emit_wreg, .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, }; static void gfx_v9_0_set_ring_funcs(struct amdgpu_device *adev) -- cgit v1.2.1 From 4dfe7d7b4e3ba16fc377a48a221bfe8172bc50e1 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Mar 2018 16:51:41 -0500 Subject: drm/amdgpu/sdma4: add emit_reg_write_reg_wait ring callback (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for writing and reading back in a single oneshot packet. This is needed to send a tlb invalidation and wait for ack in a single operation. 
v2: squash sdma hang fix into this patch Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Emily Deng --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 2c618a1be03e..03a36cbe7557 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1611,6 +1611,7 @@ static const struct amdgpu_ring_funcs sdma_v4_0_ring_funcs = { .pad_ib = sdma_v4_0_ring_pad_ib, .emit_wreg = sdma_v4_0_ring_emit_wreg, .emit_reg_wait = sdma_v4_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev) -- cgit v1.2.1 From 1ab0c9a75f66293a8ea719cc96ae4141218eb0e4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Mar 2018 17:05:19 -0500 Subject: drm/amdgpu/uvd7: add emit_reg_write_reg_wait ring callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for writing and reading back using the helper since the engines doesn't have a oneshot packet. Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index eddc57f3b72a..280c0826e183 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1702,6 +1702,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { .end_use = amdgpu_uvd_ring_end_use, .emit_wreg = uvd_v7_0_enc_ring_emit_wreg, .emit_reg_wait = uvd_v7_0_enc_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) -- cgit v1.2.1 From 3fa0b1cbc0a57a21c1688601f6b9c340441ba3b6 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Mar 2018 17:06:33 -0500 Subject: drm/amdgpu/vce4: add emit_reg_write_reg_wait ring callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for writing and reading back using the helper since the engines doesn't have a oneshot packet. 
Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c index 73fd48d6c756..8fd1b742985a 100755 --- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c @@ -1081,6 +1081,7 @@ static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = { .end_use = amdgpu_vce_ring_end_use, .emit_wreg = vce_v4_0_emit_wreg, .emit_reg_wait = vce_v4_0_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; static void vce_v4_0_set_ring_funcs(struct amdgpu_device *adev) -- cgit v1.2.1 From f58b85e3ec0e3d3ddeff6eb16ace23a42516ae70 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Mar 2018 17:06:52 -0500 Subject: drm/amdgpu/vcn1: add emit_reg_write_reg_wait ring callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support for writing and reading back using the helper since the engines doesn't have a oneshot packet. Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 8c132673bc79..d9a15338db7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1139,6 +1139,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { .end_use = amdgpu_vcn_ring_end_use, .emit_wreg = vcn_v1_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev) -- cgit v1.2.1 From f8bc903707ae87342b97528037e27bf190051c11 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 27 Mar 2018 17:10:56 -0500 Subject: drm/amdgpu/gmc9: use amdgpu_ring_emit_reg_write_reg_wait in gpu tlb flush MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use amdgpu_ring_emit_reg_write_reg_wait. On engines that support it, it provides a write and wait in a single packet which avoids a missed ack if a world switch happens between the request and waiting for the ack. 
Reviewed-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 070946e1e4a7..aeaed7fe9ced 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -385,11 +385,9 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid), upper_32_bits(pd_addr)); - amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_req + eng, req); - - /* wait for the invalidate to complete */ - amdgpu_ring_emit_reg_wait(ring, hub->vm_inv_eng0_ack + eng, - 1 << vmid, 1 << vmid); + amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng, + hub->vm_inv_eng0_ack + eng, + req, 1 << vmid); return pd_addr; } -- cgit v1.2.1 From ebdef28ebbcf767d9fa687acb1d02d97d834c628 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 6 Apr 2018 14:54:09 -0500 Subject: drm/amdgpu/gmc: steal the appropriate amount of vram for fw hand-over (v3) Steal 9 MB for vga emulation and fb if vga is enabled, otherwise, steal enough to cover the current display size as set by the vbios. If no memory is used (e.g., secondary or headless card), skip stolen memory reserve. v2: skip reservation if vram is limited, address Christian's comments v3: squash in fix from Harry Reviewed-and-Tested-by: Andrey Grodzovsky (v2) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 +++++---- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 23 ++++++++++++-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 23 ++++++++++++-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 23 ++++++++++++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 53 +++++++++++++++++++++++++++++---- 5 files changed, 118 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ab73300e6c7f..2be04acf4efb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1441,12 +1441,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } - r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - &adev->stolen_vga_memory, - NULL, NULL); - if (r) - return r; + if (adev->gmc.stolen_size) { + r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->stolen_vga_memory, + NULL, NULL); + if (r) + return r; + } DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 5617cf62c566..24e1ea36b454 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -825,6 +825,25 @@ static int gmc_v6_0_late_init(void *handle) return 0; } +static unsigned gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev) +{ + u32 d1vga_control = RREG32(mmD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ + } else { + u32 viewport = RREG32(mmVIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * + 4); + } + /* return 0 if the pre-OS buffer 
uses up most of vram */ + if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) + return 0; + return size; +} + static int gmc_v6_0_sw_init(void *handle) { int r; @@ -851,8 +870,6 @@ static int gmc_v6_0_sw_init(void *handle) adev->gmc.mc_mask = 0xffffffffffULL; - adev->gmc.stolen_size = 256 * 1024; - adev->need_dma32 = false; dma_bits = adev->need_dma32 ? 32 : 40; r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits)); @@ -878,6 +895,8 @@ static int gmc_v6_0_sw_init(void *handle) if (r) return r; + adev->gmc.stolen_size = gmc_v6_0_get_vbios_fb_size(adev); + r = amdgpu_bo_init(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 80054f36e487..93861f9c7773 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -964,6 +964,25 @@ static int gmc_v7_0_late_init(void *handle) return 0; } +static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) +{ + u32 d1vga_control = RREG32(mmD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ + } else { + u32 viewport = RREG32(mmVIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * + 4); + } + /* return 0 if the pre-OS buffer uses up most of vram */ + if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) + return 0; + return size; +} + static int gmc_v7_0_sw_init(void *handle) { int r; @@ -998,8 +1017,6 @@ static int gmc_v7_0_sw_init(void *handle) */ adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ - adev->gmc.stolen_size = 256 * 1024; - /* set DMA mask + need_dma32 flags. * PCIE - can handle 40-bits. * IGP - can handle 40-bits @@ -1030,6 +1047,8 @@ static int gmc_v7_0_sw_init(void *handle) if (r) return r; + adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev); + /* Memory manager */ r = amdgpu_bo_init(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index d71d4cb68f9c..fbd8f56c70f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1055,6 +1055,25 @@ static int gmc_v8_0_late_init(void *handle) return 0; } +static unsigned gmc_v8_0_get_vbios_fb_size(struct amdgpu_device *adev) +{ + u32 d1vga_control = RREG32(mmD1VGA_CONTROL); + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ + } else { + u32 viewport = RREG32(mmVIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * + 4); + } + /* return 0 if the pre-OS buffer uses up most of vram */ + if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) + return 0; + return size; +} + #define mmMC_SEQ_MISC0_FIJI 0xA71 static int gmc_v8_0_sw_init(void *handle) @@ -1096,8 +1115,6 @@ static int gmc_v8_0_sw_init(void *handle) */ adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ - adev->gmc.stolen_size = 256 * 1024; - /* set DMA mask + need_dma32 flags. * PCIE - can handle 40-bits. 
* IGP - can handle 40-bits @@ -1128,6 +1145,8 @@ static int gmc_v8_0_sw_init(void *handle) if (r) return r; + adev->gmc.stolen_size = gmc_v8_0_get_vbios_fb_size(adev); + /* Memory manager */ r = amdgpu_bo_init(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index aeaed7fe9ced..3071f51d6ca6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -57,6 +57,14 @@ #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L +/* add these here since we already include dce12 headers and these are for DCN */ +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d +#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 +#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0 +#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10 +#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL +#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L + /* XXX Move this macro to VEGA10 header file, which is like vid.h for VI.*/ #define AMDGPU_NUM_OF_VMIDS 8 @@ -791,6 +799,43 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) return amdgpu_gart_table_vram_alloc(adev); } +static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) +{ +#if 0 + u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL); +#endif + unsigned size; + + if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { + size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ + } else { + u32 viewport; + + switch (adev->asic_type) { + case CHIP_RAVEN: + viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION); + size = (REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, + HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) * + 4); + break; + case CHIP_VEGA10: + case CHIP_VEGA12: + default: + viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE); + size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) * + REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_WIDTH) * + 4); + break; + } + } + /* return 0 if the pre-OS buffer uses up most of vram */ + if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) + return 0; + return size; +} + static int gmc_v9_0_sw_init(void *handle) { int r; @@ -842,12 +887,6 @@ static int gmc_v9_0_sw_init(void *handle) */ adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */ - /* - * It needs to reserve 8M stolen memory for vega10 - * TODO: Figure out how to avoid that... - */ - adev->gmc.stolen_size = 8 * 1024 * 1024; - /* set DMA mask + need_dma32 flags. * PCIE - can handle 44-bits. * IGP - can handle 44-bits @@ -872,6 +911,8 @@ static int gmc_v9_0_sw_init(void *handle) if (r) return r; + adev->gmc.stolen_size = gmc_v9_0_get_vbios_fb_size(adev); + /* Memory manager */ r = amdgpu_bo_init(adev); if (r) -- cgit v1.2.1 From 6f752ec2c20c6a575da29d5b297980f376830e6b Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Fri, 6 Apr 2018 14:54:10 -0500 Subject: drm/amdgpu: Free VGA stolen memory as soon as possible. Reserved VRAM is used to avoid overriding pre OS FB. Once our display stack takes over we don't need the reserved VRAM anymore. v2: Remove comment, we know actually why we need to reserve the stolen VRAM. Fix return type for amdgpu_ttm_late_init. 
v3: Return 0 in amdgpu_bo_late_init, rebase on changes to previous patch v4: rebase v5: For GMC9 reserve always just 9M and keep the stolem memory around until GART table curruption on S3 resume is resolved. Reviewed-by: Alex Deucher Signed-off-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 7 +++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 26 ++++++++++++++++++++++++++ 8 files changed, 46 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9e23d6f6f3f3..a160ef0332d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -852,6 +852,13 @@ int amdgpu_bo_init(struct amdgpu_device *adev) return amdgpu_ttm_init(adev); } +int amdgpu_bo_late_init(struct amdgpu_device *adev) +{ + amdgpu_ttm_late_init(adev); + + return 0; +} + void amdgpu_bo_fini(struct amdgpu_device *adev) { amdgpu_ttm_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 3bee13344065..1e9fe85abcbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -251,6 +251,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, int amdgpu_bo_unpin(struct amdgpu_bo *bo); int amdgpu_bo_evict_vram(struct amdgpu_device *adev); int amdgpu_bo_init(struct amdgpu_device *adev); +int amdgpu_bo_late_init(struct amdgpu_device *adev); void amdgpu_bo_fini(struct amdgpu_device *adev); int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, struct vm_area_struct *vma); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 2be04acf4efb..29efaac6e3ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1517,13 +1517,17 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return 0; } +void amdgpu_ttm_late_init(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); +} + void amdgpu_ttm_fini(struct amdgpu_device *adev) { if (!adev->mman.initialized) return; amdgpu_ttm_debugfs_fini(adev); - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); if (adev->mman.aper_base_kaddr) iounmap(adev->mman.aper_base_kaddr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6ea7de863041..e969c879d87e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -77,6 +77,7 @@ uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); int amdgpu_ttm_init(struct amdgpu_device *adev); +void amdgpu_ttm_late_init(struct amdgpu_device *adev); void amdgpu_ttm_fini(struct amdgpu_device *adev); void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 24e1ea36b454..79f9ac29019b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -819,6 +819,8 @@ static int 
gmc_v6_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_bo_late_init(adev); + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); else diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 93861f9c7773..7147bfe25a23 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -958,6 +958,8 @@ static int gmc_v7_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_bo_late_init(adev); + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); else diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index fbd8f56c70f3..4d970daa65f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1049,6 +1049,8 @@ static int gmc_v8_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_bo_late_init(adev); + if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); else diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3071f51d6ca6..e6b00b507d4d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -665,6 +665,11 @@ static int gmc_v9_0_late_init(void *handle) unsigned i; int r; + /* + * TODO - Uncomment once GART corruption issue is fixed. + */ + /* amdgpu_bo_late_init(adev); */ + for(i = 0; i < adev->num_rings; ++i) { struct amdgpu_ring *ring = adev->rings[i]; unsigned vmhub = ring->funcs->vmhub; @@ -806,6 +811,13 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) #endif unsigned size; + /* + * TODO Remove once GART corruption is resolved + * Check related code in gmc_v9_0_sw_fini + * */ + size = 9 * 1024 * 1024; + +#if 0 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ } else { @@ -833,6 +845,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) /* return 0 if the pre-OS buffer uses up most of vram */ if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) return 0; + +#endif return size; } @@ -956,6 +970,18 @@ static int gmc_v9_0_sw_fini(void *handle) amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); gmc_v9_0_gart_fini(adev); + + /* + * TODO: + * Currently there is a bug where some memory client outside + * of the driver writes to first 8M of VRAM on S3 resume, + * this overrides GART which by default gets placed in first 8M and + * causes VM_FAULTS once GTT is accessed. + * Keep the stolen memory reservation until the while this is not solved. 
+ * Also check code in gmc_v9_0_get_vbios_fb_size and gmc_v9_0_late_init + */ + amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); + amdgpu_bo_fini(adev); return 0; -- cgit v1.2.1 From 8ee3a52e3f35e064a3bf82f21dc74ddaf9843648 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Mon, 16 Apr 2018 10:07:02 +0800 Subject: drm/gpu-sched: fix force APP kill hang(v4) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit issue: there are VMC page fault occurred if force APP kill during 3dmark test, the cause is in entity_fini we manually signal all those jobs in entity's queue which confuse the sync/dep mechanism: 1)page fault occurred in sdma's clear job which operate on shadow buffer, and shadow buffer's Gart table is cleaned by ttm_bo_release since the fence in its reservation was fake signaled by entity_fini() under the case of SIGKILL received. 2)page fault occurred in gfx' job because during the lifetime of gfx job we manually fake signal all jobs from its entity in entity_fini(), thus the unmapping/clear PTE job depend on those result fence is satisfied and sdma start clearing the PTE and lead to GFX page fault. fix: 1)should at least wait all jobs already scheduled complete in entity_fini() if SIGKILL is the case. 2)if a fence signaled and try to clear some entity's dependency, should set this entity guilty to prevent its job really run since the dependency is fake signaled. v2: splitting drm_sched_entity_fini() into two functions: 1)The first one is does the waiting, removes the entity from the runqueue and returns an error when the process was killed. 2)The second one then goes over the entity, install it as completion signal for the remaining jobs and signals all jobs with an error code. v3: 1)Replace the fini1 and fini2 with better name 2)Call the first part before the VM teardown in amdgpu_driver_postclose_kms() and the second part after the VM teardown 3)Keep the original function drm_sched_entity_fini to refine the code. v4: 1)Rename entity->finished to entity->last_scheduled; 2)Rename drm_sched_entity_fini_job_cb() to drm_sched_entity_kill_jobs_cb(); 3)Pass NULL to drm_sched_entity_fini_job_cb() if -ENOENT; 4)Replace the type of entity->fini_status with "int"; 5)Remove the check about entity->finished. 
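Condensed, the teardown ordering this split enables looks roughly as follows (pieced together from the amdgpu hunks below; the scheduler-side functions live outside drivers/gpu/drm/amd and are not shown in this series):

/* in amdgpu_driver_postclose_kms(), roughly: */
amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr); /* drm_sched_entity_do_release(): stop the
                                              * entities, wait for already-scheduled jobs */
/* ... free per-file resources, then tear down the VM ... */
amdgpu_vm_fini(adev, &fpriv->vm);
amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);        /* drm_sched_entity_cleanup(): signal leftover
                                              * jobs with an error and free the contexts */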
Signed-off-by: Monk Liu Signed-off-by: Emily Deng Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 64 ++++++++++++++++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 +-- 3 files changed, 61 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c25ee750c362..ea1b28536bfc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -681,6 +681,8 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id); void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); +void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr); +void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr); void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 09d35051fdd6..eb80edfb1b0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -111,8 +111,9 @@ failed: return r; } -static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) +static void amdgpu_ctx_fini(struct kref *ref) { + struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); struct amdgpu_device *adev = ctx->adev; unsigned i, j; @@ -125,13 +126,11 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx) kfree(ctx->fences); ctx->fences = NULL; - for (i = 0; i < adev->num_rings; i++) - drm_sched_entity_fini(&adev->rings[i]->sched, - &ctx->rings[i].entity); - amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr); mutex_destroy(&ctx->lock); + + kfree(ctx); } static int amdgpu_ctx_alloc(struct amdgpu_device *adev, @@ -170,12 +169,15 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev, static void amdgpu_ctx_do_release(struct kref *ref) { struct amdgpu_ctx *ctx; + u32 i; ctx = container_of(ref, struct amdgpu_ctx, refcount); - amdgpu_ctx_fini(ctx); + for (i = 0; i < ctx->adev->num_rings; i++) + drm_sched_entity_fini(&ctx->adev->rings[i]->sched, + &ctx->rings[i].entity); - kfree(ctx); + amdgpu_ctx_fini(ref); } static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id) @@ -435,16 +437,62 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) idr_init(&mgr->ctx_handles); } +void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) +{ + struct amdgpu_ctx *ctx; + struct idr *idp; + uint32_t id, i; + + idp = &mgr->ctx_handles; + + idr_for_each_entry(idp, ctx, id) { + + if (!ctx->adev) + return; + + for (i = 0; i < ctx->adev->num_rings; i++) + if (kref_read(&ctx->refcount) == 1) + drm_sched_entity_do_release(&ctx->adev->rings[i]->sched, + &ctx->rings[i].entity); + else + DRM_ERROR("ctx %p is still alive\n", ctx); + } +} + +void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr) +{ + struct amdgpu_ctx *ctx; + struct idr *idp; + uint32_t id, i; + + idp = &mgr->ctx_handles; + + idr_for_each_entry(idp, ctx, id) { + + if (!ctx->adev) + return; + + for (i = 0; i < ctx->adev->num_rings; i++) + if (kref_read(&ctx->refcount) == 1) + drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched, + &ctx->rings[i].entity); + else + DRM_ERROR("ctx %p is still alive\n", ctx); + } +} + void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr) { struct amdgpu_ctx *ctx; struct idr *idp; uint32_t id; + amdgpu_ctx_mgr_entity_cleanup(mgr); + idp = &mgr->ctx_handles; idr_for_each_entry(idp, ctx, id) { - if 
(kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1) + if (kref_put(&ctx->refcount, amdgpu_ctx_fini) != 1) DRM_ERROR("ctx %p is still alive\n", ctx); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bd9e723dbb2b..1ed379524117 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -913,8 +913,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, return; pm_runtime_get_sync(dev->dev); - - amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); + amdgpu_ctx_mgr_entity_fini(&fpriv->ctx_mgr); if (adev->asic_type != CHIP_RAVEN) { amdgpu_uvd_free_handles(adev, file_priv); @@ -935,6 +934,8 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, pd = amdgpu_bo_ref(fpriv->vm.root.base.bo); amdgpu_vm_fini(adev, &fpriv->vm); + amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); + if (pasid) amdgpu_pasid_free_delayed(pd->tbo.resv, pasid); amdgpu_bo_unref(&pd); -- cgit v1.2.1 From a0701722b68e69443dd3dd7970a9f343b7560a2c Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 4 Apr 2018 14:11:45 +0800 Subject: Revert "drm/amd/powerply: fix power reading on Fiji" we don't have limit of [50ms, 4sec] sampling period. smu calculate average gpu power in real time. we can read average gpu power through smu message or read special register. This reverts commit 462d8dcc9fec0d89f1ff6a1f93f1d4f670878c71. Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index ed43dd39b5d6..5bccf895ba41 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3364,8 +3364,7 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, "Failed to start pm status log!", return -1); - /* Sampling period from 50ms to 4sec */ - msleep_interruptible(200); + msleep_interruptible(20); PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample), -- cgit v1.2.1 From b89c71d1eb1c43c6c61f6d74d7454702d367f18b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 4 Apr 2018 14:17:09 +0800 Subject: drm/amd/pp: Refine get_gpu_power for VI pkgpwr is the average gpu power of 100ms. it is calculated by firmware in real time. 1. we can send smu message PPSMC_MSG_GetCurrPkgPwr to read currentpkgpwr directly. 2. On Fiji/tonga/bonaire/hawwii, without PPSMC_MSG_GetCurrPkgPwr support. Send PPSMC_MSG_PmStatusLogStart/Sample to let smu write currentpkgpwr to ixSMU_PM_STATUS_94. driver can read pkgpwr from ixSMU_PM_STATUS_94. 
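For reference, the averaged value produced here is what amdgpu_hwmon_show_power_avg (touched in a later patch in this series) reports through hwmon in microwatts. A small user-space sketch, assuming the amdgpu hwmon instance happens to be hwmon0 and the attribute is the conventional power1_average:

#include <stdio.h>

int main(void)
{
        unsigned long long uw;
        /* hwmon index varies per system; hwmon0 is an assumption */
        FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_average", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%llu", &uw) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("average GPU power: %llu.%06llu W\n",
               uw / 1000000, uw % 1000000);
        return 0;
}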
Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 51 ++++++++++++---------- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 10 +++-- 2 files changed, 34 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 5bccf895ba41..51867c702540 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3359,30 +3359,33 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, struct pp_gpu_power *query) { - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_PmStatusLogStart), - "Failed to start pm status log!", - return -1); - - msleep_interruptible(20); - - PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_PmStatusLogSample), - "Failed to sample pm status log!", - return -1); - - query->vddc_power = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, - ixSMU_PM_STATUS_40); - query->vddci_power = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, - ixSMU_PM_STATUS_49); - query->max_gpu_power = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, - ixSMU_PM_STATUS_94); - query->average_gpu_power = cgs_read_ind_register(hwmgr->device, - CGS_IND_REG__SMC, - ixSMU_PM_STATUS_95); + int i; + + if (!query) + return -EINVAL; + + + memset(query, 0, sizeof *query); + + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); + query->average_gpu_power = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + + if (query->average_gpu_power != 0) + return 0; + + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_PM_STATUS_94, 0); + + for (i = 0; i < 20; i++) { + mdelay(1); + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); + query->average_gpu_power = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + ixSMU_PM_STATUS_94); + if (query->average_gpu_power != 0) + break; + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index fb32a3fcc278..10a112376fd1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -171,8 +171,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); - if (ret != 1) - pr_info("\n failed to send pre message %x ret is %d \n", msg, ret); + if (ret == 0xFE) + pr_debug("last message was not supported\n"); + else if (ret != 1) + pr_info("\n last message was failed ret is %d\n", ret); cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); @@ -180,7 +182,9 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); - if (ret != 1) + if (ret == 0xFE) + pr_debug("message %x was not supported\n", msg); + else if (ret != 1) pr_info("\n failed to send message %x ret is %d \n", msg, ret); return 0; -- cgit v1.2.1 From 5b79d0482f3c1e8d5d78bd573a41e91dd9f0a5a1 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 4 Apr 2018 15:37:35 +0800 Subject: drm/amd/pp: Remove struct pp_gpu_power Currently smu only calculate average gpu power in real time. 
for vddc/vddci/max power, User need to set start time and end time, firmware can calculate the average vddc/vddci/max power. but the type of return values is not unified. For Vi, return type is uint. For vega, return type is float. so this struct can't be suitable for all asics. Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 7 ++----- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 22 +++++++-------------- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 7 ------- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 23 +++++++++------------- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 17 +++++++--------- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 13 ++++-------- 6 files changed, 29 insertions(+), 60 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 1ed379524117..efff211d7d90 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -701,9 +701,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file } } case AMDGPU_INFO_SENSOR: { - struct pp_gpu_power query = {0}; - int query_size = sizeof(query); - if (!adev->pm.dpm_enabled) return -ENOENT; @@ -746,10 +743,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file /* get average GPU power */ if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, - (void *)&query, &query_size)) { + (void *)&ui32, &ui32_size)) { return -EINVAL; } - ui32 = query.average_gpu_power >> 8; + ui32 >>= 8; break; case AMDGPU_INFO_SENSOR_VDDNB: /* get VDDNB in millivolts */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index e5f60fc31516..744f105a2c75 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1020,8 +1020,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, { struct amdgpu_device *adev = dev_get_drvdata(dev); struct drm_device *ddev = adev->ddev; - struct pp_gpu_power query = {0}; - int r, size = sizeof(query); + u32 query = 0; + int r, size = sizeof(u32); unsigned uw; /* Can't get power when the card is off */ @@ -1041,7 +1041,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, return r; /* convert to microwatts */ - uw = (query.average_gpu_power >> 8) * 1000000; + uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; return snprintf(buf, PAGE_SIZE, "%u\n", uw); } @@ -1752,7 +1752,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) { uint32_t value; - struct pp_gpu_power query = {0}; + uint32_t query = 0; int size; /* sanity check PP is enabled */ @@ -1775,17 +1775,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a seq_printf(m, "\t%u mV (VDDGFX)\n", value); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) seq_printf(m, "\t%u mV (VDDNB)\n", value); - size = sizeof(query); - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) { - seq_printf(m, "\t%u.%u W (VDDC)\n", query.vddc_power >> 8, - query.vddc_power & 0xff); - seq_printf(m, "\t%u.%u W (VDDCI)\n", query.vddci_power >> 8, - query.vddci_power & 0xff); - seq_printf(m, "\t%u.%u W (max GPU)\n", query.max_gpu_power >> 8, - query.max_gpu_power & 0xff); - seq_printf(m, "\t%u.%u W (average GPU)\n", 
query.average_gpu_power >> 8, - query.average_gpu_power & 0xff); - } + size = sizeof(uint32_t); + if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) + seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); size = sizeof(value); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 5c840c022b52..1bec9072e36f 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -149,13 +149,6 @@ struct pp_states_info { uint32_t states[16]; }; -struct pp_gpu_power { - uint32_t vddc_power; - uint32_t vddci_power; - uint32_t max_gpu_power; - uint32_t average_gpu_power; -}; - #define PP_GROUP_MASK 0xF0000000 #define PP_GROUP_SHIFT 28 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 51867c702540..f5b3617364f1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3356,36 +3356,34 @@ static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr, return 0; } -static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, - struct pp_gpu_power *query) +static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) { int i; + u32 tmp = 0; if (!query) return -EINVAL; - - memset(query, 0, sizeof *query); - smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); - query->average_gpu_power = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); - if (query->average_gpu_power != 0) + if (tmp != 0) return 0; smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_PM_STATUS_94, 0); - for (i = 0; i < 20; i++) { + for (i = 0; i < 10; i++) { mdelay(1); smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); - query->average_gpu_power = cgs_read_ind_register(hwmgr->device, + tmp = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_PM_STATUS_94); - if (query->average_gpu_power != 0) + if (tmp != 0) break; } + *query = tmp; return 0; } @@ -3438,10 +3436,7 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; return 0; case AMDGPU_PP_SENSOR_GPU_POWER: - if (*size < sizeof(struct pp_gpu_power)) - return -EINVAL; - *size = sizeof(struct pp_gpu_power); - return smu7_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); + return smu7_get_gpu_power(hwmgr, (uint32_t *)value); case AMDGPU_PP_SENSOR_VDDGFX: if ((data->vr_config & 0xff) == 0x2) val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 127c550e8bb1..0bbc5647d77d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -3781,16 +3781,18 @@ static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) } static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr, - struct pp_gpu_power *query) + uint32_t *query) { uint32_t value; + if (!query) + return -EINVAL; + smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr); value = smum_get_argument(hwmgr); - /* power value is an integer */ - memset(query, 0, sizeof *query); - query->average_gpu_power = value << 8; + /* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */ + *query = value << 8; return 0; } @@ -3840,12 +3842,7 @@ 
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_GPU_POWER: - if (*size < sizeof(struct pp_gpu_power)) - ret = -EINVAL; - else { - *size = sizeof(struct pp_gpu_power); - ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); - } + ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value); break; case AMDGPU_PP_SENSOR_VDDGFX: val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) & diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 3e1ed0aca29c..782e2098824d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c @@ -1113,8 +1113,7 @@ static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) return (mem_clk * 100); } -static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, - struct pp_gpu_power *query) +static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, uint32_t *query) { #if 0 uint32_t value; @@ -1126,7 +1125,7 @@ static int vega12_get_gpu_power(struct pp_hwmgr *hwmgr, vega12_read_arg_from_smc(hwmgr, &value); /* power value is an integer */ - query->average_gpu_power = value << 8; + *query = value << 8; #endif return 0; } @@ -1235,12 +1234,8 @@ static int vega12_read_sensor(struct pp_hwmgr *hwmgr, int idx, *size = 4; break; case AMDGPU_PP_SENSOR_GPU_POWER: - if (*size < sizeof(struct pp_gpu_power)) - ret = -EINVAL; - else { - *size = sizeof(struct pp_gpu_power); - ret = vega12_get_gpu_power(hwmgr, (struct pp_gpu_power *)value); - } + ret = vega12_get_gpu_power(hwmgr, (uint32_t *)value); + break; default: ret = -EINVAL; -- cgit v1.2.1 From 8db42a701326c8872d8634c7b4c0d045bf95f394 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 11 Apr 2018 18:11:49 +0800 Subject: drm/amd/pp: Clear smu response register before send smu message smu firmware do not update response register immediately under some delay tasks, we may read out the original value. so need to clear the register before send smu message. 
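For reference, a minimal sketch of the ordering this change enforces, with the initial logging and the 0xFE "unsupported" handling from the earlier patch left out (illustration only, not the full driver helper):

static int smc_send_msg_sketch(struct pp_hwmgr *hwmgr, uint16_t msg)
{
        int ret;

        /* wait until any previous transaction has completed */
        PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

        /* clear the response register so a stale value left behind by a
         * delayed firmware task cannot be mistaken for the reply to the
         * message we are about to send
         */
        cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);

        /* post the message and poll for a fresh, non-zero response */
        cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
        PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

        ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
        return (ret == 1) ? 0 : -EIO;   /* the real helper only logs failures */
}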
Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c | 4 +--- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 2a93f3a8e4f0..2d4ec8ac3a08 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -208,9 +208,7 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { int ret; - if (!ci_is_smc_ram_running(hwmgr)) - return -EINVAL; - + cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0); cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 10a112376fd1..64d33b775906 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -176,6 +176,7 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) else if (ret != 1) pr_info("\n last message was failed ret is %d\n", ret); + cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0); cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); -- cgit v1.2.1 From 63c2f7ed7bb3e98b4b22d5b136f4749706f17d36 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Sun, 8 Apr 2018 16:57:55 +0800 Subject: drm/amd/pp: Move common code to smu_helper.c Reviewed-by: Huang Rui Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 30 +------- drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 82 ++++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h | 24 +++++++ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 43 +----------- 4 files changed, 109 insertions(+), 70 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index f5b3617364f1..68aae09a886a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -793,32 +793,6 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) return 0; } -static int smu7_get_voltage_dependency_table( - const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table, - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) -{ - uint8_t i = 0; - PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count), - "Voltage Lookup Table empty", - return -EINVAL); - - dep_table->count = allowed_dep_table->count; - for (i=0; icount; i++) { - dep_table->entries[i].clk = allowed_dep_table->entries[i].clk; - dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd; - dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset; - dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc; - dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx; - dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci; - dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd; - dep_table->entries[i].phases = allowed_dep_table->entries[i].phases; - dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable; - dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset; - } 
- - return 0; -} - static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -846,7 +820,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr) entries[i].vddc = dep_sclk_table->entries[i].vddc; } - smu7_get_voltage_dependency_table(dep_sclk_table, + smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table, (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk)); odn_table->odn_memory_clock_dpm_levels.num_of_pl = @@ -858,7 +832,7 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr) entries[i].vddc = dep_mclk_table->entries[i].vddc; } - smu7_get_voltage_dependency_table(dep_mclk_table, + smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table, (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk)); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 529be3cd768a..7c23741619b6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -624,3 +624,85 @@ void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, return NULL; } + +int smu_get_voltage_dependency_table_ppt_v1( + const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) +{ + uint8_t i = 0; + PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count), + "Voltage Lookup Table empty", + return -EINVAL); + + dep_table->count = allowed_dep_table->count; + for (i=0; icount; i++) { + dep_table->entries[i].clk = allowed_dep_table->entries[i].clk; + dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd; + dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset; + dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc; + dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx; + dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci; + dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd; + dep_table->entries[i].phases = allowed_dep_table->entries[i].phases; + dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable; + dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset; + } + + return 0; +} + +int smu_set_watermarks_for_clocks_ranges(void *wt_table, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) +{ + uint32_t i; + struct watermarks *table = wt_table; + + if (!table || wm_with_clock_ranges) + return -EINVAL; + + if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4) + return -EINVAL; + + for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { + table->WatermarkRow[1][i].MinClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) / + 100); + table->WatermarkRow[1][i].MaxClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) / + 100); + table->WatermarkRow[1][i].MinUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) / + 100); + table->WatermarkRow[1][i].MaxUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) / + 100); + table->WatermarkRow[1][i].WmSetting = (uint8_t) + wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id; + } + + for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) { + 
table->WatermarkRow[0][i].MinClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) / + 100); + table->WatermarkRow[0][i].MaxClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) / + 100); + table->WatermarkRow[0][i].MinUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) / + 100); + table->WatermarkRow[0][i].MaxUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) / + 100); + table->WatermarkRow[0][i].WmSetting = (uint8_t) + wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id; + } + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h index 14ee162ac92a..916cc01e7652 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.h @@ -26,10 +26,27 @@ struct pp_atomctrl_voltage_table; struct pp_hwmgr; struct phm_ppt_v1_voltage_lookup_table; +struct Watermarks_t; +struct pp_wm_sets_with_clock_ranges_soc15; uint8_t convert_to_vid(uint16_t vddc); uint16_t convert_to_vddc(uint8_t vid); +struct watermark_row_generic_t { + uint16_t MinClock; + uint16_t MaxClock; + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Padding[3]; +}; + +struct watermarks { + struct watermark_row_generic_t WatermarkRow[2][4]; + uint32_t padding[7]; +}; + extern int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, uint32_t index, uint32_t value, uint32_t mask); @@ -85,6 +102,13 @@ int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr); void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev); +int smu_get_voltage_dependency_table_ppt_v1( + const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table); + +int smu_set_watermarks_for_clocks_ranges(void *wt_table, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges); + #define PHM_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT #define PHM_FIELD_MASK(reg, field) reg##__##field##_MASK diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 0bbc5647d77d..384aa07206c0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -4367,50 +4367,9 @@ static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = hwmgr->backend; Watermarks_t *table = &(data->smc_state_table.water_marks_table); int result = 0; - uint32_t i; if (!data->registry_data.disable_water_mark) { - for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { - table->WatermarkRow[WM_DCEFCLK][i].MinClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MaxClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MinUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].MaxUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) / - 100); - table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id; - } - - for (i = 0; i < 
wm_with_clock_ranges->num_wm_sets_mcif; i++) { - table->WatermarkRow[WM_SOCCLK][i].MinClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MaxClock = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MinUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].MaxUclk = - cpu_to_le16((uint16_t) - (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) / - 100); - table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) - wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id; - } + smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges); data->water_marks_bitmap = WaterMarksExist; } -- cgit v1.2.1 From 1afd30efeddbb1b32cf35d3bf6477b35690eeca6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 10 Apr 2018 13:42:29 +0200 Subject: drm/amdgpu: revert "add new bo flag that indicates BOs don't need fallback (v2)" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 6f51d28bfe8e1a676de5cd877639245bed3cc818. Makes fallback handling to complicated. This is just a feature for the GEM interface and shouldn't leak into the core BO create function. Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 +---- 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 68af2f878bc9..e1756b68a17b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -385,8 +385,7 @@ retry: amdgpu_bo_in_cpu_visible_vram(bo)) p->bytes_moved_vis += ctx.bytes_moved; - if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains && - !(bo->flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) { + if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) { domain = bo->allowed_domains; goto retry; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a160ef0332d6..1de6864da717 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -388,8 +388,6 @@ retry: drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); - bo->preferred_domains = preferred_domains; - bo->allowed_domains = allowed_domains; bo->flags = flags; @@ -426,8 +424,7 @@ retry: r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, &ctx, acc_size, NULL, resv, &amdgpu_ttm_bo_destroy); - if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device && - !(flags & AMDGPU_GEM_CREATE_NO_FALLBACK)) { + if (unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; goto retry; -- cgit v1.2.1 From 0808210478c76606c12bb475b3272b7780240812 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 10 Apr 2018 13:42:38 +0200 Subject: drm/amdgpu: revert "Don't change preferred domian when fallback GTT v6" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 
7d1ca1325260a9e9329b10a21e3692e6f188936f. Makes fallback handling to complicated. This is just a feature for the GEM interface and shouldn't leak into the core BO create function. The intended change to preserve the preferred domains is implemented in a follow up patch. Signed-off-by: Christian König Acked-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 16 +++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 37 +++++++++++------------------- 2 files changed, 27 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 28c2706e48d7..46b9ea4e6103 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -56,11 +56,23 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, alignment = PAGE_SIZE; } +retry: r = amdgpu_bo_create(adev, size, alignment, initial_domain, flags, type, resv, &bo); if (r) { - DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", - size, initial_domain, alignment, r); + if (r != -ERESTARTSYS) { + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { + flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + goto retry; + } + + if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { + initial_domain |= AMDGPU_GEM_DOMAIN_GTT; + goto retry; + } + DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n", + size, initial_domain, alignment, r); + } return r; } *obj = &bo->gem_base; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 1de6864da717..24f582c696cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -356,7 +356,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, struct amdgpu_bo *bo; unsigned long page_align; size_t acc_size; - u32 domains, preferred_domains, allowed_domains; int r; page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; @@ -370,24 +369,22 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size, sizeof(struct amdgpu_bo)); - preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_CPU | - AMDGPU_GEM_DOMAIN_GDS | - AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA); - allowed_domains = preferred_domains; - if (type != ttm_bo_type_kernel && - allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) - allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; - domains = preferred_domains; -retry: bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); + bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU | + AMDGPU_GEM_DOMAIN_GDS | + AMDGPU_GEM_DOMAIN_GWS | + AMDGPU_GEM_DOMAIN_OA); + bo->allowed_domains = bo->preferred_domains; + if (type != ttm_bo_type_kernel && + bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) + bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; bo->flags = flags; @@ -420,20 +417,12 @@ retry: #endif bo->tbo.bdev = &adev->mman.bdev; - amdgpu_ttm_placement_from_domain(bo, domains); + amdgpu_ttm_placement_from_domain(bo, domain); + r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, &bo->placement, page_align, &ctx, acc_size, NULL, resv, &amdgpu_ttm_bo_destroy); - if 
(unlikely(r && r != -ERESTARTSYS) && type == ttm_bo_type_device) { - if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { - flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - goto retry; - } else if (domains != allowed_domains) { - domains = allowed_domains; - goto retry; - } - } - if (unlikely(r)) + if (unlikely(r != 0)) return r; if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && -- cgit v1.2.1 From 361883649221f975d915e4bc79907da71017f38f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Mon, 19 Mar 2018 11:49:14 +0100 Subject: drm/amdgpu: re-validate per VM BOs if required v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If a per VM BO ends up in a allowed domain it never moves back into the prefered domain. v2: move the extra handling into amdgpu_vm_bo_update when we exit the state machine. Make memory type handling generic. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index da55a78d7380..f0fbc331aa30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1556,7 +1556,20 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } spin_lock(&vm->status_lock); - list_del_init(&bo_va->base.vm_status); + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { + unsigned mem_type = bo->tbo.mem.mem_type; + + /* If the BO is not in its preferred location add it back to + * the evicted list so that it gets validated again on the + * next command submission. + */ + if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) + list_add_tail(&bo_va->base.vm_status, &vm->evicted); + else + list_del_init(&bo_va->base.vm_status); + } else { + list_del_init(&bo_va->base.vm_status); + } spin_unlock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); -- cgit v1.2.1 From 03a27de648d8a2b2bf59a7f467855fac2d850350 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Tue, 10 Apr 2018 13:45:00 -0400 Subject: drm/amd/pp: Adding set_watermarks_for_clocks_ranges for SMU10 The function is never implemented for raven on linux. It follows similair implementation as on windows. SMU still needs to notify SMC and copy WM table, which is added here. But on other Asics such as Vega this step is not implemented. 
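As with the other powerplay callbacks, display code reaches this entry point through the hwmgr function table; a hypothetical call site is sketched below, assuming the usual hwmgr->hwmgr_func dispatch, purely to show where the SMU10-specific table upload fits in:

static int notify_wm_ranges_sketch(struct pp_hwmgr *hwmgr,
                struct pp_wm_sets_with_clock_ranges_soc15 *ranges)
{
        if (!hwmgr || !hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
                return -EINVAL;

        /* On SMU10/Raven the callback below also copies the watermark table
         * to the SMC via smum_smc_table_manager(); on Vega10 it only fills
         * the cached smc_state_table and the upload happens elsewhere.
         */
        return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr, ranges);
}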
Signed-off-by: Mikita Lipski Reviewed-by: Rex Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 13 +++++++++++++ drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h | 1 + 2 files changed, 14 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 6ba3b1fa57aa..b712d16a9e6f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -992,6 +992,18 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx, return ret; } +static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) +{ + struct smu10_hwmgr *data = hwmgr->backend; + Watermarks_t *table = &(data->water_marks_table); + int result = 0; + + smu_set_watermarks_for_clocks_ranges(table,wm_with_clock_ranges); + smum_smc_table_manager(hwmgr, (uint8_t *)table, (uint16_t)SMU10_WMTABLE, false); + data->water_marks_exist = true; + return result; +} static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr) { return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); @@ -1021,6 +1033,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .get_current_shallow_sleep_clocks = smu10_get_current_shallow_sleep_clocks, .get_clock_by_type_with_latency = smu10_get_clock_by_type_with_latency, .get_clock_by_type_with_voltage = smu10_get_clock_by_type_with_voltage, + .set_watermarks_for_clocks_ranges = smu10_set_watermarks_for_clocks_ranges, .get_max_high_clocks = smu10_get_max_high_clocks, .read_sensor = smu10_read_sensor, .set_active_display_count = smu10_set_active_display_count, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h index 175c3a592b6c..f68b218b9bce 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h @@ -290,6 +290,7 @@ struct smu10_hwmgr { bool vcn_dpg_mode; bool gfx_off_controled_by_driver; + bool water_marks_exist; Watermarks_t water_marks_table; struct smu10_clock_voltage_information clock_vol_info; DpmClocks_t clock_table; -- cgit v1.2.1 From 5c3517d0c2ead443f378173c698f3bd09cb89d72 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Wed, 11 Apr 2018 16:25:26 -0400 Subject: drm/amd/pp: Adding a function to store cc6 data in SMU10 Filling the smu10_store_cc6_data based on the implementation of Windows Powerplay. There is an uncertainty with one of the parameters passed to the function pstate_switch_disable - is not a part of smu10 private data structure. So in the function its just ignored. 
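The callback itself only caches the parameters and raises a dirty flag; a hypothetical consumer of that flag might look like the sketch below. The actual PPSMC message for programming CC6 is not part of this patch, so it is only named as a placeholder:

static void smu10_apply_cc6_sketch(struct pp_hwmgr *hwmgr)
{
        struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend);

        if (!data->cc6_setting_changed)
                return;

        /* placeholder: hand the cached separation_time / cc6_disable /
         * pstate_disable values to the firmware once a message for it is
         * wired up, then clear the flag
         */
        data->cc6_setting_changed = false;
}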
Signed-off-by: Mikita Lipski Reviewed-by: Rex Zhu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index b712d16a9e6f..0f252265a753 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -699,6 +699,16 @@ static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr) static int smu10_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time, bool cc6_disable, bool pstate_disable, bool pstate_switch_disable) { + struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend); + + if (separation_time != data->separation_time || + cc6_disable != data->cc6_disable || + pstate_disable != data->pstate_disable) { + data->separation_time = separation_time; + data->cc6_disable = cc6_disable; + data->pstate_disable = pstate_disable; + data->cc6_setting_changed = true; + } return 0; } -- cgit v1.2.1 From 2c773de2ecb8c327f2448bd1eecad224e9227087 Mon Sep 17 00:00:00 2001 From: Shirish S Date: Mon, 16 Apr 2018 12:17:57 +0530 Subject: drm/amdgpu: defer test IBs on the rings at boot (V3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit amdgpu_ib_ring_tests() runs test IB's on rings at boot contributes to ~500 ms of amdgpu driver's boot time. This patch defers it and ensures that its executed in amdgpu_info_ioctl() if it wasn't scheduled. V2: Use queue_delayed_work() & flush_delayed_work(). V3: removed usage of separate wq, ensure ib tests is run before enabling clockgating. Signed-off-by: Shirish S Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 17 ++++++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 3 +++ 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 62d6505ade84..d7f2bbdfd348 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1656,6 +1656,10 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) if (amdgpu_emu_mode == 1) return 0; + r = amdgpu_ib_ring_tests(adev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + for (i = 0; i < adev->num_ip_blocks; i++) { if (!adev->ip_blocks[i].status.valid) continue; @@ -1706,8 +1710,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) } } - mod_delayed_work(system_wq, &adev->late_init_work, - msecs_to_jiffies(AMDGPU_RESUME_MS)); + queue_delayed_work(system_wq, &adev->late_init_work, + msecs_to_jiffies(AMDGPU_RESUME_MS)); amdgpu_device_fill_reset_magic(adev); @@ -2374,10 +2378,6 @@ fence_driver_init: goto failed; } - r = amdgpu_ib_ring_tests(adev); - if (r) - DRM_ERROR("ib ring test failed (%d).\n", r); - if (amdgpu_sriov_vf(adev)) amdgpu_virt_init_data_exchange(adev); @@ -2639,11 +2639,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) } amdgpu_fence_driver_resume(adev); - if (resume) { - r = amdgpu_ib_ring_tests(adev); - if (r) - DRM_ERROR("ib ring test failed (%d).\n", r); - } r = amdgpu_device_ip_late_init(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index efff211d7d90..4e15b6fe2839 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -279,6 +279,9 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file if (!info->return_size || !info->return_pointer) return -EINVAL; + /* Ensure IB tests are run on ring */ + flush_delayed_work(&adev->late_init_work); + switch (info->query) { case AMDGPU_INFO_ACCEL_WORKING: ui32 = adev->accel_working; -- cgit v1.2.1 From c2f84e03a01ad09f18f9f132f8b1e78f699a5494 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 12 Apr 2018 16:37:09 -0400 Subject: drm/amd/display: Don't program bypass on linear regamma LUT Even though this is required for degamma since DCE HW only supports a couple predefined LUTs we can just program the LUT directly for regamma. This fixes dark screens which occurs when we program regamma to bypass while degamma is using srgb LUT. Signed-off-by: Harry Wentland Reviewed-by: Leo Li Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index ef5fad8c5aac..e3d90e918d1b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -139,13 +139,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) lut = (struct drm_color_lut *)blob->data; lut_size = blob->length / sizeof(struct drm_color_lut); - if (__is_lut_linear(lut, lut_size)) { - /* Set to bypass if lut is set to linear */ - stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - return 0; - } - gamma = dc_create_gamma(); if (!gamma) return -ENOMEM; -- cgit v1.2.1 From c74db7e42d9b538d2fa582cf0efe5640b25e950d Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Tue, 3 Apr 2018 11:36:14 -0400 Subject: drm/amd/display: dal 3.1.42 Signed-off-by: Eric Yang Reviewed-by: Anthony Koo Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 0f566a1ba35b..7ac8a1bee5ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.41" +#define DC_VER "3.1.42" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- cgit v1.2.1 From ab892598d033d1943e1dcb0326f2622d6026f524 Mon Sep 17 00:00:00 2001 From: Roman Li Date: Thu, 29 Mar 2018 10:56:17 -0400 Subject: drm/amd/display: fix brightness level after resume from suspend Adding missing call to cache current backlight values. Otherwise the brightness resets to default value on resume. 
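With ABM disabled (and the user backlight level cached) at blank time, the resume path can simply re-apply that level through the existing public API. A hypothetical sketch using the dc_link_set_backlight_level() prototype from dc_link.h; where the cached value lives is an assumption made only for illustration:

static void restore_backlight_sketch(struct dc_link *link,
                const struct dc_stream_state *stream,
                uint32_t cached_user_level)
{
        /* frame_ramp = 0: apply the cached level immediately on resume */
        dc_link_set_backlight_level(link, cached_user_level, 0, stream);
}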
Signed-off-by: Roman Li Reviewed-by: Charlene Liu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 13 +++++++++++++ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 ++ drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 4 +++- 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 0cd286f8eaa0..b44cf52090a5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -2018,6 +2018,19 @@ bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level, return true; } +bool dc_link_set_abm_disable(const struct dc_link *link) +{ + struct dc *core_dc = link->ctx->dc; + struct abm *abm = core_dc->res_pool->abm; + + if ((abm == NULL) || (abm->funcs->set_backlight_level == NULL)) + return false; + + abm->funcs->set_abm_immediate_disable(abm); + + return true; +} + bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait) { struct dc *core_dc = link->ctx->dc; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index eeff98741293..8a716baa1203 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -141,6 +141,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level, uint32_t frame_ramp, const struct dc_stream_state *stream); +bool dc_link_set_abm_disable(const struct dc_link *dc_link); + bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait); bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state); diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 68a182ce53c7..15897f0a9616 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1046,8 +1046,10 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; - if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) + if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { link->dc->hwss.edp_backlight_control(link, false); + dc_link_set_abm_disable(link); + } if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc); -- cgit v1.2.1 From fcb2008a70c8dffc9179ce41838496ba816e14a1 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Tue, 3 Apr 2018 11:23:11 -0400 Subject: drm/amd/display: Move dp_pixel_encoding_type to stream_encoder include Signed-off-by: Eric Bernstein Reviewed-by: Nikola Cornij Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 17 ----------------- .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 19 +++++++++++++++++++ 2 files changed, 19 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 9fe73028d588..cf7433ebf91a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -186,23 
+186,6 @@ enum controller_dp_test_pattern { CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA }; -enum dp_pixel_encoding_type { - DP_PIXEL_ENCODING_TYPE_RGB444 = 0x00000000, - DP_PIXEL_ENCODING_TYPE_YCBCR422 = 0x00000001, - DP_PIXEL_ENCODING_TYPE_YCBCR444 = 0x00000002, - DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT = 0x00000003, - DP_PIXEL_ENCODING_TYPE_Y_ONLY = 0x00000004, - DP_PIXEL_ENCODING_TYPE_YCBCR420 = 0x00000005 -}; - -enum dp_component_depth { - DP_COMPONENT_PIXEL_DEPTH_6BPC = 0x00000000, - DP_COMPONENT_PIXEL_DEPTH_8BPC = 0x00000001, - DP_COMPONENT_PIXEL_DEPTH_10BPC = 0x00000002, - DP_COMPONENT_PIXEL_DEPTH_12BPC = 0x00000003, - DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004 -}; - enum dc_lut_mode { LUT_BYPASS, LUT_RAM_A, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 5c21336cae4c..cfa7ec9517ae 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -29,11 +29,29 @@ #define STREAM_ENCODER_H_ #include "audio_types.h" +#include "hw_shared.h" struct dc_bios; struct dc_context; struct dc_crtc_timing; +enum dp_pixel_encoding_type { + DP_PIXEL_ENCODING_TYPE_RGB444 = 0x00000000, + DP_PIXEL_ENCODING_TYPE_YCBCR422 = 0x00000001, + DP_PIXEL_ENCODING_TYPE_YCBCR444 = 0x00000002, + DP_PIXEL_ENCODING_TYPE_RGB_WIDE_GAMUT = 0x00000003, + DP_PIXEL_ENCODING_TYPE_Y_ONLY = 0x00000004, + DP_PIXEL_ENCODING_TYPE_YCBCR420 = 0x00000005 +}; + +enum dp_component_depth { + DP_COMPONENT_PIXEL_DEPTH_6BPC = 0x00000000, + DP_COMPONENT_PIXEL_DEPTH_8BPC = 0x00000001, + DP_COMPONENT_PIXEL_DEPTH_10BPC = 0x00000002, + DP_COMPONENT_PIXEL_DEPTH_12BPC = 0x00000003, + DP_COMPONENT_PIXEL_DEPTH_16BPC = 0x00000004 +}; + struct encoder_info_frame { /* auxiliary video information */ struct dc_info_packet avi; @@ -138,6 +156,7 @@ struct stream_encoder_funcs { void (*set_avmute)( struct stream_encoder *enc, bool enable); + }; #endif /* STREAM_ENCODER_H_ */ -- cgit v1.2.1 From fc6de1c565e03f492a3d9725b93092dac0cc1845 Mon Sep 17 00:00:00 2001 From: "Leo (Sunpeng) Li" Date: Tue, 3 Apr 2018 16:07:16 -0400 Subject: drm/amd/display: Fix regamma not affecting full-intensity color values Hardware understands the regamma LUT as a piecewise linear function, with points spaced exponentially along the range. We previously programmed the LUT for range [2^-10, 2^0). This causes (normalized) color values of 1 (=2^0) to miss the programmed LUT, and fall onto the end region. For DCE, the end region is extrapolated using a single (base, slope) pair, using the max y-value from the last point in the curve as base. This presents a problem, since this value affects all three color channels. Scaling down the intensity of say - the blue regamma curve - will not affect it's end region. This is especially noticiable when using RedShift. It scales down the blue and green channels, but leaves full-intensity colors unshifted. Therefore, extend the range to cover [2^-10, 2^1) by programming another hardware segment, containing only one point. That way, we won't be hitting the end region. Note that things are a bit different for DCN, since the end region can be set per-channel. 
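A worked example of the resulting DCE segment layout, based on the seg_distr[] values in the hunk below (region i spans [2^(region_start+i), 2^(region_start+i+1)) and is given 2^seg_distr[i] points; -1 marks an unused region):

        regions 0..9 : [2^-10, 2^0), seg_distr = 4 -> 16 points per region (160 total)
        region 10    : [2^0,   2^1), seg_distr = 0 -> 1 point

An input of exactly 1.0 (= 2^0) therefore lands on a programmed LUT point rather than in the end region, whose single (base, slope) extrapolation is shared across all three color channels on DCE.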
Signed-off-by: Leo (Sunpeng) Li Reviewed-by: Krunoslav Kovac Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 15897f0a9616..1b5c11c8fa1f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -456,10 +456,13 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, } else { /* 10 segments - * segment is from 2^-10 to 2^0 + * segment is from 2^-10 to 2^1 + * We include an extra segment for range [2^0, 2^1). This is to + * ensure that colors with normalized values of 1 don't miss the + * LUT. */ region_start = -10; - region_end = 0; + region_end = 1; seg_distr[0] = 4; seg_distr[1] = 4; @@ -471,7 +474,7 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, seg_distr[7] = 4; seg_distr[8] = 4; seg_distr[9] = 4; - seg_distr[10] = -1; + seg_distr[10] = 0; seg_distr[11] = -1; seg_distr[12] = -1; seg_distr[13] = -1; -- cgit v1.2.1 From c5b38aec266deade4067ddc606634ace68d2da8c Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 29 Mar 2018 16:39:10 -0400 Subject: drm/amd/display: fix segfault on insufficient TG during validation Signed-off-by: Dmytro Laktyushkin Reviewed-by: Dmytro Laktyushkin Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 8d7bc1fa9ffe..d7a92eca8a27 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1700,7 +1700,7 @@ enum dc_status resource_map_pool_resources( pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream); #endif - if (pipe_idx < 0) + if (pipe_idx < 0 || context->res_ctx.pipe_ctx[pipe_idx].stream_res.tg == NULL) return DC_NO_CONTROLLER_RESOURCE; pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx]; -- cgit v1.2.1 From d0f6f1c0319d39b792a7969bf511d5b1870f1f0e Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Fri, 23 Mar 2018 15:25:43 -0400 Subject: drm/amd/display: change dml init to use default structs Signed-off-by: Dmytro Laktyushkin Reviewed-by: Eric Bernstein Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/display_mode_lib.c | 138 ++++++++++++--------- 1 file changed, 76 insertions(+), 62 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c index c109b2c34c8f..fd9d97aab071 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c @@ -26,75 +26,89 @@ #include "display_mode_lib.h" #include "dc_features.h" +static const struct _vcs_dpi_ip_params_st dcn1_0_ip = { + .rob_buffer_size_kbytes = 64, + .det_buffer_size_kbytes = 164, + .dpte_buffer_size_in_pte_reqs = 42, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .pte_chunk_size_kbytes = 2, + .meta_chunk_size_kbytes = 2, + 
.writeback_chunk_size_kbytes = 2, + .line_buffer_size_bits = 589824, + .max_line_buffer_lines = 12, + .IsLineBufferBppFixed = 0, + .LineBufferFixedBpp = -1, + .writeback_luma_buffer_size_kbytes = 12, + .writeback_chroma_buffer_size_kbytes = 8, + .max_num_dpp = 4, + .max_num_wb = 2, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 4, + .max_vscl_ratio = 4, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.10, + .min_vblank_lines = 14, + .dppclk_delay_subtotal = 90, + .dispclk_delay_subtotal = 42, + .dcfclk_cstate_latency = 10, + .max_inter_dcn_tile_repeaters = 8, + .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0, + .bug_forcing_LC_req_same_size_fixed = 0, +}; + +static const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc = { + .sr_exit_time_us = 9.0, + .sr_enter_plus_exit_time_us = 11.0, + .urgent_latency_us = 4.0, + .writeback_latency_us = 12.0, + .ideal_dram_bw_after_urgent_percent = 80.0, + .max_request_size_bytes = 256, + .downspread_percent = 0.5, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 128, + .urgent_out_of_order_return_per_channel_bytes = 256, + .channel_interleave_bytes = 256, + .num_banks = 8, + .num_chans = 2, + .vmm_page_size_bytes = 4096, + .dram_clock_change_latency_us = 17.0, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, +}; + static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project) { - if (project == DML_PROJECT_RAVEN1) { - soc->sr_exit_time_us = 9.0; - soc->sr_enter_plus_exit_time_us = 11.0; - soc->urgent_latency_us = 4.0; - soc->writeback_latency_us = 12.0; - soc->ideal_dram_bw_after_urgent_percent = 80.0; - soc->max_request_size_bytes = 256; - soc->downspread_percent = 0.5; - soc->dram_page_open_time_ns = 50.0; - soc->dram_rw_turnaround_time_ns = 17.5; - soc->dram_return_buffer_per_channel_bytes = 8192; - soc->round_trip_ping_latency_dcfclk_cycles = 128; - soc->urgent_out_of_order_return_per_channel_bytes = 256; - soc->channel_interleave_bytes = 256; - soc->num_banks = 8; - soc->num_chans = 2; - soc->vmm_page_size_bytes = 4096; - soc->dram_clock_change_latency_us = 17.0; - soc->writeback_dram_clock_change_latency_us = 23.0; - soc->return_bus_width_bytes = 64; - } else { - BREAK_TO_DEBUGGER(); /* Invalid Project Specified */ + switch (project) { + case DML_PROJECT_RAVEN1: + *soc = dcn1_0_soc; + break; + default: + ASSERT(0); + break; } } static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project) { - if (project == DML_PROJECT_RAVEN1) { - ip->rob_buffer_size_kbytes = 64; - ip->det_buffer_size_kbytes = 164; - ip->dpte_buffer_size_in_pte_reqs = 42; - ip->dpp_output_buffer_pixels = 2560; - ip->opp_output_buffer_lines = 1; - ip->pixel_chunk_size_kbytes = 8; - ip->pte_enable = 1; - ip->pte_chunk_size_kbytes = 2; - ip->meta_chunk_size_kbytes = 2; - ip->writeback_chunk_size_kbytes = 2; - ip->line_buffer_size_bits = 589824; - ip->max_line_buffer_lines = 12; - ip->IsLineBufferBppFixed = 0; - ip->LineBufferFixedBpp = -1; - ip->writeback_luma_buffer_size_kbytes = 12; - ip->writeback_chroma_buffer_size_kbytes = 8; - ip->max_num_dpp = 4; - ip->max_num_wb = 2; - ip->max_dchub_pscl_bw_pix_per_clk = 4; - ip->max_pscl_lb_bw_pix_per_clk = 2; 
- ip->max_lb_vscl_bw_pix_per_clk = 4; - ip->max_vscl_hscl_bw_pix_per_clk = 4; - ip->max_hscl_ratio = 4; - ip->max_vscl_ratio = 4; - ip->hscl_mults = 4; - ip->vscl_mults = 4; - ip->max_hscl_taps = 8; - ip->max_vscl_taps = 8; - ip->dispclk_ramp_margin_percent = 1; - ip->underscan_factor = 1.10; - ip->min_vblank_lines = 14; - ip->dppclk_delay_subtotal = 90; - ip->dispclk_delay_subtotal = 42; - ip->dcfclk_cstate_latency = 10; - ip->max_inter_dcn_tile_repeaters = 8; - ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = 0; - ip->bug_forcing_LC_req_same_size_fixed = 0; - } else { - BREAK_TO_DEBUGGER(); /* Invalid Project Specified */ + switch (project) { + case DML_PROJECT_RAVEN1: + *ip = dcn1_0_ip; + break; + default: + ASSERT(0); + break; } } -- cgit v1.2.1 From 339cc82ae67700cb25a5bb10842cca5b09a79afe Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Wed, 4 Apr 2018 17:27:18 -0400 Subject: drm/amd/display: Check lid state to determine fast boot optimization. For legacy enable boot up with lid closed, eDP information couldn't be read correctly via SBIOS_SCRATCH_3 results in eDP cannot be light up properly when open lid. Check lid state instead can resolve the issue. Signed-off-by: Yongqiang Sun Reviewed-by: Eric Yang Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 + .../amd/display/dc/dce110/dce110_hw_sequencer.c | 24 ++++++++++++++-------- 2 files changed, 17 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index d7e6d53bb383..11b3433d6432 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -92,6 +92,7 @@ struct dc_stream_state { int phy_pix_clk; enum signal_type signal; bool dpms_off; + bool lid_state_closed; struct dc_stream_status status; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 1b5c11c8fa1f..4a4b3bcd4230 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1471,6 +1471,17 @@ static void disable_vga_and_power_gate_all_controllers( } } +static bool is_eDP_lid_closed(struct dc_state *context) +{ + int i; + + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->signal == SIGNAL_TYPE_EDP) + return context->streams[i]->lid_state_closed; + } + return false; +} + static struct dc_link *get_link_for_edp_not_in_use( struct dc *dc, struct dc_state *context) @@ -1505,20 +1516,17 @@ static struct dc_link *get_link_for_edp_not_in_use( */ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) { - struct dc_bios *dcb = dc->ctx->dc_bios; - - /* vbios already light up eDP, so we can leverage vbios and skip eDP + /* check eDP lid state: + * If lid is open, vbios already light up eDP, so we can leverage vbios and skip eDP * programming */ - bool can_eDP_fast_boot_optimize = - (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE); - - /* if OS doesn't light up eDP and eDP link is available, we want to disable */ + bool lid_state_closed = is_eDP_lid_closed(context); struct dc_link *edp_link_to_turnoff = NULL; - if (can_eDP_fast_boot_optimize) { + if (!lid_state_closed) { edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context); + /* if OS doesn't light up eDP and eDP link is available, we want to disable 
*/ if (!edp_link_to_turnoff) dc->apply_edp_fast_boot_optimization = true; } -- cgit v1.2.1 From c4b0faae71f33377a11fe19dadcce6deb86f5037 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Fri, 6 Apr 2018 12:07:19 -0400 Subject: drm/amd/display: Do not create memory allocation if stats not enabled Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/stats/stats.c | 26 +++++++++++++---------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index ed5f6809a64e..48e02197919f 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -115,18 +115,22 @@ struct mod_stats *mod_stats_create(struct dc *dc) ®_data, sizeof(unsigned int), &flag)) core_stats->enabled = reg_data; - core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT; - if (dm_read_persistent_data(dc->ctx, NULL, NULL, - DAL_STATS_ENTRIES_REGKEY, - ®_data, sizeof(unsigned int), &flag)) { - if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX) - core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX; - else - core_stats->entries = reg_data; - } + if (core_stats->enabled) { + core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT; + if (dm_read_persistent_data(dc->ctx, NULL, NULL, + DAL_STATS_ENTRIES_REGKEY, + ®_data, sizeof(unsigned int), &flag)) { + if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX) + core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX; + else + core_stats->entries = reg_data; + } - core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries, - GFP_KERNEL); + core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries, + GFP_KERNEL); + } else { + core_stats->entries = 0; + } if (core_stats->time == NULL) goto fail_construct; -- cgit v1.2.1 From 5ebfb7a5996ea1dceeb2a392d7e46357042e4506 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Thu, 5 Apr 2018 17:09:20 -0400 Subject: drm/amd/display: Move DCC support functions into dchubbub Added dchububu.h header file for common enum/struct definitions. Added new interface functions get_dcc_compression_cap, dcc_support_swizzle, dcc_support_pixel_format. 
Signed-off-by: Eric Bernstein Reviewed-by: Dmytro Laktyushkin Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 221 +++++++++++++++++++- .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 7 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 231 +-------------------- drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 64 ++++++ 4 files changed, 291 insertions(+), 232 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index 738f67ffd1b4..b9fb14a3224b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -476,8 +476,227 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub) DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req); } +static bool hubbub1_dcc_support_swizzle( + enum swizzle_mode_values swizzle, + unsigned int bytes_per_element, + enum segment_order *segment_order_horz, + enum segment_order *segment_order_vert) +{ + bool standard_swizzle = false; + bool display_swizzle = false; + + switch (swizzle) { + case DC_SW_4KB_S: + case DC_SW_64KB_S: + case DC_SW_VAR_S: + case DC_SW_4KB_S_X: + case DC_SW_64KB_S_X: + case DC_SW_VAR_S_X: + standard_swizzle = true; + break; + case DC_SW_4KB_D: + case DC_SW_64KB_D: + case DC_SW_VAR_D: + case DC_SW_4KB_D_X: + case DC_SW_64KB_D_X: + case DC_SW_VAR_D_X: + display_swizzle = true; + break; + default: + break; + } + + if (bytes_per_element == 1 && standard_swizzle) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__na; + return true; + } + if (bytes_per_element == 2 && standard_swizzle) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 4 && standard_swizzle) { + *segment_order_horz = segment_order__non_contiguous; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 8 && standard_swizzle) { + *segment_order_horz = segment_order__na; + *segment_order_vert = segment_order__contiguous; + return true; + } + if (bytes_per_element == 8 && display_swizzle) { + *segment_order_horz = segment_order__contiguous; + *segment_order_vert = segment_order__non_contiguous; + return true; + } + + return false; +} + +static bool hubbub1_dcc_support_pixel_format( + enum surface_pixel_format format, + unsigned int *bytes_per_element) +{ + /* DML: get_bytes_per_element */ + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + *bytes_per_element = 2; + return true; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + *bytes_per_element = 4; + return true; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + *bytes_per_element = 8; + return true; + default: + return false; + } +} + +static void hubbub1_get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, + unsigned int bytes_per_element) +{ + /* copied from DML. 
might want to refactor DML to leverage from DML */ + /* DML : get_blk256_size */ + if (bytes_per_element == 1) { + *blk256_width = 16; + *blk256_height = 16; + } else if (bytes_per_element == 2) { + *blk256_width = 16; + *blk256_height = 8; + } else if (bytes_per_element == 4) { + *blk256_width = 8; + *blk256_height = 8; + } else if (bytes_per_element == 8) { + *blk256_width = 8; + *blk256_height = 4; + } +} + +static void hubbub1_det_request_size( + unsigned int height, + unsigned int width, + unsigned int bpe, + bool *req128_horz_wc, + bool *req128_vert_wc) +{ + unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */ + + unsigned int blk256_height = 0; + unsigned int blk256_width = 0; + unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; + + hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe); + + swath_bytes_horz_wc = height * blk256_height * bpe; + swath_bytes_vert_wc = width * blk256_width * bpe; + + *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? + false : /* full 256B request */ + true; /* half 128b request */ + + *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? + false : /* full 256B request */ + true; /* half 128b request */ +} + +static bool hubbub1_get_dcc_compression_cap(struct hubbub *hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output) +{ + struct dc *dc = hubbub->ctx->dc; + /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ + enum dcc_control dcc_control; + unsigned int bpe; + enum segment_order segment_order_horz, segment_order_vert; + bool req128_horz_wc, req128_vert_wc; + + memset(output, 0, sizeof(*output)); + + if (dc->debug.disable_dcc == DCC_DISABLE) + return false; + + if (!hubbub->funcs->dcc_support_pixel_format(input->format, &bpe)) + return false; + + if (!hubbub->funcs->dcc_support_swizzle(input->swizzle_mode, bpe, + &segment_order_horz, &segment_order_vert)) + return false; + + hubbub1_det_request_size(input->surface_size.height, input->surface_size.width, + bpe, &req128_horz_wc, &req128_vert_wc); + + if (!req128_horz_wc && !req128_vert_wc) { + dcc_control = dcc_control__256_256_xxx; + } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { + if (!req128_horz_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_horz == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else if (input->scan == SCAN_DIRECTION_VERTICAL) { + if (!req128_vert_wc) + dcc_control = dcc_control__256_256_xxx; + else if (segment_order_vert == segment_order__contiguous) + dcc_control = dcc_control__128_128_xxx; + else + dcc_control = dcc_control__256_64_64; + } else { + if ((req128_horz_wc && + segment_order_horz == segment_order__non_contiguous) || + (req128_vert_wc && + segment_order_vert == segment_order__non_contiguous)) + /* access_dir not known, must use most constraining */ + dcc_control = dcc_control__256_64_64; + else + /* reg128 is true for either horz and vert + * but segment_order is contiguous + */ + dcc_control = dcc_control__128_128_xxx; + } + + if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && + dcc_control != dcc_control__256_256_xxx) + return false; + + switch (dcc_control) { + case dcc_control__256_256_xxx: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 256; + output->grph.rgb.independent_64b_blks = false; + break; + case dcc_control__128_128_xxx: + output->grph.rgb.max_uncompressed_blk_size = 128; + 
output->grph.rgb.max_compressed_blk_size = 128; + output->grph.rgb.independent_64b_blks = false; + break; + case dcc_control__256_64_64: + output->grph.rgb.max_uncompressed_blk_size = 256; + output->grph.rgb.max_compressed_blk_size = 64; + output->grph.rgb.independent_64b_blks = true; + break; + } + + output->capable = true; + output->const_color_support = false; + + return true; +} + static const struct hubbub_funcs hubbub1_funcs = { - .update_dchub = hubbub1_update_dchub + .update_dchub = hubbub1_update_dchub, + .dcc_support_swizzle = hubbub1_dcc_support_swizzle, + .dcc_support_pixel_format = hubbub1_dcc_support_pixel_format, + .get_dcc_compression_cap = hubbub1_get_dcc_compression_cap, }; void hubbub1_construct(struct hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index a16e908821a0..f479f54e5bb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -27,6 +27,7 @@ #define __DC_HUBBUB_DCN10_H__ #include "core_types.h" +#include "dchubbub.h" #define HUBHUB_REG_LIST_DCN()\ SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\ @@ -173,12 +174,6 @@ struct dcn_hubbub_wm { struct dcn_hubbub_wm_set sets[4]; }; -struct hubbub_funcs { - void (*update_dchub)( - struct hubbub *hubbub, - struct dchub_init_data *dh_data); -}; - struct hubbub { const struct hubbub_funcs *funcs; struct dc_context *ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index f305f65675d8..2c0a3150bf2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -937,235 +937,16 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer( return idle_pipe; } -enum dcc_control { - dcc_control__256_256_xxx, - dcc_control__128_128_xxx, - dcc_control__256_64_64, -}; - -enum segment_order { - segment_order__na, - segment_order__contiguous, - segment_order__non_contiguous, -}; - -static bool dcc_support_pixel_format( - enum surface_pixel_format format, - unsigned int *bytes_per_element) -{ - /* DML: get_bytes_per_element */ - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - *bytes_per_element = 2; - return true; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - *bytes_per_element = 4; - return true; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - *bytes_per_element = 8; - return true; - default: - return false; - } -} - -static bool dcc_support_swizzle( - enum swizzle_mode_values swizzle, - unsigned int bytes_per_element, - enum segment_order *segment_order_horz, - enum segment_order *segment_order_vert) -{ - bool standard_swizzle = false; - bool display_swizzle = false; - - switch (swizzle) { - case DC_SW_4KB_S: - case DC_SW_64KB_S: - case DC_SW_VAR_S: - case DC_SW_4KB_S_X: - case DC_SW_64KB_S_X: - case DC_SW_VAR_S_X: - standard_swizzle = true; - break; - case DC_SW_4KB_D: - case DC_SW_64KB_D: - case DC_SW_VAR_D: - case DC_SW_4KB_D_X: - case DC_SW_64KB_D_X: - case DC_SW_VAR_D_X: - display_swizzle = true; - break; - default: - break; - } - - if (bytes_per_element == 1 && standard_swizzle) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert 
= segment_order__na; - return true; - } - if (bytes_per_element == 2 && standard_swizzle) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 4 && standard_swizzle) { - *segment_order_horz = segment_order__non_contiguous; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 8 && standard_swizzle) { - *segment_order_horz = segment_order__na; - *segment_order_vert = segment_order__contiguous; - return true; - } - if (bytes_per_element == 8 && display_swizzle) { - *segment_order_horz = segment_order__contiguous; - *segment_order_vert = segment_order__non_contiguous; - return true; - } - - return false; -} - -static void get_blk256_size(unsigned int *blk256_width, unsigned int *blk256_height, - unsigned int bytes_per_element) -{ - /* copied from DML. might want to refactor DML to leverage from DML */ - /* DML : get_blk256_size */ - if (bytes_per_element == 1) { - *blk256_width = 16; - *blk256_height = 16; - } else if (bytes_per_element == 2) { - *blk256_width = 16; - *blk256_height = 8; - } else if (bytes_per_element == 4) { - *blk256_width = 8; - *blk256_height = 8; - } else if (bytes_per_element == 8) { - *blk256_width = 8; - *blk256_height = 4; - } -} - -static void det_request_size( - unsigned int height, - unsigned int width, - unsigned int bpe, - bool *req128_horz_wc, - bool *req128_vert_wc) -{ - unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */ - - unsigned int blk256_height = 0; - unsigned int blk256_width = 0; - unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; - - get_blk256_size(&blk256_width, &blk256_height, bpe); - - swath_bytes_horz_wc = height * blk256_height * bpe; - swath_bytes_vert_wc = width * blk256_width * bpe; - - *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ? - false : /* full 256B request */ - true; /* half 128b request */ - - *req128_vert_wc = (2 * swath_bytes_vert_wc <= detile_buf_size) ? 
- false : /* full 256B request */ - true; /* half 128b request */ -} - -static bool get_dcc_compression_cap(const struct dc *dc, +static bool dcn10_get_dcc_compression_cap(const struct dc *dc, const struct dc_dcc_surface_param *input, struct dc_surface_dcc_cap *output) { - /* implement section 1.6.2.1 of DCN1_Programming_Guide.docx */ - enum dcc_control dcc_control; - unsigned int bpe; - enum segment_order segment_order_horz, segment_order_vert; - bool req128_horz_wc, req128_vert_wc; - - memset(output, 0, sizeof(*output)); - - if (dc->debug.disable_dcc == DCC_DISABLE) - return false; - - if (!dcc_support_pixel_format(input->format, - &bpe)) - return false; - - if (!dcc_support_swizzle(input->swizzle_mode, bpe, - &segment_order_horz, &segment_order_vert)) - return false; - - det_request_size(input->surface_size.height, input->surface_size.width, - bpe, &req128_horz_wc, &req128_vert_wc); - - if (!req128_horz_wc && !req128_vert_wc) { - dcc_control = dcc_control__256_256_xxx; - } else if (input->scan == SCAN_DIRECTION_HORIZONTAL) { - if (!req128_horz_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_horz == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else if (input->scan == SCAN_DIRECTION_VERTICAL) { - if (!req128_vert_wc) - dcc_control = dcc_control__256_256_xxx; - else if (segment_order_vert == segment_order__contiguous) - dcc_control = dcc_control__128_128_xxx; - else - dcc_control = dcc_control__256_64_64; - } else { - if ((req128_horz_wc && - segment_order_horz == segment_order__non_contiguous) || - (req128_vert_wc && - segment_order_vert == segment_order__non_contiguous)) - /* access_dir not known, must use most constraining */ - dcc_control = dcc_control__256_64_64; - else - /* reg128 is true for either horz and vert - * but segment_order is contiguous - */ - dcc_control = dcc_control__128_128_xxx; - } - - if (dc->debug.disable_dcc == DCC_HALF_REQ_DISALBE && - dcc_control != dcc_control__256_256_xxx) - return false; - - switch (dcc_control) { - case dcc_control__256_256_xxx: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 256; - output->grph.rgb.independent_64b_blks = false; - break; - case dcc_control__128_128_xxx: - output->grph.rgb.max_uncompressed_blk_size = 128; - output->grph.rgb.max_compressed_blk_size = 128; - output->grph.rgb.independent_64b_blks = false; - break; - case dcc_control__256_64_64: - output->grph.rgb.max_uncompressed_blk_size = 256; - output->grph.rgb.max_compressed_blk_size = 64; - output->grph.rgb.independent_64b_blks = true; - break; - } - - output->capable = true; - output->const_color_support = false; - - return true; + return dc->res_pool->hubbub->funcs->get_dcc_compression_cap( + dc->res_pool->hubbub, + input, + output); } - static void dcn10_destroy_resource_pool(struct resource_pool **pool) { struct dcn10_resource_pool *dcn10_pool = TO_DCN10_RES_POOL(*pool); @@ -1186,7 +967,7 @@ static enum dc_status dcn10_validate_plane(const struct dc_plane_state *plane_st } static struct dc_cap_funcs cap_funcs = { - .get_dcc_compression_cap = get_dcc_compression_cap + .get_dcc_compression_cap = dcn10_get_dcc_compression_cap }; static struct resource_funcs dcn10_res_pool_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h new file mode 100644 index 000000000000..02f757dd70d4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -0,0 +1,64 
@@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_DCHUBBUB_H__ +#define __DAL_DCHUBBUB_H__ + + +enum dcc_control { + dcc_control__256_256_xxx, + dcc_control__128_128_xxx, + dcc_control__256_64_64, +}; + +enum segment_order { + segment_order__na, + segment_order__contiguous, + segment_order__non_contiguous, +}; + + +struct hubbub_funcs { + void (*update_dchub)( + struct hubbub *hubbub, + struct dchub_init_data *dh_data); + + bool (*get_dcc_compression_cap)(struct hubbub *hubbub, + const struct dc_dcc_surface_param *input, + struct dc_surface_dcc_cap *output); + + bool (*dcc_support_swizzle)( + enum swizzle_mode_values swizzle, + unsigned int bytes_per_element, + enum segment_order *segment_order_horz, + enum segment_order *segment_order_vert); + + bool (*dcc_support_pixel_format)( + enum surface_pixel_format format, + unsigned int *bytes_per_element); +}; + + +#endif -- cgit v1.2.1 From 7ac897b5afb98369a4edd71950921026c3029d5f Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Fri, 6 Apr 2018 23:03:12 -0400 Subject: drm/amd/display: HDMI has no sound after Panel power off/on Signed-off-by: Charlene Liu Reviewed-by: Krunoslav Kovac Acked-by: Harry Wentland Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 07c32421c226..84e26c894046 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -718,6 +718,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets( if (info_frame->avi.valid) { const uint32_t *content = (const uint32_t *) &info_frame->avi.sb[0]; + /*we need turn on clock before programming AFMT block*/ + REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); REG_WRITE(AFMT_AVI_INFO0, content[0]); -- cgit v1.2.1 From 2c37e49a6bcd5e0c66963301e9feab63b5f928f3 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Fri, 6 Apr 2018 21:38:10 -0400 Subject: drm/amd/display: Check SCRATCH reg to determine S3 resume. Use lid state only to determine fast boot optimization is not enough. 
For S3/Resume, due to bios isn't involved in boot, eDP wasn't light up, while lid state is open, if do fast boot optimization, eDP panel will skip enable link and result in black screen after boot. And becasue of bios isn't involved, no matter UEFI or Legacy boot, BIOS_SCRATCH_3 value should be 0, use this to determine the case. Signed-off-by: Yongqiang Sun Reviewed-by: Charlene Liu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 33 ++++++++++++++++++---- 1 file changed, 28 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 4a4b3bcd4230..bd34193ad779 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1516,18 +1516,41 @@ static struct dc_link *get_link_for_edp_not_in_use( */ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) { - /* check eDP lid state: - * If lid is open, vbios already light up eDP, so we can leverage vbios and skip eDP - * programming + /* check eDP lid state and BIOS_SCRATCH_3 to determine fast boot optimization + * UEFI boot + * edp_active_status_from_scratch fast boot optimization + * S4/S5 resume: + * Lid Open true true + * Lid Close false false + * + * S3/ resume: + * Lid Open false false + * Lid Close false false + * + * Legacy boot: + * edp_active_status_from_scratch fast boot optimization + * S4/S resume: + * Lid Open true true + * Lid Close true false + * + * S3/ resume: + * Lid Open false false + * Lid Close false false */ + struct dc_bios *dcb = dc->ctx->dc_bios; bool lid_state_closed = is_eDP_lid_closed(context); struct dc_link *edp_link_to_turnoff = NULL; + bool edp_active_status_from_scratch = + (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE); + /*Lid open*/ if (!lid_state_closed) { edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context); - /* if OS doesn't light up eDP and eDP link is available, we want to disable */ - if (!edp_link_to_turnoff) + /* if OS doesn't light up eDP and eDP link is available, we want to disable + * If resume from S4/S5, should optimization. 
+ */ + if (!edp_link_to_turnoff && edp_active_status_from_scratch) dc->apply_edp_fast_boot_optimization = true; } -- cgit v1.2.1 From 0a93dc7f595f43b621277ecfc05a44ed0c719a5f Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 29 Mar 2018 08:43:02 -0400 Subject: drm/amd/display: add rq/dlg/ttu to dtn log Signed-off-by: Dmytro Laktyushkin Reviewed-by: Dmytro Laktyushkin Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_helper.c | 59 ++++++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 153 ++++++++++++++++++++- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 19 +-- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 114 ++++++++++++++- drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 20 +++ drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 56 ++++++++ 6 files changed, 401 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index 48e1fcf53d43..bd0fda0ceb91 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -117,6 +117,65 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr, return reg_val; } +uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + uint8_t shift2, uint32_t mask2, uint32_t *field_value2, + uint8_t shift3, uint32_t mask3, uint32_t *field_value3, + uint8_t shift4, uint32_t mask4, uint32_t *field_value4, + uint8_t shift5, uint32_t mask5, uint32_t *field_value5, + uint8_t shift6, uint32_t mask6, uint32_t *field_value6) +{ + uint32_t reg_val = dm_read_reg(ctx, addr); + *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); + *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); + *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); + *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4); + *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5); + *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6); + return reg_val; +} + +uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + uint8_t shift2, uint32_t mask2, uint32_t *field_value2, + uint8_t shift3, uint32_t mask3, uint32_t *field_value3, + uint8_t shift4, uint32_t mask4, uint32_t *field_value4, + uint8_t shift5, uint32_t mask5, uint32_t *field_value5, + uint8_t shift6, uint32_t mask6, uint32_t *field_value6, + uint8_t shift7, uint32_t mask7, uint32_t *field_value7) +{ + uint32_t reg_val = dm_read_reg(ctx, addr); + *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); + *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); + *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); + *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4); + *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5); + *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6); + *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7); + return reg_val; +} + +uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + uint8_t shift2, uint32_t mask2, uint32_t *field_value2, + uint8_t shift3, uint32_t mask3, uint32_t *field_value3, + uint8_t shift4, uint32_t mask4, uint32_t *field_value4, + uint8_t shift5, uint32_t mask5, uint32_t *field_value5, + uint8_t shift6, uint32_t mask6, 
uint32_t *field_value6, + uint8_t shift7, uint32_t mask7, uint32_t *field_value7, + uint8_t shift8, uint32_t mask8, uint32_t *field_value8) +{ + uint32_t reg_val = dm_read_reg(ctx, addr); + *field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1); + *field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2); + *field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3); + *field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4); + *field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5); + *field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6); + *field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7); + *field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8); + return reg_val; +} /* note: va version of this is pretty bad idea, since there is a output parameter pass by pointer * compiler won't be able to check for size match and is prone to stack corruption type of bugs diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 4ca9b6e9a824..58062172cf3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -756,9 +756,159 @@ void min_set_viewport( PRI_VIEWPORT_Y_START_C, viewport_c->y); } -void hubp1_read_state(struct dcn10_hubp *hubp1, +void hubp1_read_state(struct hubp *hubp, struct dcn_hubp_state *s) { + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr; + struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr; + struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; + + /* Requester */ + REG_GET(HUBPRET_CONTROL, + DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address); + REG_GET_4(DCN_EXPANSION_MODE, + DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode, + PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode, + MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode, + CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG, + CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size, + MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size, + META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size, + MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size, + DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size, + MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size, + SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height, + PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear); + REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C, + CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size, + MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size, + META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size, + MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size, + DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size, + MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size, + SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height, + PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear); + + /* DLG - Per hubp */ + REG_GET_2(BLANK_OFFSET_0, + REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end, + DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end); + + REG_GET(BLANK_OFFSET_1, + MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start); + + REG_GET(DST_DIMENSIONS, + REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal); + + REG_GET_2(DST_AFTER_SCALER, + REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler, + DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler); + + if (REG(PREFETCH_SETTINS)) + REG_GET_2(PREFETCH_SETTINS, + DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch, 
+ VRATIO_PREFETCH, &dlg_attr->vratio_prefetch); + else + REG_GET_2(PREFETCH_SETTINGS, + DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch, + VRATIO_PREFETCH, &dlg_attr->vratio_prefetch); + + REG_GET_2(VBLANK_PARAMETERS_0, + DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank, + DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank); + + REG_GET(REF_FREQ_TO_PIX_FREQ, + REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq); + + /* DLG - Per luma/chroma */ + REG_GET(VBLANK_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l); + + REG_GET(VBLANK_PARAMETERS_3, + REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l); + + if (REG(NOM_PARAMETERS_0)) + REG_GET(NOM_PARAMETERS_0, + DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l); + + if (REG(NOM_PARAMETERS_1)) + REG_GET(NOM_PARAMETERS_1, + REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l); + + REG_GET(NOM_PARAMETERS_4, + DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l); + + REG_GET(NOM_PARAMETERS_5, + REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l); + + REG_GET_2(PER_LINE_DELIVERY_PRE, + REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l, + REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c); + + REG_GET_2(PER_LINE_DELIVERY, + REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l, + REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c); + + if (REG(PREFETCH_SETTINS_C)) + REG_GET(PREFETCH_SETTINS_C, + VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c); + else + REG_GET(PREFETCH_SETTINGS_C, + VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c); + + REG_GET(VBLANK_PARAMETERS_2, + REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c); + + REG_GET(VBLANK_PARAMETERS_4, + REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c); + + if (REG(NOM_PARAMETERS_2)) + REG_GET(NOM_PARAMETERS_2, + DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c); + + if (REG(NOM_PARAMETERS_3)) + REG_GET(NOM_PARAMETERS_3, + REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c); + + REG_GET(NOM_PARAMETERS_6, + DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c); + + REG_GET(NOM_PARAMETERS_7, + REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c); + + /* TTU - per hubp */ + REG_GET_2(DCN_TTU_QOS_WM, + QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm, + QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm); + + REG_GET_2(DCN_GLOBAL_TTU_CNTL, + MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank, + QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip); + + /* TTU - per luma/chroma */ + /* Assumed surf0 is luma and 1 is chroma */ + + REG_GET_3(DCN_SURF0_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l, + QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l, + QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l); + + REG_GET(DCN_SURF0_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, + &ttu_attr->refcyc_per_req_delivery_pre_l); + + REG_GET_3(DCN_SURF1_TTU_CNTL0, + REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c, + QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c, + QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c); + + REG_GET(DCN_SURF1_TTU_CNTL1, + REFCYC_PER_REQ_DELIVERY_PRE, + &ttu_attr->refcyc_per_req_delivery_pre_c); + + /* Rest of hubp */ REG_GET(DCSURF_SURFACE_CONFIG, SURFACE_PIXEL_FORMAT, &s->pixel_format); @@ -956,6 +1106,7 @@ static struct hubp_funcs dcn10_hubp_funcs = { .hubp_disconnect = hubp1_disconnect, .hubp_clk_cntl = 
hubp1_clk_cntl, .hubp_vtg_sel = hubp1_vtg_sel, + .hubp_read_state = hubp1_read_state, }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index e0d6d32357c0..920ae3a1b412 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -690,24 +690,7 @@ void dcn10_hubp_construct( const struct dcn_mi_shift *hubp_shift, const struct dcn_mi_mask *hubp_mask); - -struct dcn_hubp_state { - uint32_t pixel_format; - uint32_t inuse_addr_hi; - uint32_t viewport_width; - uint32_t viewport_height; - uint32_t rotation_angle; - uint32_t h_mirror_en; - uint32_t sw_mode; - uint32_t dcc_en; - uint32_t blank_en; - uint32_t underflow_status; - uint32_t ttu_disable; - uint32_t min_ttu_vblank; - uint32_t qos_level_low_wm; - uint32_t qos_level_high_wm; -}; -void hubp1_read_state(struct dcn10_hubp *hubp1, +void hubp1_read_state(struct hubp *hubp, struct dcn_hubp_state *s); enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index a6cf9ade9131..7dd130d15a67 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -112,6 +112,104 @@ void dcn10_log_hubbub_state(struct dc *dc) DTN_INFO("\n"); } +static void print_rq_dlg_ttu_regs(struct dc_context *dc_ctx, struct dcn_hubp_state *s) +{ + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr; + struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; + + DTN_INFO("========Requester========\n"); + DTN_INFO("drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode); + DTN_INFO("prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode); + DTN_INFO("mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode); + DTN_INFO("crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode); + DTN_INFO("plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address); + DTN_INFO("====\n"); + DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_l.chunk_size); + DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_chunk_size); + DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.meta_chunk_size); + DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_meta_chunk_size); + DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.dpte_group_size); + DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.mpte_group_size); + DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_l.swath_height); + DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_l.pte_row_height_linear); + DTN_INFO("====\n"); + DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_c.chunk_size); + DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_chunk_size); + DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.meta_chunk_size); + DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_meta_chunk_size); + DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.dpte_group_size); + DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.mpte_group_size); + DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_c.swath_height); + DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_c.pte_row_height_linear); + + DTN_INFO("========DLG========\n"); + DTN_INFO("refcyc_h_blank_end = 0x%0x\n", 
dlg_regs->refcyc_h_blank_end); + DTN_INFO("dlg_vblank_end = 0x%0x\n", dlg_regs->dlg_vblank_end); + DTN_INFO("min_dst_y_next_start = 0x%0x\n", dlg_regs->min_dst_y_next_start); + DTN_INFO("refcyc_per_htotal = 0x%0x\n", dlg_regs->refcyc_per_htotal); + DTN_INFO("refcyc_x_after_scaler = 0x%0x\n", dlg_regs->refcyc_x_after_scaler); + DTN_INFO("dst_y_after_scaler = 0x%0x\n", dlg_regs->dst_y_after_scaler); + DTN_INFO("dst_y_prefetch = 0x%0x\n", dlg_regs->dst_y_prefetch); + DTN_INFO("dst_y_per_vm_vblank = 0x%0x\n", dlg_regs->dst_y_per_vm_vblank); + DTN_INFO("dst_y_per_row_vblank = 0x%0x\n", dlg_regs->dst_y_per_row_vblank); + DTN_INFO("dst_y_per_vm_flip = 0x%0x\n", dlg_regs->dst_y_per_vm_flip); + DTN_INFO("dst_y_per_row_flip = 0x%0x\n", dlg_regs->dst_y_per_row_flip); + DTN_INFO("ref_freq_to_pix_freq = 0x%0x\n", dlg_regs->ref_freq_to_pix_freq); + DTN_INFO("vratio_prefetch = 0x%0x\n", dlg_regs->vratio_prefetch); + DTN_INFO("vratio_prefetch_c = 0x%0x\n", dlg_regs->vratio_prefetch_c); + DTN_INFO("refcyc_per_pte_group_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_l); + DTN_INFO("refcyc_per_pte_group_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_c); + DTN_INFO("refcyc_per_meta_chunk_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_l); + DTN_INFO("refcyc_per_meta_chunk_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_c); + DTN_INFO("refcyc_per_pte_group_flip_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_l); + DTN_INFO("refcyc_per_pte_group_flip_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_c); + DTN_INFO("refcyc_per_meta_chunk_flip_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_l); + DTN_INFO("refcyc_per_meta_chunk_flip_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_c); + DTN_INFO("dst_y_per_pte_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_l); + DTN_INFO("dst_y_per_pte_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_c); + DTN_INFO("refcyc_per_pte_group_nom_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_l); + DTN_INFO("refcyc_per_pte_group_nom_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_c); + DTN_INFO("dst_y_per_meta_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_l); + DTN_INFO("dst_y_per_meta_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_c); + DTN_INFO("refcyc_per_meta_chunk_nom_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_l); + DTN_INFO("refcyc_per_meta_chunk_nom_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_c); + DTN_INFO("refcyc_per_line_delivery_pre_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_l); + DTN_INFO("refcyc_per_line_delivery_pre_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_c); + DTN_INFO("refcyc_per_line_delivery_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_l); + DTN_INFO("refcyc_per_line_delivery_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_c); + DTN_INFO("chunk_hdl_adjust_cur0 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur0); + DTN_INFO("dst_y_offset_cur1 = 0x%0x\n", dlg_regs->dst_y_offset_cur1); + DTN_INFO("chunk_hdl_adjust_cur1 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur1); + DTN_INFO("vready_after_vcount0 = 0x%0x\n", dlg_regs->vready_after_vcount0); + DTN_INFO("dst_y_delta_drq_limit = 0x%0x\n", dlg_regs->dst_y_delta_drq_limit); + DTN_INFO("xfc_reg_transfer_delay = 0x%0x\n", dlg_regs->xfc_reg_transfer_delay); + DTN_INFO("xfc_reg_precharge_delay = 0x%0x\n", dlg_regs->xfc_reg_precharge_delay); + DTN_INFO("xfc_reg_remote_surface_flip_latency = 0x%0x\n", dlg_regs->xfc_reg_remote_surface_flip_latency); + + DTN_INFO("========TTU========\n"); + 
DTN_INFO("qos_level_low_wm = 0x%0x\n", ttu_regs->qos_level_low_wm); + DTN_INFO("qos_level_high_wm = 0x%0x\n", ttu_regs->qos_level_high_wm); + DTN_INFO("min_ttu_vblank = 0x%0x\n", ttu_regs->min_ttu_vblank); + DTN_INFO("qos_level_flip = 0x%0x\n", ttu_regs->qos_level_flip); + DTN_INFO("refcyc_per_req_delivery_pre_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_l); + DTN_INFO("refcyc_per_req_delivery_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_l); + DTN_INFO("refcyc_per_req_delivery_pre_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_c); + DTN_INFO("refcyc_per_req_delivery_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_c); + DTN_INFO("refcyc_per_req_delivery_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur0); + DTN_INFO("refcyc_per_req_delivery_pre_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur0); + DTN_INFO("refcyc_per_req_delivery_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur1); + DTN_INFO("refcyc_per_req_delivery_pre_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur1); + DTN_INFO("qos_level_fixed_l = 0x%0x\n", ttu_regs->qos_level_fixed_l); + DTN_INFO("qos_ramp_disable_l = 0x%0x\n", ttu_regs->qos_ramp_disable_l); + DTN_INFO("qos_level_fixed_c = 0x%0x\n", ttu_regs->qos_level_fixed_c); + DTN_INFO("qos_ramp_disable_c = 0x%0x\n", ttu_regs->qos_ramp_disable_c); + DTN_INFO("qos_level_fixed_cur0 = 0x%0x\n", ttu_regs->qos_level_fixed_cur0); + DTN_INFO("qos_ramp_disable_cur0 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur0); + DTN_INFO("qos_level_fixed_cur1 = 0x%0x\n", ttu_regs->qos_level_fixed_cur1); + DTN_INFO("qos_ramp_disable_cur1 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur1); +} + void dcn10_log_hw_state(struct dc *dc) { struct dc_context *dc_ctx = dc->ctx; @@ -129,7 +227,7 @@ void dcn10_log_hw_state(struct dc *dc) struct hubp *hubp = pool->hubps[i]; struct dcn_hubp_state s; - hubp1_read_state(TO_DCN10_HUBP(hubp), &s); + hubp->funcs->hubp_read_state(hubp, &s); DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh" " %6d %8d %7d %8xh", @@ -201,6 +299,20 @@ void dcn10_log_hw_state(struct dc *dc) } DTN_INFO("\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct hubp *hubp = pool->hubps[i]; + struct dcn_hubp_state s = {0}; + + if (!dc->current_state->res_ctx.pipe_ctx[i].stream) + continue; + + hubp->funcs->hubp_read_state(hubp, &s); + DTN_INFO("RQ-DLG-TTU registers for HUBP%d:\n", i); + print_rq_dlg_ttu_regs(dc_ctx, &s); + DTN_INFO("\n"); + } + DTN_INFO("\n"); + log_mpc_crc(dc); DTN_INFO_END(); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 9ced254e652c..3866147fb02a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -56,6 +56,25 @@ struct hubp { bool power_gated; }; +struct dcn_hubp_state { + struct _vcs_dpi_display_dlg_regs_st dlg_attr; + struct _vcs_dpi_display_ttu_regs_st ttu_attr; + struct _vcs_dpi_display_rq_regs_st rq_regs; + uint32_t pixel_format; + uint32_t inuse_addr_hi; + uint32_t viewport_width; + uint32_t viewport_height; + uint32_t rotation_angle; + uint32_t h_mirror_en; + uint32_t sw_mode; + uint32_t dcc_en; + uint32_t blank_en; + uint32_t underflow_status; + uint32_t ttu_disable; + uint32_t min_ttu_vblank; + uint32_t qos_level_low_wm; + uint32_t qos_level_high_wm; +}; struct hubp_funcs { void (*hubp_setup)( @@ -121,6 +140,7 @@ struct hubp_funcs { void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); + void (*hubp_read_state)(struct hubp *hubp, struct 
dcn_hubp_state *s); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index 77eb72874e90..3306e7b0b3e3 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -183,6 +183,36 @@ FN(reg_name, f4), v4, \ FN(reg_name, f5), v5) +#define REG_GET_6(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \ + generic_reg_get6(CTX, REG(reg_name), \ + FN(reg_name, f1), v1, \ + FN(reg_name, f2), v2, \ + FN(reg_name, f3), v3, \ + FN(reg_name, f4), v4, \ + FN(reg_name, f5), v5, \ + FN(reg_name, f6), v6) + +#define REG_GET_7(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \ + generic_reg_get7(CTX, REG(reg_name), \ + FN(reg_name, f1), v1, \ + FN(reg_name, f2), v2, \ + FN(reg_name, f3), v3, \ + FN(reg_name, f4), v4, \ + FN(reg_name, f5), v5, \ + FN(reg_name, f6), v6, \ + FN(reg_name, f7), v7) + +#define REG_GET_8(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \ + generic_reg_get8(CTX, REG(reg_name), \ + FN(reg_name, f1), v1, \ + FN(reg_name, f2), v2, \ + FN(reg_name, f3), v3, \ + FN(reg_name, f4), v4, \ + FN(reg_name, f5), v5, \ + FN(reg_name, f6), v6, \ + FN(reg_name, f7), v7, \ + FN(reg_name, f8), v8) + /* macro to poll and wait for a register field to read back given value */ #define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \ @@ -389,4 +419,30 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr, uint8_t shift4, uint32_t mask4, uint32_t *field_value4, uint8_t shift5, uint32_t mask5, uint32_t *field_value5); +uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + uint8_t shift2, uint32_t mask2, uint32_t *field_value2, + uint8_t shift3, uint32_t mask3, uint32_t *field_value3, + uint8_t shift4, uint32_t mask4, uint32_t *field_value4, + uint8_t shift5, uint32_t mask5, uint32_t *field_value5, + uint8_t shift6, uint32_t mask6, uint32_t *field_value6); + +uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + uint8_t shift2, uint32_t mask2, uint32_t *field_value2, + uint8_t shift3, uint32_t mask3, uint32_t *field_value3, + uint8_t shift4, uint32_t mask4, uint32_t *field_value4, + uint8_t shift5, uint32_t mask5, uint32_t *field_value5, + uint8_t shift6, uint32_t mask6, uint32_t *field_value6, + uint8_t shift7, uint32_t mask7, uint32_t *field_value7); + +uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + uint8_t shift2, uint32_t mask2, uint32_t *field_value2, + uint8_t shift3, uint32_t mask3, uint32_t *field_value3, + uint8_t shift4, uint32_t mask4, uint32_t *field_value4, + uint8_t shift5, uint32_t mask5, uint32_t *field_value5, + uint8_t shift6, uint32_t mask6, uint32_t *field_value6, + uint8_t shift7, uint32_t mask7, uint32_t *field_value7, + uint8_t shift8, uint32_t mask8, uint32_t *field_value8); #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */ -- cgit v1.2.1 From a47654633596a63f14a9035b9c762f8aaf1e00a3 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 4 Apr 2018 16:03:38 -0400 Subject: drm/amd/display: add calculated clock logging to DTN Signed-off-by: Dmytro Laktyushkin Reviewed-by: Dmytro Laktyushkin Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 11 ++++++++++- 1 file 
changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 7dd130d15a67..e547f46d3516 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -311,7 +311,16 @@ void dcn10_log_hw_state(struct dc *dc) print_rq_dlg_ttu_regs(dc_ctx, &s); DTN_INFO("\n"); } - DTN_INFO("\n"); + + DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n" + "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n", + dc->current_state->bw.dcn.calc_clk.dcfclk_khz, + dc->current_state->bw.dcn.calc_clk.dcfclk_deep_sleep_khz, + dc->current_state->bw.dcn.calc_clk.dispclk_khz, + dc->current_state->bw.dcn.calc_clk.dppclk_khz, + dc->current_state->bw.dcn.calc_clk.max_supported_dppclk_khz, + dc->current_state->bw.dcn.calc_clk.fclk_khz, + dc->current_state->bw.dcn.calc_clk.socclk_khz); log_mpc_crc(dc); -- cgit v1.2.1 From ad019f7b6db893271d13148d6d80001d0c23cdf9 Mon Sep 17 00:00:00 2001 From: Yue Hin Lau Date: Mon, 9 Apr 2018 14:46:32 -0400 Subject: drm/amd/display: add missing colorspace for set black color Signed-off-by: Yue Hin Lau Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 481f6928a9c0..83d121510ef5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -208,6 +208,7 @@ void color_space_to_black_color( case COLOR_SPACE_YCBCR709: case COLOR_SPACE_YCBCR601_LIMITED: case COLOR_SPACE_YCBCR709_LIMITED: + case COLOR_SPACE_2020_YCBCR: *black_color = black_color_format[BLACK_COLOR_FORMAT_YUV_CV]; break; @@ -216,7 +217,25 @@ void color_space_to_black_color( black_color_format[BLACK_COLOR_FORMAT_RGB_LIMITED]; break; - default: + /** + * Remove default and add case for all color space + * so when we forget to add new color space + * compiler will give a warning + */ + case COLOR_SPACE_UNKNOWN: + case COLOR_SPACE_SRGB: + case COLOR_SPACE_XR_RGB: + case COLOR_SPACE_MSREF_SCRGB: + case COLOR_SPACE_XV_YCC_709: + case COLOR_SPACE_XV_YCC_601: + case COLOR_SPACE_2020_RGB_FULLRANGE: + case COLOR_SPACE_2020_RGB_LIMITEDRANGE: + case COLOR_SPACE_ADOBERGB: + case COLOR_SPACE_DCIP3: + case COLOR_SPACE_DISPLAYNATIVE: + case COLOR_SPACE_DOLBYVISION: + case COLOR_SPACE_APPCTRL: + case COLOR_SPACE_CUSTOMPOINTS: /* fefault is sRGB black (full range). */ *black_color = black_color_format[BLACK_COLOR_FORMAT_RGB_FULLRANGE]; -- cgit v1.2.1 From f0c0761b38ac30b04d4fed436ff10e894ec0e525 Mon Sep 17 00:00:00 2001 From: Yongqiang Sun Date: Mon, 9 Apr 2018 16:15:20 -0400 Subject: drm/amd/display: Use dig enable to determine fast boot optimization. Linux doesn't know lid state, better to check dig enable value from register. 
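The replacement check, condensed from the hunks below (all identifiers come from the diff): the backend DIG enable bit of the eDP link encoder tells us whether VBIOS/GOP already lit up the panel, which is exactly the condition under which the fast boot optimization is safe.

bool dce110_is_dig_enabled(struct link_encoder *enc)
{
	struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
	uint32_t value;

	/* non-zero only if firmware already enabled the DIG backend */
	REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value);

	return value;
}

dce110_enable_accelerated_mode() then keys dc->apply_edp_fast_boot_optimization off is_dig_enabled() of the eDP link encoder instead of the lid state, which Linux cannot reliably report.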
Signed-off-by: Yongqiang Sun Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_stream.h | 1 - .../gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 6 ++- .../gpu/drm/amd/display/dc/dce/dce_link_encoder.h | 2 + .../amd/display/dc/dce110/dce110_hw_sequencer.c | 47 +++++++--------------- .../gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 1 + 5 files changed, 21 insertions(+), 36 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 11b3433d6432..d7e6d53bb383 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -92,7 +92,6 @@ struct dc_stream_state { int phy_pix_clk; enum signal_type signal; bool dpms_off; - bool lid_state_closed; struct dc_stream_status status; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 8167cad7bcf7..dbe3b26b6d9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -113,6 +113,7 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = { .connect_dig_be_to_fe = dce110_link_encoder_connect_dig_be_to_fe, .enable_hpd = dce110_link_encoder_enable_hpd, .disable_hpd = dce110_link_encoder_disable_hpd, + .is_dig_enabled = dce110_is_dig_enabled, .destroy = dce110_link_encoder_destroy }; @@ -535,8 +536,9 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc, DP_SEC_GSP0_PRIORITY, 1); } -static bool is_dig_enabled(const struct dce110_link_encoder *enc110) +bool dce110_is_dig_enabled(struct link_encoder *enc) { + struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc); uint32_t value; REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value); @@ -1031,7 +1033,7 @@ void dce110_link_encoder_disable_output( struct bp_transmitter_control cntl = { 0 }; enum bp_result result; - if (!is_dig_enabled(enc110)) { + if (!dce110_is_dig_enabled(enc)) { /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ return; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 0ec3433d34b6..347069461a22 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -263,4 +263,6 @@ void dce110_psr_program_dp_dphy_fast_training(struct link_encoder *enc, void dce110_psr_program_secondary_packet(struct link_encoder *enc, unsigned int sdp_transmit_line_num_deadline); +bool dce110_is_dig_enabled(struct link_encoder *enc); + #endif /* __DC_LINK_ENCODER__DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index bd34193ad779..e70ccb9b6afe 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1471,15 +1471,15 @@ static void disable_vga_and_power_gate_all_controllers( } } -static bool is_eDP_lid_closed(struct dc_state *context) +static struct dc_link *get_link_for_edp(struct dc *dc) { int i; - for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->signal == SIGNAL_TYPE_EDP) - return context->streams[i]->lid_state_closed; + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) + return dc->links[i]; } - return false; + return NULL; } static struct dc_link 
*get_link_for_edp_not_in_use( @@ -1516,41 +1516,22 @@ static struct dc_link *get_link_for_edp_not_in_use( */ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) { - /* check eDP lid state and BIOS_SCRATCH_3 to determine fast boot optimization - * UEFI boot - * edp_active_status_from_scratch fast boot optimization - * S4/S5 resume: - * Lid Open true true - * Lid Close false false - * - * S3/ resume: - * Lid Open false false - * Lid Close false false - * - * Legacy boot: - * edp_active_status_from_scratch fast boot optimization - * S4/S resume: - * Lid Open true true - * Lid Close true false - * - * S3/ resume: - * Lid Open false false - * Lid Close false false - */ - struct dc_bios *dcb = dc->ctx->dc_bios; - bool lid_state_closed = is_eDP_lid_closed(context); struct dc_link *edp_link_to_turnoff = NULL; - bool edp_active_status_from_scratch = - (dcb->funcs->get_vga_enabled_displays(dc->ctx->dc_bios) == ATOM_DISPLAY_LCD1_ACTIVE); + struct dc_link *edp_link = get_link_for_edp(dc); + bool can_eDP_fast_boot_optimize = false; + + if (edp_link) { + can_eDP_fast_boot_optimize = + edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc); + } - /*Lid open*/ - if (!lid_state_closed) { + if (can_eDP_fast_boot_optimize) { edp_link_to_turnoff = get_link_for_edp_not_in_use(dc, context); /* if OS doesn't light up eDP and eDP link is available, we want to disable * If resume from S4/S5, should optimization. */ - if (!edp_link_to_turnoff && edp_active_status_from_scratch) + if (!edp_link_to_turnoff) dc->apply_edp_fast_boot_optimization = true; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 54d8a1386142..cf6df2e7beb2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -149,6 +149,7 @@ struct link_encoder_funcs { bool connect); void (*enable_hpd)(struct link_encoder *enc); void (*disable_hpd)(struct link_encoder *enc); + bool (*is_dig_enabled)(struct link_encoder *enc); void (*destroy)(struct link_encoder **enc); }; -- cgit v1.2.1 From a906dbb1e20f5791d728c7d9e2366b8acb4f1bb2 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 16 Apr 2018 17:57:19 +0800 Subject: drm/amdgpu: add amdgpu_bo_param MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit amdgpu_bo_create has too many parameters, and used in too many places. Collect them to one structure. 
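The new structure and its intended use, sketched from the hunks below (field names and the shadow-BO example are taken directly from the diff):

struct amdgpu_bo_param {
	unsigned long		size;
	int			byte_align;
	u32			domain;
	u64			flags;
	enum ttm_bo_type	type;
	struct reservation_object	*resv;
};

/* e.g. the shadow BO allocation in amdgpu_bo_create_shadow() becomes: */
struct amdgpu_bo_param bp = {
	.size = size,
	.byte_align = byte_align,
	.domain = AMDGPU_GEM_DOMAIN_GTT,
	.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		 AMDGPU_GEM_CREATE_SHADOW,
	.type = ttm_bo_type_kernel,
	.resv = bo->tbo.resv
};

r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);

Adding a new allocation parameter later only touches the structure and the allocator, not every call site.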
Signed-off-by: Chunming Zhou Reviewed-by: Huang Rui Reviewed-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 75 +++++++++++++++++------------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 9 ++++ 2 files changed, 51 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 24f582c696cc..b33a7fdea7f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -341,27 +341,25 @@ fail: return false; } -static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, - int byte_align, u32 domain, - u64 flags, enum ttm_bo_type type, - struct reservation_object *resv, +static int amdgpu_bo_do_create(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, struct amdgpu_bo **bo_ptr) { struct ttm_operation_ctx ctx = { - .interruptible = (type != ttm_bo_type_kernel), + .interruptible = (bp->type != ttm_bo_type_kernel), .no_wait_gpu = false, - .resv = resv, + .resv = bp->resv, .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT }; struct amdgpu_bo *bo; - unsigned long page_align; + unsigned long page_align, size = bp->size; size_t acc_size; int r; - page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT; + page_align = roundup(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT; size = ALIGN(size, PAGE_SIZE); - if (!amdgpu_bo_validate_size(adev, size, domain)) + if (!amdgpu_bo_validate_size(adev, size, bp->domain)) return -ENOMEM; *bo_ptr = NULL; @@ -375,18 +373,18 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); - bo->preferred_domains = domain & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_CPU | - AMDGPU_GEM_DOMAIN_GDS | - AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA); + bo->preferred_domains = bp->domain & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU | + AMDGPU_GEM_DOMAIN_GDS | + AMDGPU_GEM_DOMAIN_GWS | + AMDGPU_GEM_DOMAIN_OA); bo->allowed_domains = bo->preferred_domains; - if (type != ttm_bo_type_kernel && + if (bp->type != ttm_bo_type_kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT; - bo->flags = flags; + bo->flags = bp->flags; #ifdef CONFIG_X86_32 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit @@ -417,11 +415,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, #endif bo->tbo.bdev = &adev->mman.bdev; - amdgpu_ttm_placement_from_domain(bo, domain); + amdgpu_ttm_placement_from_domain(bo, bp->domain); - r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type, + r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type, &bo->placement, page_align, &ctx, acc_size, - NULL, resv, &amdgpu_ttm_bo_destroy); + NULL, bp->resv, &amdgpu_ttm_bo_destroy); if (unlikely(r != 0)) return r; @@ -433,10 +431,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, else amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); - if (type == ttm_bo_type_kernel) + if (bp->type == ttm_bo_type_kernel) bo->tbo.priority = 1; - if (flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && + if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { struct dma_fence *fence; @@ -449,20 +447,20 @@ static int 
amdgpu_bo_do_create(struct amdgpu_device *adev, unsigned long size, bo->tbo.moving = dma_fence_get(fence); dma_fence_put(fence); } - if (!resv) + if (!bp->resv) amdgpu_bo_unreserve(bo); *bo_ptr = bo; trace_amdgpu_bo_create(bo); /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */ - if (type == ttm_bo_type_device) + if (bp->type == ttm_bo_type_device) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; return 0; fail_unreserve: - if (!resv) + if (!bp->resv) ww_mutex_unlock(&bo->tbo.resv->lock); amdgpu_bo_unref(&bo); return r; @@ -472,16 +470,21 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, unsigned long size, int byte_align, struct amdgpu_bo *bo) { + struct amdgpu_bo_param bp = { + .size = size, + .byte_align = byte_align, + .domain = AMDGPU_GEM_DOMAIN_GTT, + .flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW, + .type = ttm_bo_type_kernel, + .resv = bo->tbo.resv + }; int r; if (bo->shadow) return 0; - r = amdgpu_bo_do_create(adev, size, byte_align, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_SHADOW, - ttm_bo_type_kernel, - bo->tbo.resv, &bo->shadow); + r = amdgpu_bo_do_create(adev, &bp, &bo->shadow); if (!r) { bo->shadow->parent = amdgpu_bo_ref(bo); mutex_lock(&adev->shadow_list_lock); @@ -498,11 +501,17 @@ int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, struct reservation_object *resv, struct amdgpu_bo **bo_ptr) { - uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW; + struct amdgpu_bo_param bp = { + .size = size, + .byte_align = byte_align, + .domain = domain, + .flags = flags & ~AMDGPU_GEM_CREATE_SHADOW, + .type = type, + .resv = resv + }; int r; - r = amdgpu_bo_do_create(adev, size, byte_align, domain, - parent_flags, type, resv, bo_ptr); + r = amdgpu_bo_do_create(adev, &bp, bo_ptr); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 1e9fe85abcbb..4bb6f0a8d799 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -33,6 +33,15 @@ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX +struct amdgpu_bo_param { + unsigned long size; + int byte_align; + u32 domain; + u64 flags; + enum ttm_bo_type type; + struct reservation_object *resv; +}; + /* bo virtual addresses in a vm */ struct amdgpu_bo_va_mapping { struct amdgpu_bo_va *bo_va; -- cgit v1.2.1 From 3216c6b71d1e6a7dce2fd29c531e8c99c1b88c95 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Mon, 16 Apr 2018 18:27:50 +0800 Subject: drm/amdgpu: use amdgpu_bo_param for amdgpu_bo_create v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After that, we can easily add new parameter when need. v2: a) rebase. b) Initialize struct amdgpu_bo_param, future new member could only be used in some one case, but all member should have its own initial value. 
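The caller-side conversion pattern used throughout this patch, condensed from the hunks below (the GTT allocation in alloc_gtt_mem() serves as the example; nothing new is introduced): zero the structure first so that members added in the future default to 0 at every existing call site, then fill in only the fields that matter for this allocation.

struct amdgpu_bo_param bp;

memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = PAGE_SIZE;
bp.domain = AMDGPU_GEM_DOMAIN_GTT;
bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;

r = amdgpu_bo_create(adev, &bp, &bo);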
Signed-off-by: Chunming Zhou Reviewed-by: Huang Rui (v1) Reviewed-by: Christian König Reviewed-by: Junwei Zhang Cc: christian.koenig@amd.com Cc: Felix.Kuehling@amd.com Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 12 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 11 ++++- drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c | 15 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 17 ++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 11 ++++- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 58 ++++++++++++------------ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 12 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_test.c | 18 +++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 26 ++++++++--- 11 files changed, 130 insertions(+), 71 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 4d36203ffb11..887702c59488 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -217,13 +217,19 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_bo *bo = NULL; + struct amdgpu_bo_param bp; int r; uint64_t gpu_addr_tmp = 0; void *cpu_ptr_tmp = NULL; - r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, - AMDGPU_GEM_CREATE_CPU_GTT_USWC, ttm_bo_type_kernel, - NULL, &bo); + memset(&bp, 0, sizeof(bp)); + bp.size = size; + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_GTT; + bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; + bp.type = ttm_bo_type_kernel; + bp.resv = NULL; + r = amdgpu_bo_create(adev, &bp, &bo); if (r) { dev_err(adev->dev, "failed to allocate BO for amdkfd (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 1d6e1479da38..c1b0cdb401dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1004,6 +1004,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; struct amdgpu_bo *bo; + struct amdgpu_bo_param bp; int byte_align; u32 alloc_domain; u64 alloc_flags; @@ -1069,8 +1070,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", va, size, domain_string(alloc_domain)); - ret = amdgpu_bo_create(adev, size, byte_align, - alloc_domain, alloc_flags, ttm_bo_type_device, NULL, &bo); + memset(&bp, 0, sizeof(bp)); + bp.size = size; + bp.byte_align = byte_align; + bp.domain = alloc_domain; + bp.flags = alloc_flags; + bp.type = ttm_bo_type_device; + bp.resv = NULL; + ret = amdgpu_bo_create(adev, &bp, &bo); if (ret) { pr_debug("Failed to create BO on domain %s. 
ret %d\n", domain_string(alloc_domain), ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 02b849be083b..19cfff31f2e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -75,13 +75,20 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, { struct amdgpu_bo *dobj = NULL; struct amdgpu_bo *sobj = NULL; + struct amdgpu_bo_param bp; uint64_t saddr, daddr; int r, n; int time; + memset(&bp, 0, sizeof(bp)); + bp.size = size; + bp.byte_align = PAGE_SIZE; + bp.domain = sdomain; + bp.flags = 0; + bp.type = ttm_bo_type_kernel; + bp.resv = NULL; n = AMDGPU_BENCHMARK_ITERATIONS; - r = amdgpu_bo_create(adev, size, PAGE_SIZE,sdomain, 0, - ttm_bo_type_kernel, NULL, &sobj); + r = amdgpu_bo_create(adev, &bp, &sobj); if (r) { goto out_cleanup; } @@ -93,8 +100,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size, if (r) { goto out_cleanup; } - r = amdgpu_bo_create(adev, size, PAGE_SIZE, ddomain, 0, - ttm_bo_type_kernel, NULL, &dobj); + bp.domain = ddomain; + r = amdgpu_bo_create(adev, &bp, &dobj); if (r) { goto out_cleanup; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index cf0f186c6092..17d6b9fb6d77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -113,12 +113,17 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev) int r; if (adev->gart.robj == NULL) { - r = amdgpu_bo_create(adev, adev->gart.table_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - ttm_bo_type_kernel, NULL, - &adev->gart.robj); + struct amdgpu_bo_param bp; + + memset(&bp, 0, sizeof(bp)); + bp.size = adev->gart.table_size; + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + bp.type = ttm_bo_type_kernel; + bp.resv = NULL; + r = amdgpu_bo_create(adev, &bp, &adev->gart.robj); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 46b9ea4e6103..1200c5ba37da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -48,17 +48,24 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, struct drm_gem_object **obj) { struct amdgpu_bo *bo; + struct amdgpu_bo_param bp; int r; + memset(&bp, 0, sizeof(bp)); *obj = NULL; /* At least align on page size */ if (alignment < PAGE_SIZE) { alignment = PAGE_SIZE; } + bp.size = size; + bp.byte_align = alignment; + bp.type = type; + bp.resv = resv; retry: - r = amdgpu_bo_create(adev, size, alignment, initial_domain, - flags, type, resv, &bo); + bp.flags = flags; + bp.domain = initial_domain; + r = amdgpu_bo_create(adev, &bp, &bo); if (r) { if (r != -ERESTARTSYS) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b33a7fdea7f2..cac65e32a0b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -191,14 +191,21 @@ int amdgpu_bo_create_reserved(struct amdgpu_device *adev, u32 domain, struct amdgpu_bo **bo_ptr, u64 *gpu_addr, void **cpu_addr) { + struct amdgpu_bo_param bp; bool free = false; int r; + memset(&bp, 0, sizeof(bp)); + bp.size = size; + 
bp.byte_align = align; + bp.domain = domain; + bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + bp.type = ttm_bo_type_kernel; + bp.resv = NULL; + if (!*bo_ptr) { - r = amdgpu_bo_create(adev, size, align, domain, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - ttm_bo_type_kernel, NULL, bo_ptr); + r = amdgpu_bo_create(adev, &bp, bo_ptr); if (r) { dev_err(adev->dev, "(%d) failed to allocate kernel bo\n", r); @@ -470,20 +477,21 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, unsigned long size, int byte_align, struct amdgpu_bo *bo) { - struct amdgpu_bo_param bp = { - .size = size, - .byte_align = byte_align, - .domain = AMDGPU_GEM_DOMAIN_GTT, - .flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC | - AMDGPU_GEM_CREATE_SHADOW, - .type = ttm_bo_type_kernel, - .resv = bo->tbo.resv - }; + struct amdgpu_bo_param bp; int r; if (bo->shadow) return 0; + memset(&bp, 0, sizeof(bp)); + bp.size = size; + bp.byte_align = byte_align; + bp.domain = AMDGPU_GEM_DOMAIN_GTT; + bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_SHADOW; + bp.type = ttm_bo_type_kernel; + bp.resv = bo->tbo.resv; + r = amdgpu_bo_do_create(adev, &bp, &bo->shadow); if (!r) { bo->shadow->parent = amdgpu_bo_ref(bo); @@ -495,34 +503,26 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev, return r; } -int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, - int byte_align, u32 domain, - u64 flags, enum ttm_bo_type type, - struct reservation_object *resv, +int amdgpu_bo_create(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, struct amdgpu_bo **bo_ptr) { - struct amdgpu_bo_param bp = { - .size = size, - .byte_align = byte_align, - .domain = domain, - .flags = flags & ~AMDGPU_GEM_CREATE_SHADOW, - .type = type, - .resv = resv - }; + u64 flags = bp->flags; int r; - r = amdgpu_bo_do_create(adev, &bp, bo_ptr); + bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW; + r = amdgpu_bo_do_create(adev, bp, bo_ptr); if (r) return r; if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) { - if (!resv) + if (!bp->resv) WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv, NULL)); - r = amdgpu_bo_create_shadow(adev, size, byte_align, (*bo_ptr)); + r = amdgpu_bo_create_shadow(adev, bp->size, bp->byte_align, (*bo_ptr)); - if (!resv) + if (!bp->resv) reservation_object_unlock((*bo_ptr)->tbo.resv); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 4bb6f0a8d799..e9a21d991e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -233,10 +233,8 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo) return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC; } -int amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, - int byte_align, u32 domain, - u64 flags, enum ttm_bo_type type, - struct reservation_object *resv, +int amdgpu_bo_create(struct amdgpu_device *adev, + struct amdgpu_bo_param *bp, struct amdgpu_bo **bo_ptr); int amdgpu_bo_create_reserved(struct amdgpu_device *adev, unsigned long size, int align, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 4b584cb75bf4..713417b6d15d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -102,12 +102,18 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, struct reservation_object *resv = attach->dmabuf->resv; struct amdgpu_device 
*adev = dev->dev_private; struct amdgpu_bo *bo; + struct amdgpu_bo_param bp; int ret; + memset(&bp, 0, sizeof(bp)); + bp.size = attach->dmabuf->size; + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_CPU; + bp.flags = 0; + bp.type = ttm_bo_type_sg; + bp.resv = resv; ww_mutex_lock(&resv->lock, NULL); - ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg, - resv, &bo); + ret = amdgpu_bo_create(adev, &bp, &bo); if (ret) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index 2dbe87591f81..d167e8ab76d3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -33,6 +33,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_bo *vram_obj = NULL; struct amdgpu_bo **gtt_obj = NULL; + struct amdgpu_bo_param bp; uint64_t gart_addr, vram_addr; unsigned n, size; int i, r; @@ -58,9 +59,15 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) r = 1; goto out_cleanup; } - - r = amdgpu_bo_create(adev, size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, 0, - ttm_bo_type_kernel, NULL, &vram_obj); + memset(&bp, 0, sizeof(bp)); + bp.size = size; + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + bp.flags = 0; + bp.type = ttm_bo_type_kernel; + bp.resv = NULL; + + r = amdgpu_bo_create(adev, &bp, &vram_obj); if (r) { DRM_ERROR("Failed to create VRAM object\n"); goto out_cleanup; @@ -79,9 +86,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) void **vram_start, **vram_end; struct dma_fence *fence = NULL; - r = amdgpu_bo_create(adev, size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_GTT, 0, - ttm_bo_type_kernel, NULL, gtt_obj + i); + bp.domain = AMDGPU_GEM_DOMAIN_GTT; + r = amdgpu_bo_create(adev, &bp, gtt_obj + i); if (r) { DRM_ERROR("Failed to create GTT object %d\n", i); goto out_lclean; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 29efaac6e3ed..dfd22db13fb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1316,6 +1316,7 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) { struct ttm_operation_ctx ctx = { false, false }; + struct amdgpu_bo_param bp; int r = 0; int i; u64 vram_size = adev->gmc.visible_vram_size; @@ -1323,17 +1324,21 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) u64 size = adev->fw_vram_usage.size; struct amdgpu_bo *bo; + memset(&bp, 0, sizeof(bp)); + bp.size = adev->fw_vram_usage.size; + bp.byte_align = PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; + bp.type = ttm_bo_type_kernel; + bp.resv = NULL; adev->fw_vram_usage.va = NULL; adev->fw_vram_usage.reserved_bo = NULL; if (adev->fw_vram_usage.size > 0 && adev->fw_vram_usage.size <= vram_size) { - r = amdgpu_bo_create(adev, adev->fw_vram_usage.size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, - ttm_bo_type_kernel, NULL, + r = amdgpu_bo_create(adev, &bp, &adev->fw_vram_usage.reserved_bo); if (r) goto error_create; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f0fbc331aa30..9ec7c1041df2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -412,11 +412,16 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, struct amdgpu_bo *pt; if (!entry->base.bo) { - r = amdgpu_bo_create(adev, - amdgpu_vm_bo_size(adev, level), - AMDGPU_GPU_PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, flags, - ttm_bo_type_kernel, resv, &pt); + struct amdgpu_bo_param bp; + + memset(&bp, 0, sizeof(bp)); + bp.size = amdgpu_vm_bo_size(adev, level); + bp.byte_align = AMDGPU_GPU_PAGE_SIZE; + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + bp.flags = flags; + bp.type = ttm_bo_type_kernel; + bp.resv = resv; + r = amdgpu_bo_create(adev, &bp, &pt); if (r) return r; @@ -2368,6 +2373,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size, int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int vm_context, unsigned int pasid) { + struct amdgpu_bo_param bp; const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT(adev) * 8); unsigned ring_instance; @@ -2422,8 +2428,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, flags |= AMDGPU_GEM_CREATE_SHADOW; size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level); - r = amdgpu_bo_create(adev, size, align, AMDGPU_GEM_DOMAIN_VRAM, flags, - ttm_bo_type_kernel, NULL, &vm->root.base.bo); + memset(&bp, 0, sizeof(bp)); + bp.size = size; + bp.byte_align = align; + bp.domain = AMDGPU_GEM_DOMAIN_VRAM; + bp.flags = flags; + bp.type = ttm_bo_type_kernel; + bp.resv = NULL; + r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo); if (r) goto error_free_sched_entity; -- cgit v1.2.1 From 7951e376704773134cefcf0751e9042368226f15 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 13 Apr 2018 16:13:41 +0800 Subject: drm/amdgpu: Reserved vram for smu to save debug info. v2: check reserved vram size before allocate. Reviewed-by: Evan Quan Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 44 +++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 6 ++++ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 35 +++++++++++++++++++++ 5 files changed, 88 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ea1b28536bfc..d64ef30fed47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -129,6 +129,7 @@ extern int amdgpu_lbpw; extern int amdgpu_compute_multipipe; extern int amdgpu_gpu_recovery; extern int amdgpu_emu_mode; +extern uint amdgpu_smu_memory_pool_size; #ifdef CONFIG_DRM_AMDGPU_SI extern int amdgpu_si_support; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d7f2bbdfd348..5958e8112489 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -690,6 +690,8 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev, { u64 size_af, size_bf; + mc->gart_size += adev->pm.smu_prv_buffer_size; + size_af = adev->gmc.mc_mask - mc->vram_end; size_bf = mc->vram_start; if (size_bf > size_af) { @@ -907,6 +909,46 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev) } } +static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) +{ + struct sysinfo si; + bool is_os_64 = (sizeof(void *) == 8) ? 
true : false; + uint64_t total_memory; + uint64_t dram_size_seven_GB = 0x1B8000000; + uint64_t dram_size_three_GB = 0xB8000000; + + if (amdgpu_smu_memory_pool_size == 0) + return; + + if (!is_os_64) { + DRM_WARN("Not 64-bit OS, feature not supported\n"); + goto def_value; + } + si_meminfo(&si); + total_memory = (uint64_t)si.totalram * si.mem_unit; + + if ((amdgpu_smu_memory_pool_size == 1) || + (amdgpu_smu_memory_pool_size == 2)) { + if (total_memory < dram_size_three_GB) + goto def_value1; + } else if ((amdgpu_smu_memory_pool_size == 4) || + (amdgpu_smu_memory_pool_size == 8)) { + if (total_memory < dram_size_seven_GB) + goto def_value1; + } else { + DRM_WARN("Smu memory pool size not supported\n"); + goto def_value; + } + adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28; + + return; + +def_value1: + DRM_WARN("No enough system memory\n"); +def_value: + adev->pm.smu_prv_buffer_size = 0; +} + /** * amdgpu_device_check_arguments - validate module params * @@ -948,6 +990,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev) amdgpu_vm_fragment_size = -1; } + amdgpu_device_check_smu_prv_buffer_size(adev); + amdgpu_device_check_vm_size(adev); amdgpu_device_check_block_size(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index b8c5177fa809..19d8bf590da3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -445,6 +445,8 @@ struct amdgpu_pm { uint32_t pcie_gen_mask; uint32_t pcie_mlw_mask; struct amd_pp_display_configuration pm_display_cfg;/* set by dc */ + uint32_t smu_prv_buffer_size; + struct amdgpu_bo *smu_prv_buffer; }; #define R600_SSTU_DFLT 0 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 0b19482b36b8..5c0567ad1ba7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -132,6 +132,7 @@ int amdgpu_lbpw = -1; int amdgpu_compute_multipipe = -1; int amdgpu_gpu_recovery = -1; /* auto */ int amdgpu_emu_mode = 0; +uint amdgpu_smu_memory_pool_size = 0; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -316,6 +317,11 @@ MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled) module_param_named(cik_support, amdgpu_cik_support, int, 0444); #endif +MODULE_PARM_DESC(smu_memory_pool_size, + "reserve gtt for smu debug usage, 0 = disable," + "0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte"); +module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444); + static const struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_SI {0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI}, diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 66c49b89cdb4..6c8191444646 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -145,6 +145,37 @@ static int pp_hw_fini(void *handle) return 0; } +static void pp_reserve_vram_for_smu(struct amdgpu_device *adev) +{ + int r = -EINVAL; + void *cpu_ptr = NULL; + uint64_t gpu_addr; + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + + if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->pm.smu_prv_buffer, + &gpu_addr, + &cpu_ptr)) { + DRM_ERROR("amdgpu: failed to create smu prv buffer\n"); + return; + } + + if 
(hwmgr->hwmgr_func->notify_cac_buffer_info) + r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, + lower_32_bits((unsigned long)cpu_ptr), + upper_32_bits((unsigned long)cpu_ptr), + lower_32_bits(gpu_addr), + upper_32_bits(gpu_addr), + adev->pm.smu_prv_buffer_size); + + if (r) { + amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); + adev->pm.smu_prv_buffer = NULL; + DRM_ERROR("amdgpu: failed to notify SMU buffer address\n"); + } +} + static int pp_late_init(void *handle) { struct amdgpu_device *adev = handle; @@ -156,6 +187,8 @@ static int pp_late_init(void *handle) AMD_PP_TASK_COMPLETE_INIT, NULL); mutex_unlock(&hwmgr->smu_lock); } + if (adev->pm.smu_prv_buffer_size != 0) + pp_reserve_vram_for_smu(adev); return 0; } @@ -163,6 +196,8 @@ static void pp_late_fini(void *handle) { struct amdgpu_device *adev = handle; + if (adev->pm.smu_prv_buffer) + amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL); amd_powerplay_destroy(adev); } -- cgit v1.2.1 From a0d454a67737162b0e4b1cc91612d7b25d5681b0 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 13 Apr 2018 16:16:49 +0800 Subject: drm/amd/pp: Remove dead interface Reviewed-by: Evan Quan Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 6 ------ drivers/gpu/drm/amd/include/kgd_pp_interface.h | 5 ----- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 30 -------------------------- 3 files changed, 41 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 19d8bf590da3..354c6dc99481 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -349,12 +349,6 @@ enum amdgpu_pcie_gen { ((adev)->powerplay.pp_funcs->set_clockgating_by_smu(\ (adev)->powerplay.pp_handle, msg_id)) -#define amdgpu_dpm_notify_smu_memory_info(adev, virtual_addr_low, \ - virtual_addr_hi, mc_addr_low, mc_addr_hi, size) \ - ((adev)->powerplay.pp_funcs->notify_smu_memory_info)( \ - (adev)->powerplay.pp_handle, virtual_addr_low, \ - virtual_addr_hi, mc_addr_low, mc_addr_hi, size) - #define amdgpu_dpm_get_power_profile_mode(adev, buf) \ ((adev)->powerplay.pp_funcs->get_power_profile_mode(\ (adev)->powerplay.pp_handle, buf)) diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 1bec9072e36f..01969b135ab4 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -239,11 +239,6 @@ struct amd_pm_funcs { int (*load_firmware)(void *handle); int (*wait_for_fw_loading_complete)(void *handle); int (*set_clockgating_by_smu)(void *handle, uint32_t msg_id); - int (*notify_smu_memory_info)(void *handle, uint32_t virtual_addr_low, - uint32_t virtual_addr_hi, - uint32_t mc_addr_low, - uint32_t mc_addr_hi, - uint32_t size); int (*set_power_limit)(void *handle, uint32_t n); int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit); /* export to DC */ diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 6c8191444646..bd0d387584ac 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -934,35 +934,6 @@ static int pp_dpm_switch_power_profile(void *handle, return 0; } -static int pp_dpm_notify_smu_memory_info(void *handle, - uint32_t virtual_addr_low, - uint32_t virtual_addr_hi, - uint32_t mc_addr_low, - uint32_t mc_addr_hi, - uint32_t size) -{ - struct pp_hwmgr 
*hwmgr = handle; - int ret = 0; - - if (!hwmgr || !hwmgr->pm_en) - return -EINVAL; - - if (hwmgr->hwmgr_func->notify_cac_buffer_info == NULL) { - pr_info("%s was not implemented.\n", __func__); - return -EINVAL; - } - - mutex_lock(&hwmgr->smu_lock); - - ret = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr, virtual_addr_low, - virtual_addr_hi, mc_addr_low, mc_addr_hi, - size); - - mutex_unlock(&hwmgr->smu_lock); - - return ret; -} - static int pp_set_power_limit(void *handle, uint32_t limit) { struct pp_hwmgr *hwmgr = handle; @@ -1229,7 +1200,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .get_vce_clock_state = pp_dpm_get_vce_clock_state, .switch_power_profile = pp_dpm_switch_power_profile, .set_clockgating_by_smu = pp_set_clockgating_by_smu, - .notify_smu_memory_info = pp_dpm_notify_smu_memory_info, .get_power_profile_mode = pp_get_power_profile_mode, .set_power_profile_mode = pp_set_power_profile_mode, .odn_edit_dpm_table = pp_odn_edit_dpm_table, -- cgit v1.2.1 From 8d80fada066bec682f1b7e9015b8412e3460c1b3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 17 Apr 2018 17:26:26 +0800 Subject: drm/amd/pp: Fix bug voltage can't be OD separately on VI Make sure to update the MCLK and SCLK flags when setting the VDDC flags due to dependencies. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 68aae09a886a..720ac47d3365 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4679,23 +4679,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + } dep_table = table_info->vdd_dep_on_sclk; odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } } static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, -- cgit v1.2.1 From 32d8c6620d49779600714f197611856ed503a7a5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 17 Apr 2018 08:55:44 -0500 Subject: drm/amdgpu: print the vbios version in the debugfs firmware info Useful for info gathering about what firmwares are in use in the driver. 
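A hypothetical userspace spot-check of the output this extends; the debugfs mount point and the amdgpu_firmware_info file name are assumptions based on the usual amdgpu debugfs layout, not something this patch defines:

/* Dump the firmware-info debugfs file; with this patch applied the output
 * gains a trailing "VBIOS version: ..." line.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_firmware_info", "r");
        char line[256];

        if (!f) {
                perror("fopen");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}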
Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 4e15b6fe2839..d602f8b14c58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -31,6 +31,7 @@ #include "amdgpu_sched.h" #include "amdgpu_uvd.h" #include "amdgpu_vce.h" +#include "atom.h" #include #include @@ -1089,6 +1090,7 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; struct drm_amdgpu_info_firmware fw_info; struct drm_amdgpu_query_fw query_fw; + struct atom_context *ctx = adev->mode_info.atom_context; int ret, i; /* VCE */ @@ -1211,6 +1213,9 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); + + seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version); + return 0; } -- cgit v1.2.1 From 58cd8fbc64b03d0e9961d627526bd07edbea00b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 17 Apr 2018 14:47:42 +0200 Subject: drm/amdgpu: limit reg_write_reg_wait workaround to SRIOV v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Turned out that this locks up some bare metal Vega10. v2: fix stupid typo Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 583f6f616dd3..6a19e0311a9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -4144,7 +4144,12 @@ static void gfx_v9_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, { int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX); - gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, ref, mask, 0x20); + if (amdgpu_sriov_vf(ring->adev)) + gfx_v9_0_wait_reg_mem(ring, usepfp, 0, 1, reg0, reg1, + ref, mask, 0x20); + else + amdgpu_ring_emit_reg_write_reg_wait_helper(ring, reg0, reg1, + ref, mask); } static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, -- cgit v1.2.1 From aa2b2e2822831d78a283edb12cf8b7da21bdd0ed Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Tue, 17 Apr 2018 11:52:53 +0800 Subject: drm/amdgpu: set preferred_domain independent of fallback handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When GEM needs to fallback to GTT for VRAM BOs we still want the preferred domain to be untouched so that the BO has a cance to move back to VRAM in the future. 
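A sketch of the fallback pattern this enables, modelled on the amdgpu_gem_object_create() change below; adev, size, alignment and resv are assumed to come from the surrounding GEM code. The preferred domain stays on VRAM so the BO can migrate back later, while the immediate allocation is allowed to land in GTT:

struct amdgpu_bo_param bp;
struct amdgpu_bo *bo = NULL;
int r;

memset(&bp, 0, sizeof(bp));
bp.size = size;
bp.byte_align = alignment;
bp.type = ttm_bo_type_device;
bp.resv = resv;
bp.preferred_domain = AMDGPU_GEM_DOMAIN_VRAM; /* remembered by the BO */

bp.domain = AMDGPU_GEM_DOMAIN_VRAM;           /* first attempt */
r = amdgpu_bo_create(adev, &bp, &bo);
if (r == -ENOMEM) {
        bp.domain = AMDGPU_GEM_DOMAIN_GTT;    /* fallback placement */
        r = amdgpu_bo_create(adev, &bp, &bo); /* preferred domain unchanged */
}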
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 15 +++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 1 + 3 files changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 1200c5ba37da..ff606ce88837 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -62,6 +62,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bp.byte_align = alignment; bp.type = type; bp.resv = resv; + bp.preferred_domain = initial_domain; retry: bp.flags = flags; bp.domain = initial_domain; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index cac65e32a0b9..9258f0694922 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -360,6 +360,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, }; struct amdgpu_bo *bo; unsigned long page_align, size = bp->size; + u32 preferred_domains; size_t acc_size; int r; @@ -380,12 +381,14 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); - bo->preferred_domains = bp->domain & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_CPU | - AMDGPU_GEM_DOMAIN_GDS | - AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA); + preferred_domains = bp->preferred_domain ? bp->preferred_domain : + bp->domain; + bo->preferred_domains = preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU | + AMDGPU_GEM_DOMAIN_GDS | + AMDGPU_GEM_DOMAIN_GWS | + AMDGPU_GEM_DOMAIN_OA); bo->allowed_domains = bo->preferred_domains; if (bp->type != ttm_bo_type_kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index e9a21d991e77..540e03fa159f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -37,6 +37,7 @@ struct amdgpu_bo_param { unsigned long size; int byte_align; u32 domain; + u32 preferred_domain; u64 flags; enum ttm_bo_type type; struct reservation_object *resv; -- cgit v1.2.1 From 3f188453faf7ba5b59e8064df4afffbc946e25ec Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Tue, 17 Apr 2018 18:34:40 +0800 Subject: drm/amdgpu: handle domain mask checking v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit if domain is illegal, we should return error. v2: remove duplicated domain checking. 
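The validation now lives in one place; a minimal sketch of that check as a hypothetical helper, assuming AMDGPU_GEM_DOMAIN_MASK covers exactly the valid GEM domain bits as the diff below implies:

/* Reject any request that sets a bit outside the known GEM domains. */
static int amdgpu_gem_domains_valid(u64 requested)
{
        return (requested & ~(u64)AMDGPU_GEM_DOMAIN_MASK) ? -EINVAL : 0;
}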
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7 +------ drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 9 +-------- 2 files changed, 2 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index ff606ce88837..c62c3dd4dcc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -229,12 +229,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, return -EINVAL; /* reject invalid gem domains */ - if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GDS | - AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA)) + if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK) return -EINVAL; /* create a gem object to contain this object in */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 9258f0694922..feece0a491a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -360,7 +360,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, }; struct amdgpu_bo *bo; unsigned long page_align, size = bp->size; - u32 preferred_domains; size_t acc_size; int r; @@ -381,14 +380,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, drm_gem_private_object_init(adev->ddev, &bo->gem_base, size); INIT_LIST_HEAD(&bo->shadow_list); INIT_LIST_HEAD(&bo->va); - preferred_domains = bp->preferred_domain ? bp->preferred_domain : + bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain : bp->domain; - bo->preferred_domains = preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT | - AMDGPU_GEM_DOMAIN_CPU | - AMDGPU_GEM_DOMAIN_GDS | - AMDGPU_GEM_DOMAIN_GWS | - AMDGPU_GEM_DOMAIN_OA); bo->allowed_domains = bo->preferred_domains; if (bp->type != ttm_bo_type_kernel && bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM) -- cgit v1.2.1 From d240cd9eddd943dbe0267d081697195ff1e90b65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marek=20Ol=C5=A1=C3=A1k?= Date: Tue, 3 Apr 2018 13:05:03 -0400 Subject: drm/amdgpu: optionally do a writeback but don't invalidate TC for IB fences MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is a new IB flag that enables this new behavior. Full invalidation is unnecessary for RELEASE_MEM and doesn't make sense when draw calls from two adjacent gfx IBs run in parallel. This will be the new default for Mesa. 
v2: bump the version Signed-off-by: Marek Olšák Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 8 ++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 4 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 11 +++++++---- drivers/gpu/drm/amd/amdgpu/soc15d.h | 1 + 7 files changed, 23 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 5c0567ad1ba7..7c17a0bc2cd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -75,9 +75,10 @@ * - 3.23.0 - Add query for VRAM lost counter * - 3.24.0 - Add high priority compute support for gfx9 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk). + * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE. */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 25 +#define KMS_DRIVER_MINOR 26 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 97449e06a242..d09fcab2398f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -131,7 +131,8 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. */ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f) +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, + unsigned flags) { struct amdgpu_device *adev = ring->adev; struct amdgpu_fence *fence; @@ -149,7 +150,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f) adev->fence_context + ring->idx, seq); amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, - seq, AMDGPU_FENCE_FLAG_INT); + seq, flags | AMDGPU_FENCE_FLAG_INT); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; /* This function can't be called concurrently anyway, otherwise diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 311589e02d17..f70eeed9ed76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -127,6 +127,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, struct amdgpu_vm *vm; uint64_t fence_ctx; uint32_t status = 0, alloc_size; + unsigned fence_flags = 0; unsigned i; int r = 0; @@ -227,7 +228,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, #endif amdgpu_asic_invalidate_hdp(adev, ring); - r = amdgpu_fence_emit(ring, f); + if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE) + fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY; + + r = amdgpu_fence_emit(ring, f, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) @@ -242,7 +246,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, /* wrap the last IB with fence */ if (job && job->uf_addr) { amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence, - AMDGPU_FENCE_FLAG_64BIT); + fence_flags | AMDGPU_FENCE_FLAG_64BIT); } if (patch_offset != ~0 && ring->funcs->patch_cond_exec) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 08fcdf6f7b53..4f8dac2d36a5 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -42,6 +42,7 @@ #define AMDGPU_FENCE_FLAG_64BIT (1 << 0) #define AMDGPU_FENCE_FLAG_INT (1 << 1) +#define AMDGPU_FENCE_FLAG_TC_WB_ONLY (1 << 2) enum amdgpu_ring_type { AMDGPU_RING_TYPE_GFX, @@ -90,7 +91,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, unsigned irq_type); void amdgpu_fence_driver_suspend(struct amdgpu_device *adev); void amdgpu_fence_driver_resume(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, + unsigned flags); int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s); void amdgpu_fence_process(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9ec7c1041df2..9c2195a2896d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -633,7 +633,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid); if (vm_flush_needed || pasid_mapping_needed) { - r = amdgpu_fence_emit(ring, &fence); + r = amdgpu_fence_emit(ring, &fence, 0); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 6a19e0311a9c..05b2d34110b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3775,13 +3775,16 @@ static void gfx_v9_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, { bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT; bool int_sel = flags & AMDGPU_FENCE_FLAG_INT; + bool writeback = flags & AMDGPU_FENCE_FLAG_TC_WB_ONLY; /* RELEASE_MEM - flush caches, send int */ amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 6)); - amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN | - EOP_TC_ACTION_EN | - EOP_TC_WB_ACTION_EN | - EOP_TC_MD_ACTION_EN | + amdgpu_ring_write(ring, ((writeback ? (EOP_TC_WB_ACTION_EN | + EOP_TC_NC_ACTION_EN) : + (EOP_TCL1_ACTION_EN | + EOP_TC_ACTION_EN | + EOP_TC_WB_ACTION_EN | + EOP_TC_MD_ACTION_EN)) | EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5))); amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0)); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h index 7f408f85fdb6..839a144c1645 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15d.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h @@ -159,6 +159,7 @@ #define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ #define EOP_TCL1_ACTION_EN (1 << 16) #define EOP_TC_ACTION_EN (1 << 17) /* L2 */ +#define EOP_TC_NC_ACTION_EN (1 << 19) #define EOP_TC_MD_ACTION_EN (1 << 21) /* L2 metadata */ #define DATA_SEL(x) ((x) << 29) -- cgit v1.2.1 From 7fd645f258711a4ea4d777188949494f9e68b787 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 18 Apr 2018 18:35:09 +0800 Subject: drm/amdgpu: fix list not initialized MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise, cpu stuck for 22s with kernel panic. 
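The underlying rule the fix below applies, shown as a generic sketch with a hypothetical helper rather than driver code: detach and re-initialize the entry unconditionally, then decide whether to queue it again, so a later list_del() never operates on stale pointers.

#include <linux/list.h>
#include <linux/types.h>

static void requeue_if_needed(struct list_head *entry, struct list_head *dst,
                              bool needs_revalidation)
{
        list_del_init(entry);           /* safe even if already detached */
        if (needs_revalidation)
                list_add_tail(entry, dst);
}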
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9c2195a2896d..8c34060e130f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1568,10 +1568,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, * the evicted list so that it gets validated again on the * next command submission. */ + list_del_init(&bo_va->base.vm_status); if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) list_add_tail(&bo_va->base.vm_status, &vm->evicted); - else - list_del_init(&bo_va->base.vm_status); } else { list_del_init(&bo_va->base.vm_status); } -- cgit v1.2.1 From 6197ae28911841369ff61ebbdf9d732ff6069138 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 19 Apr 2018 12:40:15 +0800 Subject: drm/amd/pp: Fix NULL point check error in smu_set_watermarks_for_clocks_ranges It is caused by 'commit d6c9a7dc86cd ("drm/amd/pp: Move common code to smu_helper.c")' Reviewed-by: Evan Quan Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c index 7c23741619b6..93a3d022ba47 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c @@ -657,7 +657,7 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table, uint32_t i; struct watermarks *table = wt_table; - if (!table || wm_with_clock_ranges) + if (!table || !wm_with_clock_ranges) return -EINVAL; if (wm_with_clock_ranges->num_wm_sets_dmif > 4 || wm_with_clock_ranges->num_wm_sets_mcif > 4) -- cgit v1.2.1 From bfa8eea29b12e403b391820b7ef5cf5c77ab0afe Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Wed, 18 Apr 2018 17:12:19 +0800 Subject: drm/amdgpu: init gfx9 aperture settings fix settings. 
Signed-off-by: Flora Cui Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +-- 2 files changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 05b2d34110b7..587a8731fa31 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1617,7 +1617,10 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev) tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE, SH_MEM_ALIGNMENT_MODE_UNALIGNED); WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, tmp); - tmp = adev->gmc.shared_aperture_start >> 48; + tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE, + (adev->gmc.private_aperture_start >> 48)); + tmp = REG_SET_FIELD(tmp, SH_MEM_BASES, SHARED_BASE, + (adev->gmc.shared_aperture_start >> 48)); WREG32_SOC15(GC, 0, mmSH_MEM_BASES, tmp); } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index e6b00b507d4d..6c9f7f999532 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -562,8 +562,7 @@ static int gmc_v9_0_early_init(void *handle) adev->gmc.shared_aperture_start = 0x2000000000000000ULL; adev->gmc.shared_aperture_end = adev->gmc.shared_aperture_start + (4ULL << 30) - 1; - adev->gmc.private_aperture_start = - adev->gmc.shared_aperture_end + 1; + adev->gmc.private_aperture_start = 0x1000000000000000ULL; adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; -- cgit v1.2.1 From bb475839eca7e3990f59a3b4e9e810635ef0ac4a Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Thu, 19 Apr 2018 13:17:26 +0800 Subject: drm/amdgpu: simplify bo_va list when vm bo update (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: fix compiling warning Signed-off-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8c34060e130f..6a372ca11ee3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1509,6 +1509,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct drm_mm_node *nodes; struct dma_fence *exclusive, **last_update; uint64_t flags; + uint32_t mem_type; int r; if (clear || !bo_va->base.bo) { @@ -1561,19 +1562,16 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, } spin_lock(&vm->status_lock); - if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { - unsigned mem_type = bo->tbo.mem.mem_type; + list_del_init(&bo_va->base.vm_status); - /* If the BO is not in its preferred location add it back to - * the evicted list so that it gets validated again on the - * next command submission. - */ - list_del_init(&bo_va->base.vm_status); - if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) - list_add_tail(&bo_va->base.vm_status, &vm->evicted); - } else { - list_del_init(&bo_va->base.vm_status); - } + /* If the BO is not in its preferred location add it back to + * the evicted list so that it gets validated again on the + * next command submission. 
+ */ + mem_type = bo->tbo.mem.mem_type; + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv && + !(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) + list_add_tail(&bo_va->base.vm_status, &vm->evicted); spin_unlock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); -- cgit v1.2.1 From 1a3132a1cc03abcf153d08f4eb471cd7d396f2a3 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Tue, 17 Apr 2018 21:49:51 +0800 Subject: drm/amd/powerplay: header file interface to SMU update update vega12 smu interface. Signed-off-by: Kenneth Feng Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index fb696e3d06cf..2f8a3b983cce 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -412,8 +412,10 @@ typedef struct { QuadraticInt_t ReservedEquation2; QuadraticInt_t ReservedEquation3; + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; - uint32_t Reserved[15]; + uint32_t Reserved[14]; -- cgit v1.2.1 From b1f223c02a3a2b41847f48f75797eba5979ea25d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Sun, 25 Mar 2018 10:10:25 +0200 Subject: drm/amdgpu: print DMA-buf status in debugfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just note if a BO was imported/exported. Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index c62c3dd4dcc6..7d3dc229fa47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -780,6 +780,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); struct seq_file *m = data; + struct dma_buf_attachment *attachment; + struct dma_buf *dma_buf; unsigned domain; const char *placement; unsigned pin_count; @@ -808,6 +810,15 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) pin_count = READ_ONCE(bo->pin_count); if (pin_count) seq_printf(m, " pin count %d", pin_count); + + dma_buf = READ_ONCE(bo->gem_base.dma_buf); + attachment = READ_ONCE(bo->gem_base.import_attach); + + if (attachment) + seq_printf(m, " imported from %p", dma_buf); + else if (dma_buf) + seq_printf(m, " exported as %p", dma_buf); + seq_printf(m, "\n"); return 0; -- cgit v1.2.1 From e0e93d03efa1c53012cc609fd48112df3e06da69 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Fri, 20 Apr 2018 13:55:39 +0800 Subject: drm/amd/powerplay: add registry key to disable ACG For the dummy ACG fuses,need to disable ACG, otherwise corruption will be caused. 
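A sketch of how the new mask is meant to be used, with a hypothetical helper name; the assumption that hwmgr->feature_mask is seeded from the driver's ppfeaturemask module parameter comes from the surrounding powerplay code, not from this patch:

/* Decide at init time whether ACG stays enabled; clearing the bit makes the
 * pptable code below write 0xFFFF thresholds so the SMU never engages ACG.
 */
static void vega12_apply_acg_policy(struct pp_hwmgr *hwmgr, bool acg_fuses_valid)
{
        if (!acg_fuses_valid)
                hwmgr->feature_mask &= ~PP_ACG_MASK;
}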
Signed-off-by: Kenneth Feng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 5 +++++ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 2 files changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c index 7fa1ba89ac54..888ddca902d8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c @@ -224,6 +224,11 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable ppsmc_pptable->AcgGfxclkSpreadPercent = smc_dpm_table.acggfxclkspreadpercent; ppsmc_pptable->AcgGfxclkSpreadFreq = smc_dpm_table.acggfxclkspreadfreq; + /* 0xFFFF will disable the ACG feature */ + if (!(hwmgr->feature_mask & PP_ACG_MASK)) { + ppsmc_pptable->AcgThresholdFreqHigh = 0xFFFF; + ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF; + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 9b3dd7dce4e2..2f203ec3d19c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -82,6 +82,7 @@ enum PP_FEATURE_MASK { PP_SOCCLK_DPM_MASK = 0x1000, PP_DCEFCLK_DPM_MASK = 0x2000, PP_OVERDRIVE_MASK = 0x4000, + PP_ACG_MASK = 0x10000, }; enum PHM_BackEnd_Magic { -- cgit v1.2.1 From cf671071334ebbf6c960f88383b35b99d5d53212 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 5 Dec 2017 18:48:48 +0800 Subject: drm/amdgpu: update psp gfx if header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Acked-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 67 ++++++++++++++++++++++----------- 1 file changed, 46 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index 8da6da90b1c9..0cf48d26c676 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -40,11 +40,20 @@ enum psp_gfx_crtl_cmd_id GFX_CTRL_CMD_ID_INIT_GPCOM_RING = 0x00020000, /* initialize GPCOM ring */ GFX_CTRL_CMD_ID_DESTROY_RINGS = 0x00030000, /* destroy rings */ GFX_CTRL_CMD_ID_CAN_INIT_RINGS = 0x00040000, /* is it allowed to initialized the rings */ + GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */ + GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */ + GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */ GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */ }; +/*----------------------------------------------------------------------------- + NOTE: All physical addresses used in this interface are actually + GPU Virtual Addresses. +*/ + + /* Control registers of the TEE Gfx interface. These are located in * SRBM-to-PSP mailbox registers (total 8 registers). 
*/ @@ -55,8 +64,8 @@ struct psp_gfx_ctrl volatile uint32_t rbi_rptr; /* +8 Read pointer (index) of RBI ring */ volatile uint32_t gpcom_wptr; /* +12 Write pointer (index) of GPCOM ring */ volatile uint32_t gpcom_rptr; /* +16 Read pointer (index) of GPCOM ring */ - volatile uint32_t ring_addr_lo; /* +20 bits [31:0] of physical address of ring buffer */ - volatile uint32_t ring_addr_hi; /* +24 bits [63:32] of physical address of ring buffer */ + volatile uint32_t ring_addr_lo; /* +20 bits [31:0] of GPU Virtual of ring buffer (VMID=0)*/ + volatile uint32_t ring_addr_hi; /* +24 bits [63:32] of GPU Virtual of ring buffer (VMID=0) */ volatile uint32_t ring_buf_size; /* +28 Ring buffer size (in bytes) */ }; @@ -78,6 +87,8 @@ enum psp_gfx_cmd_id GFX_CMD_ID_LOAD_ASD = 0x00000004, /* load ASD Driver */ GFX_CMD_ID_SETUP_TMR = 0x00000005, /* setup TMR region */ GFX_CMD_ID_LOAD_IP_FW = 0x00000006, /* load HW IP FW */ + GFX_CMD_ID_DESTROY_TMR = 0x00000007, /* destroy TMR region */ + GFX_CMD_ID_SAVE_RESTORE = 0x00000008, /* save/restore HW IP FW */ }; @@ -85,11 +96,11 @@ enum psp_gfx_cmd_id /* Command to load Trusted Application binary into PSP OS. */ struct psp_gfx_cmd_load_ta { - uint32_t app_phy_addr_lo; /* bits [31:0] of the physical address of the TA binary (must be 4 KB aligned) */ - uint32_t app_phy_addr_hi; /* bits [63:32] of the physical address of the TA binary */ + uint32_t app_phy_addr_lo; /* bits [31:0] of the GPU Virtual address of the TA binary (must be 4 KB aligned) */ + uint32_t app_phy_addr_hi; /* bits [63:32] of the GPU Virtual address of the TA binary */ uint32_t app_len; /* length of the TA binary in bytes */ - uint32_t cmd_buf_phy_addr_lo; /* bits [31:0] of the physical address of CMD buffer (must be 4 KB aligned) */ - uint32_t cmd_buf_phy_addr_hi; /* bits [63:32] of the physical address of CMD buffer */ + uint32_t cmd_buf_phy_addr_lo; /* bits [31:0] of the GPU Virtual address of CMD buffer (must be 4 KB aligned) */ + uint32_t cmd_buf_phy_addr_hi; /* bits [63:32] of the GPU Virtual address of CMD buffer */ uint32_t cmd_buf_len; /* length of the CMD buffer in bytes; must be multiple of 4 KB */ /* Note: CmdBufLen can be set to 0. In this case no persistent CMD buffer is provided @@ -111,8 +122,8 @@ struct psp_gfx_cmd_unload_ta */ struct psp_gfx_buf_desc { - uint32_t buf_phy_addr_lo; /* bits [31:0] of physical address of the buffer (must be 4 KB aligned) */ - uint32_t buf_phy_addr_hi; /* bits [63:32] of physical address of the buffer */ + uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of the buffer (must be 4 KB aligned) */ + uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of the buffer */ uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB and no bigger than 64 MB) */ }; @@ -145,8 +156,8 @@ struct psp_gfx_cmd_invoke_cmd /* Command to setup TMR region. 
*/ struct psp_gfx_cmd_setup_tmr { - uint32_t buf_phy_addr_lo; /* bits [31:0] of physical address of TMR buffer (must be 4 KB aligned) */ - uint32_t buf_phy_addr_hi; /* bits [63:32] of physical address of TMR buffer */ + uint32_t buf_phy_addr_lo; /* bits [31:0] of GPU Virtual address of TMR buffer (must be 4 KB aligned) */ + uint32_t buf_phy_addr_hi; /* bits [63:32] of GPU Virtual address of TMR buffer */ uint32_t buf_size; /* buffer size in bytes (must be multiple of 4 KB) */ }; @@ -174,18 +185,32 @@ enum psp_gfx_fw_type GFX_FW_TYPE_ISP = 16, GFX_FW_TYPE_ACP = 17, GFX_FW_TYPE_SMU = 18, + GFX_FW_TYPE_MMSCH = 19, + GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM = 20, + GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM = 21, + GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL = 22, + GFX_FW_TYPE_MAX = 23 }; /* Command to load HW IP FW. */ struct psp_gfx_cmd_load_ip_fw { - uint32_t fw_phy_addr_lo; /* bits [31:0] of physical address of FW location (must be 4 KB aligned) */ - uint32_t fw_phy_addr_hi; /* bits [63:32] of physical address of FW location */ + uint32_t fw_phy_addr_lo; /* bits [31:0] of GPU Virtual address of FW location (must be 4 KB aligned) */ + uint32_t fw_phy_addr_hi; /* bits [63:32] of GPU Virtual address of FW location */ uint32_t fw_size; /* FW buffer size in bytes */ enum psp_gfx_fw_type fw_type; /* FW type */ }; +/* Command to save/restore HW IP FW. */ +struct psp_gfx_cmd_save_restore_ip_fw +{ + uint32_t save_fw; /* if set, command is used for saving fw otherwise for resetoring*/ + uint32_t save_restore_addr_lo; /* bits [31:0] of FB address of GART memory used as save/restore buffer (must be 4 KB aligned) */ + uint32_t save_restore_addr_hi; /* bits [63:32] of FB address of GART memory used as save/restore buffer */ + uint32_t buf_size; /* Size of the save/restore buffer in bytes */ + enum psp_gfx_fw_type fw_type; /* FW type */ +}; /* All GFX ring buffer commands. */ union psp_gfx_commands @@ -195,7 +220,7 @@ union psp_gfx_commands struct psp_gfx_cmd_invoke_cmd cmd_invoke_cmd; struct psp_gfx_cmd_setup_tmr cmd_setup_tmr; struct psp_gfx_cmd_load_ip_fw cmd_load_ip_fw; - + struct psp_gfx_cmd_save_restore_ip_fw cmd_save_restore_ip_fw; }; @@ -226,8 +251,8 @@ struct psp_gfx_cmd_resp /* These fields are used for RBI only. 
They are all 0 in GPCOM commands */ - uint32_t resp_buf_addr_lo; /* +12 bits [31:0] of physical address of response buffer (must be 4 KB aligned) */ - uint32_t resp_buf_addr_hi; /* +16 bits [63:32] of physical address of response buffer */ + uint32_t resp_buf_addr_lo; /* +12 bits [31:0] of GPU Virtual address of response buffer (must be 4 KB aligned) */ + uint32_t resp_buf_addr_hi; /* +16 bits [63:32] of GPU Virtual address of response buffer */ uint32_t resp_offset; /* +20 offset within response buffer */ uint32_t resp_buf_size; /* +24 total size of the response buffer in bytes */ @@ -251,19 +276,19 @@ struct psp_gfx_cmd_resp /* Structure of the Ring Buffer Frame */ struct psp_gfx_rb_frame { - uint32_t cmd_buf_addr_lo; /* +0 bits [31:0] of physical address of command buffer (must be 4 KB aligned) */ - uint32_t cmd_buf_addr_hi; /* +4 bits [63:32] of physical address of command buffer */ + uint32_t cmd_buf_addr_lo; /* +0 bits [31:0] of GPU Virtual address of command buffer (must be 4 KB aligned) */ + uint32_t cmd_buf_addr_hi; /* +4 bits [63:32] of GPU Virtual address of command buffer */ uint32_t cmd_buf_size; /* +8 command buffer size in bytes */ - uint32_t fence_addr_lo; /* +12 bits [31:0] of physical address of Fence for this frame */ - uint32_t fence_addr_hi; /* +16 bits [63:32] of physical address of Fence for this frame */ + uint32_t fence_addr_lo; /* +12 bits [31:0] of GPU Virtual address of Fence for this frame */ + uint32_t fence_addr_hi; /* +16 bits [63:32] of GPU Virtual address of Fence for this frame */ uint32_t fence_value; /* +20 Fence value */ uint32_t sid_lo; /* +24 bits [31:0] of SID value (used only for RBI frames) */ uint32_t sid_hi; /* +28 bits [63:32] of SID value (used only for RBI frames) */ uint8_t vmid; /* +32 VMID value used for mapping of all addresses for this frame */ uint8_t frame_type; /* +33 1: destory context frame, 0: all other frames; used only for RBI frames */ uint8_t reserved1[2]; /* +34 reserved, must be 0 */ - uint32_t reserved2[7]; /* +40 reserved, must be 0 */ - /* total 64 bytes */ + uint32_t reserved2[7]; /* +36 reserved, must be 0 */ + /* total 64 bytes */ }; #endif /* _PSP_TEE_GFX_IF_H_ */ -- cgit v1.2.1 From d40e9b13c8bad15e56f2e8c9572f62c1229833a6 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 22 Jan 2018 17:51:35 +0800 Subject: drm/amdgpu: add new rlc firmware header format v2.1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 34 +++++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 19 +++++++++++++++++ 2 files changed, 51 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index dd6f98921918..84d652599d5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -161,8 +161,38 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr) le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes)); DRM_DEBUG("reg_list_separate_size_bytes: %u\n", le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes)); - DRM_DEBUG("reg_list_separate_size_bytes: %u\n", - le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes)); + DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n", + le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes)); + if (version_minor == 1) { + 
const struct rlc_firmware_header_v2_1 *v2_1 = + container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0); + DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n", + le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length)); + DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n", + le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver)); + DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n", + le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver)); + DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n", + le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes)); + DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n", + le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes)); + DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n", + le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver)); + DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n", + le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver)); + DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n", + le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes)); + DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n", + le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes)); + DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n", + le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver)); + DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n", + le32_to_cpu(v2_1->save_restore_list_srm_feature_ver)); + DRM_DEBUG("save_restore_list_srm_size_bytes %u\n", + le32_to_cpu(v2_1->save_restore_list_srm_size_bytes)); + DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n", + le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes)); + } } else { DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 30b5500dc152..0b262f4bb4fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -98,6 +98,24 @@ struct rlc_firmware_header_v2_0 { uint32_t reg_list_separate_array_offset_bytes; /* payload offset from the start of the header */ }; +/* version_major=2, version_minor=1 */ +struct rlc_firmware_header_v2_1 { + struct rlc_firmware_header_v2_0 v2_0; + uint32_t reg_list_format_direct_reg_list_length; /* length of direct reg list format array */ + uint32_t save_restore_list_cntl_ucode_ver; + uint32_t save_restore_list_cntl_feature_ver; + uint32_t save_restore_list_cntl_size_bytes; + uint32_t save_restore_list_cntl_offset_bytes; + uint32_t save_restore_list_gpm_ucode_ver; + uint32_t save_restore_list_gpm_feature_ver; + uint32_t save_restore_list_gpm_size_bytes; + uint32_t save_restore_list_gpm_offset_bytes; + uint32_t save_restore_list_srm_ucode_ver; + uint32_t save_restore_list_srm_feature_ver; + uint32_t save_restore_list_srm_size_bytes; + uint32_t save_restore_list_srm_offset_bytes; +}; + /* version_major=1, version_minor=0 */ struct sdma_firmware_header_v1_0 { struct common_firmware_header header; @@ -148,6 +166,7 @@ union amdgpu_firmware_header { struct gfx_firmware_header_v1_0 gfx; struct rlc_firmware_header_v1_0 rlc; struct rlc_firmware_header_v2_0 rlc_v2_0; + struct rlc_firmware_header_v2_1 rlc_v2_1; struct sdma_firmware_header_v1_0 sdma; struct sdma_firmware_header_v1_1 sdma_v1_1; struct gpu_info_firmware_header_v1_0 gpu_info; -- cgit v1.2.1 From 621a6318adea69b08a3652c64bc7cc0cb4dacfb4 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Mon, 22 Jan 2018 20:48:14 +0800 Subject: drm/amdgpu: add save restore list cntl gpm and srm firmware support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit RLC save/restore list cntl/gpm_mem/srm_mem ucodes are used for CGPG and gfxoff function. Signed-off-by: Huang Rui Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 15 +++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 36 ++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 17 +++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 3 ++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 55 +++++++++++++++++++++++++++++-- drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 9 +++++ 6 files changed, 132 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d64ef30fed47..5ad893915a85 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -774,9 +774,18 @@ struct amdgpu_rlc { u32 starting_offsets_start; u32 reg_list_format_size_bytes; u32 reg_list_size_bytes; + u32 reg_list_format_direct_reg_list_length; + u32 save_restore_list_cntl_size_bytes; + u32 save_restore_list_gpm_size_bytes; + u32 save_restore_list_srm_size_bytes; u32 *register_list_format; u32 *register_restore; + u8 *save_restore_list_cntl; + u8 *save_restore_list_gpm; + u8 *save_restore_list_srm; + + bool is_rlc_v2_1; }; #define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES @@ -943,6 +952,12 @@ struct amdgpu_gfx { uint32_t ce_feature_version; uint32_t pfp_feature_version; uint32_t rlc_feature_version; + uint32_t rlc_srlc_fw_version; + uint32_t rlc_srlc_feature_version; + uint32_t rlc_srlg_fw_version; + uint32_t rlc_srlg_feature_version; + uint32_t rlc_srls_fw_version; + uint32_t rlc_srls_feature_version; uint32_t mec_feature_version; uint32_t mec2_feature_version; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d602f8b14c58..eb4785e51573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -215,6 +215,18 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->ver = adev->gfx.rlc_fw_version; fw_info->feature = adev->gfx.rlc_feature_version; break; + case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL: + fw_info->ver = adev->gfx.rlc_srlc_fw_version; + fw_info->feature = adev->gfx.rlc_srlc_feature_version; + break; + case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM: + fw_info->ver = adev->gfx.rlc_srlg_fw_version; + fw_info->feature = adev->gfx.rlc_srlg_feature_version; + break; + case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM: + fw_info->ver = adev->gfx.rlc_srls_fw_version; + fw_info->feature = adev->gfx.rlc_srls_feature_version; + break; case AMDGPU_INFO_FW_GFX_MEC: if (query_fw->index == 0) { fw_info->ver = adev->gfx.mec_fw_version; @@ -1149,6 +1161,30 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data) seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); + /* RLC SAVE RESTORE LIST CNTL */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* RLC SAVE RESTORE LIST GPM MEM */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "RLC SRLG feature version: 
%u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* RLC SAVE RESTORE LIST SRM MEM */ + query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + /* MEC */ query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC; query_fw.index = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 84d652599d5b..0c74c09ef3b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -337,7 +337,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 && ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 && ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT && - ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT)) { + ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT && + ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL && + ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM && + ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) { ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes); memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data + @@ -359,6 +362,18 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev, le32_to_cpu(header->ucode_array_offset_bytes) + le32_to_cpu(cp_hdr->jt_offset) * 4), ucode->ucode_size); + } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) { + ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes; + memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl, + ucode->ucode_size); + } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) { + ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes; + memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm, + ucode->ucode_size); + } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { + ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes; + memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm, + ucode->ucode_size); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 0b262f4bb4fc..08e38579af24 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -187,6 +187,9 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_CP_MEC2, AMDGPU_UCODE_ID_CP_MEC2_JT, AMDGPU_UCODE_ID_RLC_G, + AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL, + AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM, + AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM, AMDGPU_UCODE_ID_STORAGE, AMDGPU_UCODE_ID_SMC, AMDGPU_UCODE_ID_UVD, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 587a8731fa31..73b76fa29bad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -41,7 +41,6 @@ #define GFX9_MEC_HPD_SIZE 2048 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L #define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L -#define GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH 34 #define mmPWR_MISC_CNTL_STATUS 0x0183 #define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 @@ -401,6 +400,27 @@ static void gfx_v9_0_free_microcode(struct amdgpu_device *adev) kfree(adev->gfx.rlc.register_list_format); } +static void gfx_v9_0_init_rlc_ext_microcode(struct amdgpu_device *adev) +{ + const struct rlc_firmware_header_v2_1 *rlc_hdr; + + rlc_hdr = (const struct 
rlc_firmware_header_v2_1 *)adev->gfx.rlc_fw->data; + adev->gfx.rlc_srlc_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_ucode_ver); + adev->gfx.rlc_srlc_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_cntl_feature_ver); + adev->gfx.rlc.save_restore_list_cntl_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_cntl_size_bytes); + adev->gfx.rlc.save_restore_list_cntl = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_cntl_offset_bytes); + adev->gfx.rlc_srlg_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_ucode_ver); + adev->gfx.rlc_srlg_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_gpm_feature_ver); + adev->gfx.rlc.save_restore_list_gpm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_gpm_size_bytes); + adev->gfx.rlc.save_restore_list_gpm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_gpm_offset_bytes); + adev->gfx.rlc_srls_fw_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_ucode_ver); + adev->gfx.rlc_srls_feature_version = le32_to_cpu(rlc_hdr->save_restore_list_srm_feature_ver); + adev->gfx.rlc.save_restore_list_srm_size_bytes = le32_to_cpu(rlc_hdr->save_restore_list_srm_size_bytes); + adev->gfx.rlc.save_restore_list_srm = (u8 *)rlc_hdr + le32_to_cpu(rlc_hdr->save_restore_list_srm_offset_bytes); + adev->gfx.rlc.reg_list_format_direct_reg_list_length = + le32_to_cpu(rlc_hdr->reg_list_format_direct_reg_list_length); +} + static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; @@ -412,6 +432,8 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) const struct rlc_firmware_header_v2_0 *rlc_hdr; unsigned int *tmp = NULL; unsigned int i = 0; + uint16_t version_major; + uint16_t version_minor; DRM_DEBUG("\n"); @@ -468,6 +490,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) goto out; err = amdgpu_ucode_validate(adev->gfx.rlc_fw); rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; + + version_major = le16_to_cpu(rlc_hdr->header.header_version_major); + version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor); + if (version_major == 2 && version_minor == 1) + adev->gfx.rlc.is_rlc_v2_1 = true; + adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version); adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version); adev->gfx.rlc.save_and_restore_offset = @@ -508,6 +536,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++) adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]); + if (adev->gfx.rlc.is_rlc_v2_1) + gfx_v9_0_init_rlc_ext_microcode(adev); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); if (err) @@ -566,6 +597,26 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) adev->firmware.fw_size += ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE); + if (adev->gfx.rlc.is_rlc_v2_1) { + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.save_restore_list_cntl_size_bytes, PAGE_SIZE); + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.save_restore_list_gpm_size_bytes, PAGE_SIZE); + + info = 
&adev->firmware.ucode[AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM]; + info->ucode_id = AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM; + info->fw = adev->gfx.rlc_fw; + adev->firmware.fw_size += + ALIGN(adev->gfx.rlc.save_restore_list_srm_size_bytes, PAGE_SIZE); + } + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1]; info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1; info->fw = adev->gfx.mec_fw; @@ -1781,7 +1832,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev) /* setup unique_indirect_regs array and indirect_start_offsets array */ gfx_v9_0_parse_ind_reg_list(register_list_format, - GFX9_RLC_FORMAT_DIRECT_REG_LIST_LENGTH, + adev->gfx.rlc.reg_list_format_direct_reg_list_length, adev->gfx.rlc.reg_list_format_size_bytes >> 2, unique_indirect_regs, &unique_indirect_reg_count, diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 8873d833a7f7..0ff136d02d9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -70,6 +70,15 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type * case AMDGPU_UCODE_ID_RLC_G: *type = GFX_FW_TYPE_RLC_G; break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL: + *type = GFX_FW_TYPE_RLC_RESTORE_LIST_CNTL; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM: + *type = GFX_FW_TYPE_RLC_RESTORE_LIST_GPM_MEM; + break; + case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM: + *type = GFX_FW_TYPE_RLC_RESTORE_LIST_SRM_MEM; + break; case AMDGPU_UCODE_ID_SMC: *type = GFX_FW_TYPE_SMU; break; -- cgit v1.2.1 From 72408a41d0d78dbbd7fe7e24849c683596c8b79a Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 21 Dec 2017 15:03:31 +0800 Subject: drm/amdgpu: enter rlc safe mode before set cgpg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Acked-by: Hawking Zhang Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 73b76fa29bad..69370f0df4b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3394,8 +3394,7 @@ static void gfx_v9_0_exit_rlc_safe_mode(struct amdgpu_device *adev) static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, bool enable) { - /* TODO: double check if we need to perform under safe mdoe */ - /* gfx_v9_0_enter_rlc_safe_mode(adev); */ + gfx_v9_0_enter_rlc_safe_mode(adev); if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { gfx_v9_0_enable_gfx_cg_power_gating(adev, true); @@ -3406,7 +3405,7 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); } - /* gfx_v9_0_exit_rlc_safe_mode(adev); */ + gfx_v9_0_exit_rlc_safe_mode(adev); } static void gfx_v9_0_update_gfx_mg_power_gating(struct amdgpu_device *adev, @@ -3797,7 +3796,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, } amdgpu_ring_write(ring, header); -BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ + BUG_ON(ib->gpu_addr & 0x3); /* Dword align */ amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | -- cgit v1.2.1 From a5acf930269e71c76a7e7ad6819a86919c752fb1 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 21 Dec 2017 15:48:27 +0800 Subject: drm/amdgpu: cleanup init power gating function MIME-Version: 
1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove gfx_v9_0_enable_sck_slow_down_on_power_up/down and CP power gating enabling functions because they only need to be called on setting power gating behavior. We keep it in set_powergating callback to enable/disable PG in late_init. Signed-off-by: Huang Rui Acked-by: Hawking Zhang Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 69370f0df4b4..eff1fd14b01f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2065,6 +2065,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad static void gfx_v9_0_init_pg(struct amdgpu_device *adev) { + if (!adev->gfx.rlc.is_rlc_v2_1) + return; + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | AMD_PG_SUPPORT_GFX_DMG | @@ -2075,24 +2078,9 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) gfx_v9_0_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); - if (adev->asic_type == CHIP_RAVEN) { - WREG32(mmRLC_JUMP_TABLE_RESTORE, - adev->gfx.rlc.cp_table_gpu_addr >> 8); - gfx_v9_0_init_gfx_power_gating(adev); - - if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { - gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true); - gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true); - } else { - gfx_v9_0_enable_sck_slow_down_on_power_up(adev, false); - gfx_v9_0_enable_sck_slow_down_on_power_down(adev, false); - } - - if (adev->pg_flags & AMD_PG_SUPPORT_CP) - gfx_v9_0_enable_cp_power_gating(adev, true); - else - gfx_v9_0_enable_cp_power_gating(adev, false); - } + WREG32(mmRLC_JUMP_TABLE_RESTORE, + adev->gfx.rlc.cp_table_gpu_addr >> 8); + gfx_v9_0_init_gfx_power_gating(adev); } } -- cgit v1.2.1 From 727b888f6643b69db2cad1a9f0ae5f8804fa12cd Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 21 Dec 2017 16:13:02 +0800 Subject: drm/amdgpu: revise init_rlc_save_restore_list behavior to support latest register_list_format/register_restore table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RLC save/restore list will be used on CGPG and GFXOFF function, it loads two bin table of register_list_format/register_restore in RLC firmware. 
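As a rough aid to reading the new parser below, here is a stand-alone user-space
sketch (not kernel code) of the walk that the revised gfx_v9_1_parse_ind_reg_list()
performs over the register_list_format blob: the first direct_len words are taken as
direct registers, and the rest is treated as indirect blocks of three-word entries
(two data words followed by an indirect register offset), each block terminated by
0xFFFFFFFF. The helper name parse_ind_reg_list(), the three-word entry layout and the
sample values in main() are illustrative assumptions, not taken from a firmware
specification.

#include <stdint.h>
#include <stdio.h>

#define LIST_END 0xFFFFFFFFu

static void parse_ind_reg_list(const uint32_t *fmt, int direct_len,
			       int list_size, uint32_t *unique,
			       int unique_max, int *starts, int *start_count)
{
	int i, idx;

	for (i = direct_len; i < list_size; i++) {
		starts[(*start_count)++] = i;	/* a new indirect block starts here */

		while (fmt[i] != LIST_END) {
			i += 2;			/* skip the two data words */

			/* reuse a matching slot or claim the first free one */
			for (idx = 0; idx < unique_max; idx++)
				if (unique[idx] == fmt[i] || !unique[idx])
					break;
			if (idx < unique_max && !unique[idx])
				unique[idx] = fmt[i];

			i++;			/* step past the register offset */
		}
	}
}

int main(void)
{
	/* two direct registers, then one indirect block with two entries (made-up values) */
	const uint32_t fmt[] = {
		0x1111, 0x2222,		/* direct portion */
		0x00a0, 0x1, 0x3001,	/* data, data, indirect register */
		0x00a4, 0x2, 0x3002,
		LIST_END,
	};
	uint32_t unique[8] = { 0 };
	int starts[8], start_count = 0, i;

	parse_ind_reg_list(fmt, 2, sizeof(fmt) / sizeof(fmt[0]),
			   unique, 8, starts, &start_count);

	for (i = 0; i < start_count; i++)
		printf("indirect block starts at word %d\n", starts[i]);
	for (i = 0; i < 8 && unique[i]; i++)
		printf("unique indirect reg 0x%x\n", (unsigned)unique[i]);
	return 0;
}

In the kernel, the collected unique offsets are then programmed into the
RLC_SRM_INDEX_CNTL_ADDR/DATA registers, as the hunks further down show.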
Signed-off-by: Huang Rui Acked-by: Hawking Zhang Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 141 +++++++++++++++++++++------------- 1 file changed, 87 insertions(+), 54 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index eff1fd14b01f..3abd91f27e31 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -184,6 +184,30 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000) }; +static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = +{ + mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0, + mmRLC_SRM_INDEX_CNTL_ADDR_1 - mmRLC_SRM_INDEX_CNTL_ADDR_0, + mmRLC_SRM_INDEX_CNTL_ADDR_2 - mmRLC_SRM_INDEX_CNTL_ADDR_0, + mmRLC_SRM_INDEX_CNTL_ADDR_3 - mmRLC_SRM_INDEX_CNTL_ADDR_0, + mmRLC_SRM_INDEX_CNTL_ADDR_4 - mmRLC_SRM_INDEX_CNTL_ADDR_0, + mmRLC_SRM_INDEX_CNTL_ADDR_5 - mmRLC_SRM_INDEX_CNTL_ADDR_0, + mmRLC_SRM_INDEX_CNTL_ADDR_6 - mmRLC_SRM_INDEX_CNTL_ADDR_0, + mmRLC_SRM_INDEX_CNTL_ADDR_7 - mmRLC_SRM_INDEX_CNTL_ADDR_0, +}; + +static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = +{ + mmRLC_SRM_INDEX_CNTL_DATA_0 - mmRLC_SRM_INDEX_CNTL_DATA_0, + mmRLC_SRM_INDEX_CNTL_DATA_1 - mmRLC_SRM_INDEX_CNTL_DATA_0, + mmRLC_SRM_INDEX_CNTL_DATA_2 - mmRLC_SRM_INDEX_CNTL_DATA_0, + mmRLC_SRM_INDEX_CNTL_DATA_3 - mmRLC_SRM_INDEX_CNTL_DATA_0, + mmRLC_SRM_INDEX_CNTL_DATA_4 - mmRLC_SRM_INDEX_CNTL_DATA_0, + mmRLC_SRM_INDEX_CNTL_DATA_5 - mmRLC_SRM_INDEX_CNTL_DATA_0, + mmRLC_SRM_INDEX_CNTL_DATA_6 - mmRLC_SRM_INDEX_CNTL_DATA_0, + mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, +}; + #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 @@ -1763,55 +1787,42 @@ static void gfx_v9_0_init_csb(struct amdgpu_device *adev) adev->gfx.rlc.clear_state_size); } -static void gfx_v9_0_parse_ind_reg_list(int *register_list_format, +static void gfx_v9_1_parse_ind_reg_list(int *register_list_format, int indirect_offset, int list_size, int *unique_indirect_regs, int *unique_indirect_reg_count, - int max_indirect_reg_count, int *indirect_start_offsets, - int *indirect_start_offsets_count, - int max_indirect_start_offsets_count) + int *indirect_start_offsets_count) { int idx; - bool new_entry = true; for (; indirect_offset < list_size; indirect_offset++) { + indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset; + *indirect_start_offsets_count = *indirect_start_offsets_count + 1; - if (new_entry) { - new_entry = false; - indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset; - *indirect_start_offsets_count = *indirect_start_offsets_count + 1; - BUG_ON(*indirect_start_offsets_count >= max_indirect_start_offsets_count); - } + while (register_list_format[indirect_offset] != 0xFFFFFFFF) { + indirect_offset += 2; - if (register_list_format[indirect_offset] == 0xFFFFFFFF) { - new_entry = true; - continue; - } + /* look for the matching indice */ + for (idx = 0; idx < *unique_indirect_reg_count; idx++) { + if (unique_indirect_regs[idx] == + register_list_format[indirect_offset] || + !unique_indirect_regs[idx]) + break; + } - indirect_offset += 2; + BUG_ON(idx >= *unique_indirect_reg_count); - /* look for the matching indice */ - for (idx = 0; idx < *unique_indirect_reg_count; idx++) { - if (unique_indirect_regs[idx] == - 
register_list_format[indirect_offset]) - break; - } + if (!unique_indirect_regs[idx]) + unique_indirect_regs[idx] = register_list_format[indirect_offset]; - if (idx >= *unique_indirect_reg_count) { - unique_indirect_regs[*unique_indirect_reg_count] = - register_list_format[indirect_offset]; - idx = *unique_indirect_reg_count; - *unique_indirect_reg_count = *unique_indirect_reg_count + 1; - BUG_ON(*unique_indirect_reg_count >= max_indirect_reg_count); + indirect_offset++; } - - register_list_format[indirect_offset] = idx; } } -static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev) +static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev) { int unique_indirect_regs[] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}; int unique_indirect_reg_count = 0; @@ -1820,7 +1831,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev) int indirect_start_offsets_count = 0; int list_size = 0; - int i = 0; + int i = 0, j = 0; u32 tmp = 0; u32 *register_list_format = @@ -1831,15 +1842,14 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev) adev->gfx.rlc.reg_list_format_size_bytes); /* setup unique_indirect_regs array and indirect_start_offsets array */ - gfx_v9_0_parse_ind_reg_list(register_list_format, - adev->gfx.rlc.reg_list_format_direct_reg_list_length, - adev->gfx.rlc.reg_list_format_size_bytes >> 2, - unique_indirect_regs, - &unique_indirect_reg_count, - ARRAY_SIZE(unique_indirect_regs), - indirect_start_offsets, - &indirect_start_offsets_count, - ARRAY_SIZE(indirect_start_offsets)); + unique_indirect_reg_count = ARRAY_SIZE(unique_indirect_regs); + gfx_v9_1_parse_ind_reg_list(register_list_format, + adev->gfx.rlc.reg_list_format_direct_reg_list_length, + adev->gfx.rlc.reg_list_format_size_bytes >> 2, + unique_indirect_regs, + &unique_indirect_reg_count, + indirect_start_offsets, + &indirect_start_offsets_count); /* enable auto inc in case it is disabled */ tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL)); @@ -1853,19 +1863,37 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev) WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA), adev->gfx.rlc.register_restore[i]); - /* load direct register */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_ADDR), 0); - for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++) - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_ARAM_DATA), - adev->gfx.rlc.register_restore[i]); - /* load indirect register */ WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_ADDR), adev->gfx.rlc.reg_list_format_start); - for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++) + + /* direct register portion */ + for (i = 0; i < adev->gfx.rlc.reg_list_format_direct_reg_list_length; i++) WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), register_list_format[i]); + /* indirect register portion */ + while (i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2)) { + if (register_list_format[i] == 0xFFFFFFFF) { + WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); + continue; + } + + WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); + WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, register_list_format[i++]); + + for (j = 0; j < unique_indirect_reg_count; j++) { + if (register_list_format[i] == unique_indirect_regs[j]) { + WREG32_SOC15(GC, 0, mmRLC_GPM_SCRATCH_DATA, j); + break; + } + } + + BUG_ON(j >= unique_indirect_reg_count); + + i++; + } + /* set save/restore list size */ list_size = adev->gfx.rlc.reg_list_size_bytes >> 2; list_size = 
list_size >> 1; @@ -1878,14 +1906,19 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev) adev->gfx.rlc.starting_offsets_start); for (i = 0; i < ARRAY_SIZE(indirect_start_offsets); i++) WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_SCRATCH_DATA), - indirect_start_offsets[i]); + indirect_start_offsets[i]); /* load unique indirect regs*/ for (i = 0; i < ARRAY_SIZE(unique_indirect_regs); i++) { - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + i, - unique_indirect_regs[i] & 0x3FFFF); - WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + i, - unique_indirect_regs[i] >> 20); + if (unique_indirect_regs[i] != 0) { + WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_ADDR_0) + + GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[i], + unique_indirect_regs[i] & 0x3FFFF); + + WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_INDEX_CNTL_DATA_0) + + GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[i], + unique_indirect_regs[i] >> 20); + } } kfree(register_list_format); @@ -2075,7 +2108,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) AMD_PG_SUPPORT_GDS | AMD_PG_SUPPORT_RLC_SMU_HS)) { gfx_v9_0_init_csb(adev); - gfx_v9_0_init_rlc_save_restore_list(adev); + gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); WREG32(mmRLC_JUMP_TABLE_RESTORE, -- cgit v1.2.1 From 0df3e67d343b6af7eb71f6353f93e4d0a5e952a7 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Wed, 6 Dec 2017 09:23:50 +0800 Subject: drm/amdgpu: add setting powergating method for gfx9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Acked-by: Hawking Zhang Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 3abd91f27e31..8d54207471d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3355,6 +3355,11 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; + r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_GATE); + if (r) + return r; + return 0; } -- cgit v1.2.1 From af15890df97d09e2faba2199b36f5e69bf129342 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 14 Dec 2017 13:38:13 +0800 Subject: drm/amd/powerplay: send CGPG smc message if PG is enabled for raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Acked-by: Hawking Zhang Acked-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 8 +++++++- drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 0f252265a753..f0727b4f1ebf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -206,12 +206,18 @@ static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + struct amdgpu_device *adev = hwmgr->adev; smu10_data->vcn_power_gated = true; smu10_data->isp_tileA_power_gated = true; 
smu10_data->isp_tileB_power_gated = true; - return 0; + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) + return smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetGfxCGPG, + true); + else + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h index 426bff2aad2b..5d07b6ea0a55 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h @@ -75,6 +75,7 @@ #define PPSMC_MSG_GetMinGfxclkFrequency 0x2C #define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D #define PPSMC_MSG_SoftReset 0x2E +#define PPSMC_MSG_SetGfxCGPG 0x2F #define PPSMC_MSG_SetSoftMaxGfxClk 0x30 #define PPSMC_MSG_SetHardMinGfxClk 0x31 #define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32 -- cgit v1.2.1 From fa7bd27d7352bfd57aed60a7e1b678bc1f475fc4 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 13 Mar 2018 15:13:46 +0800 Subject: drm/amdgpu: move PP_FEATURE_MASK to amd_shared header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It will be used not only for powerplay but also on amdgpu part in future patches. So move it into amd_shared header file. Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 2 -- drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 2 +- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2 +- drivers/gpu/drm/amd/include/amd_shared.h | 19 +++++++++++++++++++ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 19 ------------------- 5 files changed, 21 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 354c6dc99481..dd6203a0a6b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -52,8 +52,6 @@ enum amdgpu_dpm_event_src { AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 }; -#define SCLK_DEEP_SLEEP_MASK 0x8 - struct amdgpu_ps { u32 caps; /* vbios flags */ u32 class; /* vbios flags */ diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index be6b19951e6a..f48168fbdfe6 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -5903,7 +5903,7 @@ static int ci_dpm_init(struct amdgpu_device *adev) pi->pcie_dpm_key_disabled = 0; pi->thermal_sclk_dpm_enabled = 0; - if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK) + if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK) pi->caps_sclk_ds = true; else pi->caps_sclk_ds = false; diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index bc1720ea4959..ef668a321ef1 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2817,7 +2817,7 @@ static int kv_dpm_init(struct amdgpu_device *adev) pi->caps_tcp_ramping = true; } - if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK) + if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK) pi->caps_sclk_ds = true; else pi->caps_sclk_ds = false; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 9fa3aaef3f33..efeea9a9f27e 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -109,6 +109,25 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12) #define AMD_PG_SUPPORT_MMHUB (1 << 13) +enum PP_FEATURE_MASK { + PP_SCLK_DPM_MASK = 0x1, + PP_MCLK_DPM_MASK = 0x2, + PP_PCIE_DPM_MASK = 0x4, + 
PP_SCLK_DEEP_SLEEP_MASK = 0x8, + PP_POWER_CONTAINMENT_MASK = 0x10, + PP_UVD_HANDSHAKE_MASK = 0x20, + PP_SMC_VOLTAGE_CONTROL_MASK = 0x40, + PP_VBI_TIME_SUPPORT_MASK = 0x80, + PP_ULV_MASK = 0x100, + PP_ENABLE_GFX_CG_THRU_SMU = 0x200, + PP_CLOCK_STRETCH_MASK = 0x400, + PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800, + PP_SOCCLK_DPM_MASK = 0x1000, + PP_DCEFCLK_DPM_MASK = 0x2000, + PP_OVERDRIVE_MASK = 0x4000, + PP_ACG_MASK = 0x10000, +}; + struct amd_ip_funcs { /* Name of IP block */ char *name; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 2f203ec3d19c..0d2b3cebd9cf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -66,25 +66,6 @@ struct vi_dpm_table { #define PCIE_PERF_REQ_GEN2 3 #define PCIE_PERF_REQ_GEN3 4 -enum PP_FEATURE_MASK { - PP_SCLK_DPM_MASK = 0x1, - PP_MCLK_DPM_MASK = 0x2, - PP_PCIE_DPM_MASK = 0x4, - PP_SCLK_DEEP_SLEEP_MASK = 0x8, - PP_POWER_CONTAINMENT_MASK = 0x10, - PP_UVD_HANDSHAKE_MASK = 0x20, - PP_SMC_VOLTAGE_CONTROL_MASK = 0x40, - PP_VBI_TIME_SUPPORT_MASK = 0x80, - PP_ULV_MASK = 0x100, - PP_ENABLE_GFX_CG_THRU_SMU = 0x200, - PP_CLOCK_STRETCH_MASK = 0x400, - PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800, - PP_SOCCLK_DPM_MASK = 0x1000, - PP_DCEFCLK_DPM_MASK = 0x2000, - PP_OVERDRIVE_MASK = 0x4000, - PP_ACG_MASK = 0x10000, -}; - enum PHM_BackEnd_Magic { PHM_Dummy_Magic = 0xAA5555AA, PHM_RV770_Magic = 0xDCBAABCD, -- cgit v1.2.1 From 6f92ad2a1772ebaa5eb3d27c9c8dd8caf2e3cbdb Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 2 Mar 2018 14:16:06 +0800 Subject: drm/amdgpu: add gfxoff feature mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/amd_shared.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index efeea9a9f27e..33de33016bda 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -125,6 +125,7 @@ enum PP_FEATURE_MASK { PP_SOCCLK_DPM_MASK = 0x1000, PP_DCEFCLK_DPM_MASK = 0x2000, PP_OVERDRIVE_MASK = 0x4000, + PP_GFXOFF_MASK = 0x8000, PP_ACG_MASK = 0x10000, }; -- cgit v1.2.1 From 917d8614c4cdddfb257229e0fb3077b8842dd9e0 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 2 Mar 2018 14:40:53 +0800 Subject: drm/amdgpu: set gfxoff disabled by default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 7c17a0bc2cd2..998ba8e710de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -122,7 +122,7 @@ uint amdgpu_pg_mask = 0xffffffff; uint amdgpu_sdma_phase_quantum = 32; char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; -uint amdgpu_pp_feature_mask = 0xffffbfff; +uint amdgpu_pp_feature_mask = 0xffff3fff; /* gfxoff (bit 15) disabled by default */ int amdgpu_ngg = 0; int amdgpu_prim_buf_per_se = 0; int amdgpu_pos_buf_per_se = 0; -- cgit v1.2.1 From 
9c82214160ee5e2a1e4137612822a35dc0cc064b Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 2 Mar 2018 15:10:52 +0800 Subject: drm/amd/powerplay: add gfx off control function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gfx_off_control is used to be called for sending enabling/disabling gfxoff message. Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 36 ++++++++++++++++++++++- drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 2 files changed, 36 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f0727b4f1ebf..fde1e5c00a3c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -42,6 +42,13 @@ #define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */ #define SMC_RAM_END 0x40000 +#define mmPWR_MISC_CNTL_STATUS 0x0183 +#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0 +#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0 +#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1 +#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L +#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L + static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic; @@ -243,13 +250,31 @@ static int smu10_power_off_asic(struct pp_hwmgr *hwmgr) return smu10_reset_cc6_data(hwmgr); } +static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + struct amdgpu_device *adev = hwmgr->adev; + + reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS); + if ((reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) == + (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT)) + return true; + + return false; +} + static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr) { struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - if (smu10_data->gfx_off_controled_by_driver) + if (smu10_data->gfx_off_controled_by_driver) { smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff); + /* confirm gfx is back to "on" state */ + while (!smu10_is_gfx_on(hwmgr)) + msleep(1); + } + return 0; } @@ -273,6 +298,14 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) return smu10_enable_gfx_off(hwmgr); } +static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) +{ + if (enable) + return smu10_enable_gfx_off(hwmgr); + else + return smu10_disable_gfx_off(hwmgr); +} + static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *prequest_ps, const struct pp_power_state *pcurrent_ps) @@ -1060,6 +1093,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .power_state_set = smu10_set_power_state_tasks, .dynamic_state_management_disable = smu10_disable_dpm_tasks, .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu, + .gfx_off_control = smu10_gfx_off_control, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 0d2b3cebd9cf..3d9743f5bb45 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -296,6 +296,7 @@ struct pp_hwmgr_func { int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr, struct pp_display_clock_request *clock); int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct 
amd_pp_simple_clock_info *clocks); + int (*gfx_off_control)(struct pp_hwmgr *hwmgr, bool enable); int (*power_off_asic)(struct pp_hwmgr *hwmgr); int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); -- cgit v1.2.1 From 775b0c11e27fce7d204d2911220fd7eebcc074d0 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 2 Mar 2018 15:18:54 +0800 Subject: drm/amd/powerplay: enable/disable gfxoff through smu MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index bd0d387584ac..6976596449a8 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -222,10 +222,19 @@ static int pp_set_powergating_state(void *handle, { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + int ret; if (!hwmgr || !hwmgr->pm_en) return 0; + if (hwmgr->hwmgr_func->gfx_off_control) { + /* Enable/disable GFX off through SMU */ + ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, + state == AMD_PG_STATE_GATE); + if (ret) + pr_err("gfx off control failed!\n"); + } + if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { pr_info("%s was not implemented.\n", __func__); return 0; -- cgit v1.2.1 From 00f54b97d7de97c41cffaad83d32a9bf03edad89 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 27 Feb 2018 21:53:00 +0800 Subject: drm/amdgpu: use pp_feature member to store the mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ drivers/gpu/drm/amd/amdgpu/ci_dpm.c | 2 +- drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2 +- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 2 +- 5 files changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 5ad893915a85..75700552c71d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1430,6 +1430,7 @@ enum amd_hw_ip_block_type { struct amd_powerplay { void *pp_handle; const struct amd_pm_funcs *pp_funcs; + uint32_t pp_feature; }; #define AMDGPU_RESET_MAGIC_NUM 64 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5958e8112489..e8b57cf48555 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1545,6 +1545,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) return -EAGAIN; } + adev->powerplay.pp_feature = amdgpu_pp_feature_mask; + for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_ERROR("disabled ip block: %d <%s>\n", diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index f48168fbdfe6..a266dcf5daed 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ 
b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -5903,7 +5903,7 @@ static int ci_dpm_init(struct amdgpu_device *adev) pi->pcie_dpm_key_disabled = 0; pi->thermal_sclk_dpm_enabled = 0; - if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK) + if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) pi->caps_sclk_ds = true; else pi->caps_sclk_ds = false; diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index ef668a321ef1..17f7f074cedc 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2817,7 +2817,7 @@ static int kv_dpm_init(struct amdgpu_device *adev) pi->caps_tcp_ramping = true; } - if (amdgpu_pp_feature_mask & PP_SCLK_DEEP_SLEEP_MASK) + if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) pi->caps_sclk_ds = true; else pi->caps_sclk_ds = false; diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 6976596449a8..246f8e9e9451 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -53,7 +53,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev) mutex_init(&hwmgr->smu_lock); hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; - hwmgr->feature_mask = amdgpu_pp_feature_mask; + hwmgr->feature_mask = adev->powerplay.pp_feature; hwmgr->display_config = &adev->pm.pm_display_cfg; adev->powerplay.pp_handle = hwmgr; adev->powerplay.pp_funcs = &pp_dpm_funcs; -- cgit v1.2.1 From 1dedc62338accff01ce4d56302e1c55a6b43b3d6 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 13 Mar 2018 17:59:12 +0800 Subject: drm/amdgpu: clear gfxoff feature mask if the asic is not raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index bca67df29c8c..d1052b5e0ca8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -95,7 +95,8 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) hwmgr->smumgr_funcs = &ci_smu_funcs; ci_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | - PP_ENABLE_GFX_CG_THRU_SMU); + PP_ENABLE_GFX_CG_THRU_SMU | + PP_GFXOFF_MASK); hwmgr->pp_table_version = PP_TABLE_V0; hwmgr->od_enabled = false; smu7_init_function_pointers(hwmgr); @@ -103,9 +104,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case AMDGPU_FAMILY_CZ: hwmgr->od_enabled = false; hwmgr->smumgr_funcs = &smu8_smu_funcs; + hwmgr->feature_mask &= ~PP_GFXOFF_MASK; smu8_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_VI: + hwmgr->feature_mask &= ~PP_GFXOFF_MASK; switch (hwmgr->chip_id) { case CHIP_TOPAZ: hwmgr->smumgr_funcs = &iceland_smu_funcs; @@ -139,6 +142,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) smu7_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_AI: + hwmgr->feature_mask &= ~PP_GFXOFF_MASK; switch (hwmgr->chip_id) { case CHIP_VEGA10: hwmgr->smumgr_funcs = &vega10_smu_funcs; -- cgit v1.2.1 From 9667849bbb8d8a2b97798ba0972fe25d13ea8acf Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 13 Mar 2018 18:32:39 +0800 Subject: drm/amd/powerplay: add control gfxoff enabling in late init MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 246f8e9e9451..b493369e6d0f 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -180,6 +180,7 @@ static int pp_late_init(void *handle) { struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + int ret; if (hwmgr && hwmgr->pm_en) { mutex_lock(&hwmgr->smu_lock); @@ -189,6 +190,14 @@ static int pp_late_init(void *handle) } if (adev->pm.smu_prv_buffer_size != 0) pp_reserve_vram_for_smu(adev); + + if (hwmgr->hwmgr_func->gfx_off_control && + (hwmgr->feature_mask & PP_GFXOFF_MASK)) { + ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true); + if (ret) + pr_err("gfx off enabling failed!\n"); + } + return 0; } -- cgit v1.2.1 From b083369621e84dc0c8ec1ae7191d009f6f1c4d75 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 13 Mar 2018 18:39:48 +0800 Subject: drm/amdgpu: it should disable gfxoff when system is going to suspend MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e8b57cf48555..9e917f53f357 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1902,6 +1902,12 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) amdgpu_virt_request_full_gpu(adev, false); + /* ungate SMC block powergating */ + if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_SMC, + AMD_CG_STATE_UNGATE); + /* ungate SMC block first */ r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC, AMD_CG_STATE_UNGATE); -- cgit v1.2.1 From 151b5d7fd35876120dc744f93865e4c7dc2c1f36 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Tue, 27 Feb 2018 13:43:59 +0800 Subject: drm/amdgpu: fix to disable powergating in hw_fini MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We need enable CGPG and GFXOFF together. If only enable one of them, this system will get hang after startx (do draw command). So when gfxoff is disabled, it also need disable CGPG after that. 
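To picture the ordering this fix relies on (together with the gfxoff patches above),
here is a small stand-alone sketch, not kernel code: the helper names, the fake
status flag and the busy-wait are simplified stand-ins. Power gating is brought up as
CGPG plus GFXOFF and torn down in reverse, with GFXOFF disallowed and the GFX block
confirmed back "on" (the role of smu10_is_gfx_on() above) before CGPG is ungated.

#include <stdbool.h>
#include <stdio.h>

static bool gfx_block_on = true;	/* stands in for PWR_MISC_CNTL_STATUS */

static void smu_set_gfxoff(bool allow)
{
	/* pretend the GFX block powers down as soon as GFXOFF is allowed */
	gfx_block_on = !allow;
	printf("SMU: gfxoff %s\n", allow ? "allowed" : "disallowed");
}

static void set_cgpg(bool gate)
{
	printf("RLC: CGPG %s\n", gate ? "gated" : "ungated");
}

static void late_init(void)
{
	set_cgpg(true);		/* enable CGPG ... (order shown is illustrative) */
	smu_set_gfxoff(true);	/* ... and allow GFXOFF */
}

static void hw_fini(void)
{
	smu_set_gfxoff(false);	/* disallow GFXOFF first */
	while (!gfx_block_on)
		;		/* the kernel msleep()s until GFX reports on */
	set_cgpg(false);	/* only then ungate CGPG */
}

int main(void)
{
	late_init();
	hw_fini();
	return 0;
}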
Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 8d54207471d7..2c5e2a41632e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3137,6 +3137,9 @@ static int gfx_v9_0_hw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_UNGATE); + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); -- cgit v1.2.1 From 9ac4b0d95a7a554bb60d97fbee5fbfd1b73df50a Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 15 Dec 2017 14:34:57 +0800 Subject: drm/amdgpu: set CGPG if gfxoff is enabled for raven MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 65e781f05c24..90065766fffb 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -682,6 +682,11 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS; adev->pg_flags = AMD_PG_SUPPORT_SDMA; + if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_RLC_SMU_HS; + adev->external_rev_id = 0x1; break; default: -- cgit v1.2.1 From f5264548008a5cde7090c2b6b85c8d65cb86d2f7 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 14 Dec 2017 15:33:53 +0800 Subject: drm/amd/powerplay: use the flag to decide whether send gfxoff smc message MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index fde1e5c00a3c..7712eb62539a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -81,11 +81,15 @@ static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) smu10_data->thermal_auto_throttling_treshold = 0; smu10_data->is_nb_dpm_enabled = 1; smu10_data->dpm_flags = 1; - smu10_data->gfx_off_controled_by_driver = false; smu10_data->need_min_deep_sleep_dcefclk = true; smu10_data->num_active_display = 0; smu10_data->deep_sleep_dcefclk = 0; + if (hwmgr->feature_mask & PP_GFXOFF_MASK) + smu10_data->gfx_off_controled_by_driver = true; + else + smu10_data->gfx_off_controled_by_driver = false; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep); -- cgit v1.2.1 From 1d2361e5a6c60d7b142d19555c3e6240ffe93731 Mon Sep 17 00:00:00 2001 From: Samuel Li Date: Wed, 18 Apr 2018 15:06:02 -0400 Subject: drm/amdgpu: Rename amdgpu_display_framebuffer_domains() 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It returns supported domains for display, and domains actually used are to be decided later when pinned. Signed-off-by: Samuel Li Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_display.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +-- 5 files changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index b83ae998fe27..76ee8e04ff11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -189,7 +189,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc, goto cleanup; } - r = amdgpu_bo_pin(new_abo, amdgpu_display_framebuffer_domains(adev), &base); + r = amdgpu_bo_pin(new_abo, amdgpu_display_supported_domains(adev), &base); if (unlikely(r != 0)) { DRM_ERROR("failed to pin new abo buffer before flip\n"); goto unreserve; @@ -484,7 +484,7 @@ static const struct drm_framebuffer_funcs amdgpu_fb_funcs = { .create_handle = drm_gem_fb_create_handle, }; -uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev) +uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev) { uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index 2b11d808f297..f66e3e3fef0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -23,7 +23,7 @@ #ifndef __AMDGPU_DISPLAY_H__ #define __AMDGPU_DISPLAY_H__ -uint32_t amdgpu_display_framebuffer_domains(struct amdgpu_device *adev); +uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev); struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index ff89e84b34ce..bc5fd8ebab5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -137,7 +137,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, /* need to align pitch with crtc limits */ mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, fb_tiled); - domain = amdgpu_display_framebuffer_domains(adev); + domain = amdgpu_display_supported_domains(adev); height = ALIGN(mode_cmd->height, 8); size = mode_cmd->pitches[0] * height; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 713417b6d15d..4683626b065f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -215,7 +215,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf, struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_operation_ctx ctx = { true, false }; - u32 domain = amdgpu_display_framebuffer_domains(adev); + u32 domain = amdgpu_display_supported_domains(adev); int ret; bool reads = (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
index 2368ade4bae0..28d8c08efeeb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3049,12 +3049,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, return r; if (plane->type != DRM_PLANE_TYPE_CURSOR) - domain = amdgpu_display_framebuffer_domains(adev); + domain = amdgpu_display_supported_domains(adev); else domain = AMDGPU_GEM_DOMAIN_VRAM; r = amdgpu_bo_pin(rbo, domain, &afb->address); - amdgpu_bo_unreserve(rbo); if (unlikely(r != 0)) { -- cgit v1.2.1 From 9b3f217faf48603c91d4ca44a18e6ff74c3c1c0c Mon Sep 17 00:00:00 2001 From: Samuel Li Date: Wed, 18 Apr 2018 16:26:18 -0400 Subject: drm/amdgpu: Remove VRAM from shared bo domains. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes an issue introduced by change "allow framebuffer in GART memory as well" which could lead to a shared buffer ending up pinned in vram. Use GTT if it is included in the domain, otherwise return an error. Signed-off-by: Samuel Li Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index feece0a491a3..1985c08413c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -694,8 +694,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; /* A shared bo cannot be migrated to VRAM */ - if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM)) - return -EINVAL; + if (bo->prime_shared_count) { + if (domain & AMDGPU_GEM_DOMAIN_GTT) + domain = AMDGPU_GEM_DOMAIN_GTT; + else + return -EINVAL; + } if (bo->pin_count) { uint32_t mem_type = bo->tbo.mem.mem_type; -- cgit v1.2.1 From 8567f68147de1f09cc868b52b02a0c11dc048206 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Apr 2018 13:46:03 -0500 Subject: drm/amdgpu/pm: document power_dpm_force_performance_level Provide documentation for power_dpm_force_performance_level which is used to adjust things related to GPU power states. Acked-by: Felix Kuehling Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 53 ++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 744f105a2c75..ee11e92cc4d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -131,6 +131,59 @@ fail: return count; } + +/** + * DOC: power_dpm_force_performance_level + * + * The amdgpu driver provides a sysfs API for adjusting certain power + * related parameters. The file power_dpm_force_performance_level is + * used for this. It accepts the following arguments: + * - auto + * - low + * - high + * - manual + * - GPU fan + * - profile_standard + * - profile_min_sclk + * - profile_min_mclk + * - profile_peak + * + * auto + * + * When auto is selected, the driver will attempt to dynamically select + * the optimal power profile for current conditions in the driver. + * + * low + * + * When low is selected, the clocks are forced to the lowest power state. + * + * high + * + * When high is selected, the clocks are forced to the highest power state. 
+ * + * manual + * + * When manual is selected, the user can manually adjust which power states + * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, + * and pp_dpm_pcie files and adjust the power state transition heuristics + * via the pp_power_profile_mode sysfs file. + * + * profile_standard + * profile_min_sclk + * profile_min_mclk + * profile_peak + * + * When the profiling modes are selected, clock and power gating are + * disabled and the clocks are set for different profiling cases. This + * mode is recommended for profiling specific work loads where you do + * not want clock or power gating for clock fluctuation to interfere + * with your results. profile_standard sets the clocks to a fixed clock + * level which varies from asic to asic. profile_min_sclk forces the sclk + * to the lowest level. profile_min_mclk forces the mclk to the lowest level. + * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. + * + */ + static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, struct device_attribute *attr, char *buf) -- cgit v1.2.1 From ca8d40ca194dfb573e59a5e42b88da83e63a6630 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Apr 2018 13:56:41 -0500 Subject: drm/amdgpu/pm: document power_dpm_state This is a legacy file and is only provided for backwards compatibility. Acked-by: Felix Kuehling Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ee11e92cc4d2..e33e0f4c4a28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -77,6 +77,37 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) } } +/** + * DOC: power_dpm_state + * + * This is a legacy interface and is only provided for backwards compatibility. + * The amdgpu driver provides a sysfs API for adjusting certain power + * related parameters. The file power_dpm_state is used for this. + * It accepts the following arguments: + * - battery + * - balanced + * - performance + * + * battery + * + * On older GPUs, the vbios provided a special power state for battery + * operation. Selecting battery switched to this state. This is no + * longer provided on newer GPUs so the option does nothing in that case. + * + * balanced + * + * On older GPUs, the vbios provided a special power state for balanced + * operation. Selecting balanced switched to this state. This is no + * longer provided on newer GPUs so the option does nothing in that case. + * + * performance + * + * On older GPUs, the vbios provided a special power state for performance + * operation. Selecting performance switched to this state. This is no + * longer provided on newer GPUs so the option does nothing in that case. + * + */ + static ssize_t amdgpu_get_dpm_state(struct device *dev, struct device_attribute *attr, char *buf) -- cgit v1.2.1 From d54bb40f607d40fca60da0613c65005086653300 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Apr 2018 14:02:52 -0500 Subject: drm/amdgpu/pm: document pp_table This file is for uploading new powerplay tables. 
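For illustration only (not part of this patch), a minimal userspace sketch of the interface; the card0 path is an assumption and root privileges are required:

	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/class/drm/card0/device/pp_table";
		unsigned char buf[64 * 1024];
		size_t n;
		FILE *f = fopen(path, "rb");

		if (!f)
			return 1;
		n = fread(buf, 1, sizeof(buf), f);	/* reading dumps the current table */
		fclose(f);
		printf("pp_table: %zu bytes\n", n);

		f = fopen(path, "wb");			/* writing uploads a new table and */
		if (!f)					/* re-initializes powerplay with it */
			return 1;
		fwrite(buf, 1, n, f);			/* shown here re-uploading unchanged */
		fclose(f);
		return 0;
	}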
Acked-by: Felix Kuehling Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index e33e0f4c4a28..9982f1b1f8c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -408,6 +408,17 @@ fail: return count; } +/** + * DOC: pp_table + * + * The amdgpu driver provides a sysfs API for uploading new powerplay + * tables. The file pp_table is used for this. Reading the file + * will dump the current power play table. Writing to the file + * will attempt to upload a new powerplay table and re-initialize + * powerplay using that new table. + * + */ + static ssize_t amdgpu_get_pp_table(struct device *dev, struct device_attribute *attr, char *buf) -- cgit v1.2.1 From 271dc908732b72bb9b1ad22b7cd14e75df3612c5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Apr 2018 14:22:24 -0500 Subject: drm/amdgpu/pm: document pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie (v2) Used for manually masking dpm states. v2: drop comment about current state (Rex) Acked-by: Felix Kuehling Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 9982f1b1f8c4..07f2e9606337 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -539,6 +539,23 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, } +/** + * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie + * + * The amdgpu driver provides a sysfs API for adjusting what power levels + * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, + * and pp_dpm_pcie are used for this. + * + * Reading back the files will show you the available power levels within + * the power state and the clock information for those levels. + * + * To manually adjust these states, first select manual using + * power_dpm_force_performance_level. Writing a string of the level + * numbers to the file will select which levels you want to enable. + * E.g., writing 456 to the file will enable levels 4, 5, and 6. + * + */ + static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, struct device_attribute *attr, char *buf) -- cgit v1.2.1 From 6b2576f5bddae4c89f29481f387735ac99e256d5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Apr 2018 14:38:31 -0500 Subject: drm/amdgpu/pm: document pp_power_profile_mode sysfs file for adjusting power level heuristics. Acked-by: Felix Kuehling Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 07f2e9606337..d6e66414bb12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -780,6 +780,26 @@ fail: return count; } +/** + * DOC: pp_power_profile_mode + * + * The amdgpu driver provides a sysfs API for adjusting the heuristics + * related to switching between power levels in a power state. The file + * pp_power_profile_mode is used for this. 
+ * + * Reading this file outputs a list of all of the predefined power profiles + * and the relevant heuristics settings for that profile. + * + * To select a profile or create a custom profile, first select manual using + * power_dpm_force_performance_level. Writing the number of a predefined + * profile to pp_power_profile_mode will enable those heuristics. To + * create a custom set of heuristics, write a string of numbers to the file + * starting with the number of the custom profile along with a setting + * for each heuristic parameter. Due to differences across asic families + * the heuristic parameters vary from family to family. + * + */ + static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, struct device_attribute *attr, char *buf) -- cgit v1.2.1 From 4e418c3401867cccc3ba67973d1e03510da7c92d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Apr 2018 14:59:55 -0500 Subject: drm/amdgpu/pm: document pp_od_clk_voltage sysfs interface for fine grained clock and voltage control. Acked-by: Felix Kuehling Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index d6e66414bb12..ce8be467608d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -455,6 +455,29 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, return count; } +/** + * DOC: pp_od_clk_voltage + * + * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages + * in each power level within a power state. The pp_od_clk_voltage is used for + * this. + * + * Reading the file will display: + * - a list of engine clock levels and voltages labeled OD_SCLK + * - a list of memory clock levels and voltages labeled OD_MCLK + * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE + * + * To manually adjust these settings, first select manual using + * power_dpm_force_performance_level. Enter a new value for each + * level by writing a string that contains "s/m level clock voltage" to + * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz + * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at + * 810 mV. When you have edited all of the states as needed, write + * "c" (commit) to the file to commit your changes. If you want to reset to the + * default power levels, write "r" (reset) to the file to reset them. 
+ * + */ + static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, struct device_attribute *attr, const char *buf, -- cgit v1.2.1 From d10fb4a6f382474025f326bf90ee3b64396486ea Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 20 Apr 2018 12:57:10 +0800 Subject: drm/amd/pp: Change pstate_clk frequency unit to 10KHz on Rv to keep consistent with other asics Reviewed-by: Evan Quan Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 7712eb62539a..ef09073c88d9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -479,8 +479,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; - hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK; - hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK; + hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100; + hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100; return result; } -- cgit v1.2.1 From 21c77de35661152e118908a081b8a51e7bca7bb4 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 20 Apr 2018 13:03:15 +0800 Subject: drm/amd/pp: Use dynamic gfx_clk rather than hardcoded values Reviewed-by: Evan Quan Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 47 ++++++++++++++--------- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h | 2 - 2 files changed, 29 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index ef09073c88d9..be6d6e202819 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -383,7 +383,7 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr, static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr) { - int result; + uint32_t result; struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); DpmClocks_t *table = &(smu10_data->clock_table); @@ -429,11 +429,11 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency); result = smum_get_argument(hwmgr); - smu10_data->gfx_min_freq_limit = result * 100; + smu10_data->gfx_min_freq_limit = result / 10 * 1000; smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency); result = smum_get_argument(hwmgr); - smu10_data->gfx_max_freq_limit = result * 100; + smu10_data->gfx_max_freq_limit = result / 10 * 1000; return 0; } @@ -515,6 +515,8 @@ static int smu10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { + struct smu10_hwmgr *data = hwmgr->backend; + if (hwmgr->smu_version < 0x1E3700) { pr_info("smu firmware version too old, can not set dpm level\n"); return 0; @@ -525,7 +527,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_PEAK_GFXCLK); + data->gfx_max_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, SMU10_UMD_PSTATE_PEAK_FCLK); @@ -538,7 +540,7 @@ static int 
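Condensed sketch of the resulting flow (simplified from the hunks below; some fallback conditions are trimmed):

	/* At DPM-table setup: read the vddc window from the vbios ASIC
	 * profiling table, falling back to the sclk dependency table if
	 * the vbios values are missing or implausible.
	 */
	atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
	if (min_vddc == 0 || min_vddc > 2000)
		min_vddc = dep_sclk_table->entries[0].vddc;
	if (max_vddc == 0 || max_vddc > 2000)
		max_vddc = dep_sclk_table->entries[dep_sclk_table->count - 1].vddc;
	data->odn_dpm_table.min_vddc = min_vddc;
	data->odn_dpm_table.max_vddc = max_vddc;

	/* At OD-edit time: validate user requests against that window
	 * instead of the old hard-coded [first entry, 2000 mV] range.
	 */
	if (voltage < data->odn_dpm_table.min_vddc ||
	    voltage > data->odn_dpm_table.max_vddc)
		return false;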
smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_PEAK_GFXCLK); + data->gfx_max_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, SMU10_UMD_PSTATE_PEAK_FCLK); @@ -552,10 +554,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); break; case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: smum_send_msg_to_smc_with_parameter(hwmgr, @@ -595,7 +597,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_AUTO: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, SMU10_UMD_PSTATE_MIN_FCLK); @@ -608,7 +610,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_PEAK_GFXCLK); + data->gfx_max_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, SMU10_UMD_PSTATE_PEAK_FCLK); @@ -622,10 +624,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, case AMD_DPM_FORCED_LEVEL_LOW: smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxGfxClk, - SMU10_UMD_PSTATE_MIN_GFXCLK); + data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, SMU10_UMD_PSTATE_MIN_FCLK); @@ -773,21 +775,30 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, struct smu10_hwmgr *data = (struct smu10_hwmgr *)(hwmgr->backend); struct smu10_voltage_dependency_table *mclk_table = data->clock_vol_info.vdd_dep_on_fclk; - int i, now, size = 0; + uint32_t i, now, size = 0; switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency); now = smum_get_argument(hwmgr); + /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */ + if (now == data->gfx_max_freq_limit/100) + i = 2; + else if (now == data->gfx_min_freq_limit/100) + i = 0; + else + i = 1; + size += sprintf(buf + size, "0: %uMhz %s\n", - data->gfx_min_freq_limit / 100, - ((data->gfx_min_freq_limit / 100) - == now) ? "*" : ""); + data->gfx_min_freq_limit/100, + i == 0 ? "*" : ""); size += sprintf(buf + size, "1: %uMhz %s\n", - data->gfx_max_freq_limit / 100, - ((data->gfx_max_freq_limit / 100) - == now) ? "*" : ""); + i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK, + i == 1 ? "*" : ""); + size += sprintf(buf + size, "2: %uMhz %s\n", + data->gfx_max_freq_limit/100, + i == 2 ? 
"*" : ""); break; case PP_MCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h index f68b218b9bce..1fb296a996f3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h @@ -311,11 +311,9 @@ int smu10_init_function_pointers(struct pp_hwmgr *hwmgr); #define SMU10_UMD_PSTATE_FCLK 933 #define SMU10_UMD_PSTATE_VCE 0x03C00320 -#define SMU10_UMD_PSTATE_PEAK_GFXCLK 1100 #define SMU10_UMD_PSTATE_PEAK_SOCCLK 757 #define SMU10_UMD_PSTATE_PEAK_FCLK 1200 -#define SMU10_UMD_PSTATE_MIN_GFXCLK 200 #define SMU10_UMD_PSTATE_MIN_FCLK 400 #define SMU10_UMD_PSTATE_MIN_SOCCLK 200 #define SMU10_UMD_PSTATE_MIN_VCE 0x0190012C -- cgit v1.2.1 From ca6e0c5bdc44a2cd7152002191a8107fc566084f Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 18 Apr 2018 18:43:19 +0800 Subject: drm/amd/pp: Refine the OD state checking code in smu7 if vddc restore to default value, driver clear the bit of DPMTABLE_OD_UPDATE_VDDC and need to repopulate sclk and mclk table. 1. Remove variable i checking code. 2. move clear DPMTABLE_OD_UPDATE_VDDC bit to the end of the function to avoid sclk table will not be updated. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 720ac47d3365..965459326652 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4683,10 +4683,6 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) return; } } - if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { - data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; - } dep_table = table_info->vdd_dep_on_sclk; odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); @@ -4696,9 +4692,9 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) return; } } - if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { + if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; } } -- cgit v1.2.1 From d389d607e60809726fe818113c80f5fc3aac4675 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 18 Apr 2018 21:09:35 +0800 Subject: drm/amd/pp: Change voltage/clk range for OD feature on VI read vddc range from vbios. 
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 28 +++++++++++ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 3 ++ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 60 ++++++++++++++++-------- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 2 + 4 files changed, 73 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index 971fb5dfb620..d58be7eb8256 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1505,3 +1505,31 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, return 0; } + +void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc, + uint32_t *min_vddc) +{ + void *profile; + + profile = smu_atom_get_data_table(hwmgr->adev, + GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), + NULL, NULL, NULL); + + if (profile) { + switch (hwmgr->chip_id) { + case CHIP_TONGA: + case CHIP_FIJI: + *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4); + *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4); + break; + case CHIP_POLARIS11: + case CHIP_POLARIS10: + case CHIP_POLARIS12: + *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100); + *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100); + break; + default: + return; + } + } +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index c672a5069840..e1b5d6b0b548 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -320,5 +320,8 @@ extern int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t efuse_voltage_id); extern int atomctrl_get_leakage_id_from_efuse(struct pp_hwmgr *hwmgr, uint16_t *virtual_voltage_id); + +extern void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc, + uint32_t *min_vddc); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 965459326652..e1196372a6ba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -838,6 +838,33 @@ static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr) return 0; } +static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t min_vddc, max_vddc; + + if (!table_info) + return; + + dep_sclk_table = table_info->vdd_dep_on_sclk; + + atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc); + + if (min_vddc == 0 || min_vddc > 2000 + || min_vddc > dep_sclk_table->entries[0].vddc) + min_vddc = dep_sclk_table->entries[0].vddc; + + if (max_vddc == 0 || max_vddc > 2000 + || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc) + max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc; + + data->odn_dpm_table.min_vddc = min_vddc; + data->odn_dpm_table.max_vddc = max_vddc; +} + static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr 
*data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -856,8 +883,10 @@ static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) sizeof(struct smu7_dpm_table)); /* initialize ODN table */ - if (hwmgr->od_enabled) + if (hwmgr->od_enabled) { + smu7_setup_voltage_range_from_vbios(hwmgr); smu7_odn_initial_default_setting(hwmgr); + } return 0; } @@ -4605,36 +4634,27 @@ static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t min_vddc; - struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; - - if (table_info == NULL) - return false; - - dep_sclk_table = table_info->vdd_dep_on_sclk; - min_vddc = dep_sclk_table->entries[0].vddc; - - if (voltage < min_vddc || voltage > 2000) { - pr_info("OD voltage is out of range [%d - 2000] mV\n", min_vddc); + if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) { + pr_info("OD voltage is out of range [%d - %d] mV\n", + data->odn_dpm_table.min_vddc, + data->odn_dpm_table.max_vddc); return false; } if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { - if (data->vbios_boot_state.sclk_bootup_value > clk || + if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk || hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) { pr_info("OD engine clock is out of range [%d - %d] MHz\n", - data->vbios_boot_state.sclk_bootup_value, - hwmgr->platform_descriptor.overdriveLimit.engineClock / 100); + data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.engineClock/100); return false; } } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { - if (data->vbios_boot_state.mclk_bootup_value > clk || + if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk || hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) { pr_info("OD memory clock is out of range [%d - %d] MHz\n", - data->vbios_boot_state.mclk_bootup_value/100, - hwmgr->platform_descriptor.overdriveLimit.memoryClock / 100); + data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); return false; } } else { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index f40179c9ca97..51a776ed5906 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -184,6 +184,8 @@ struct smu7_odn_dpm_table { struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_sclk; struct smu7_odn_clock_voltage_dependency_table vdd_dependency_on_mclk; uint32_t odn_mclk_min_limit; + uint32_t min_vddc; + uint32_t max_vddc; }; struct profile_mode_setting { -- cgit v1.2.1 From a3c991f922f99160cb695f9d28e04cd8e818d6f9 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 19 Apr 2018 10:39:17 +0800 Subject: drm/amd/pp: Print out voltage/clock range in sysfs when user cat pp_od_clk_voltage add display info about the sclk/mclk/vddc range that user can overdrive output as: OD_SCLK: 0: 300MHz 900mV 1: 400MHz 912mV 2: 500MHz 925mV 3: 600MHz 937mV 4: 700MHz 950mV 5: 800MHz 975mV 6: 900MHz 987mV 7: 1000MHz 1000mV OD_MCLK: 0: 300MHz 900mV 1: 1500MHz 912mV OD_RANGE: SCLK: 300MHz 1200MHz MCLK: 300MHz 1500MHz VDDC: 700mV 1200mV also 1. remove unnecessary whitespace before a quoted newline 2. 
change unit of frequency Mhz to MHz Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 1 + drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 + drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 26 ++++++++++++++++++------ 3 files changed, 22 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index ce8be467608d..27d8dd77860d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -555,6 +555,7 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, if (adev->powerplay.pp_funcs->print_clock_levels) { size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); + size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); return size; } else { return snprintf(buf, PAGE_SIZE, "\n"); diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 01969b135ab4..06f08f34a110 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -94,6 +94,7 @@ enum pp_clock_type { PP_PCIE, OD_SCLK, OD_MCLK, + OD_RANGE, }; enum amd_pp_sensors { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index e1196372a6ba..232ea6fc30f4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4335,22 +4335,36 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_SCLK: if (hwmgr->od_enabled) { - size = sprintf(buf, "%s: \n", "OD_SCLK"); + size = sprintf(buf, "%s:\n", "OD_SCLK"); for (i = 0; i < odn_sclk_table->num_of_pl; i++) - size += sprintf(buf + size, "%d: %10uMhz %10u mV\n", - i, odn_sclk_table->entries[i].clock / 100, + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", + i, odn_sclk_table->entries[i].clock/100, odn_sclk_table->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size = sprintf(buf, "%s: \n", "OD_MCLK"); + size = sprintf(buf, "%s:\n", "OD_MCLK"); for (i = 0; i < odn_mclk_table->num_of_pl; i++) - size += sprintf(buf + size, "%d: %10uMhz %10u mV\n", - i, odn_mclk_table->entries[i].clock / 100, + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", + i, odn_mclk_table->entries[i].clock/100, odn_mclk_table->entries[i].vddc); } break; + case OD_RANGE: + if (hwmgr->od_enabled) { + size = sprintf(buf, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", + data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.engineClock/100); + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", + data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", + data->odn_dpm_table.min_vddc, + data->odn_dpm_table.max_vddc); + } + break; default: break; } -- cgit v1.2.1 From 9e70b539292652d31091568f89e73b54e3a4f79d Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Sat, 21 Apr 2018 14:09:59 -0500 Subject: drm/amdgpu/powerplay: actually return the power with the new query MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set query to the power value so we actually return it. Fixes no power value returned on asics with the new query. 
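Condensed from the hunk below (illustrative; the surrounding retry logic is unchanged):

	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
	tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
	*query = tmp;	/* previously never stored, so callers got no power value */
	if (tmp != 0)
		return 0;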
Tested-by: Dieter Nützel Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 232ea6fc30f4..c9dd0bec1e24 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3369,6 +3369,7 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query) smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0); tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0); + *query = tmp; if (tmp != 0) return 0; -- cgit v1.2.1 From 6c8d74caa2fa33908ecd07fb1cf1b7bc629b367a Mon Sep 17 00:00:00 2001 From: Samuel Li Date: Wed, 18 Apr 2018 16:15:52 -0400 Subject: drm/amdgpu: Enable scatter gather display support Enables sg display if vram size <= THRESHOLD(256M); otherwise still use vram as display buffer. This patch fixed some potention issues introduced by change "allow framebuffer in GART memory as well" due to CZ/ST hardware limitation. v2: Change default setting to auto. v3: Move some logic from amdgpu_display_framebuffer_domains() to pin function, suggested by Christian. v4: Split into several patches. v5: Drop module parameter for now. Signed-off-by: Samuel Li Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 75700552c71d..03a2c0be0bf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -138,6 +138,7 @@ extern int amdgpu_si_support; extern int amdgpu_cik_support; #endif +#define AMDGPU_SG_THRESHOLD (256*1024*1024) #define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */ #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 1985c08413c6..e62153a86001 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -701,6 +701,15 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; } + /* This assumes only APU display buffers are pinned with (VRAM|GTT). 
+ * See function amdgpu_display_supported_domains() + */ + if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { + domain = AMDGPU_GEM_DOMAIN_VRAM; + if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) + domain = AMDGPU_GEM_DOMAIN_GTT; + } + if (bo->pin_count) { uint32_t mem_type = bo->tbo.mem.mem_type; -- cgit v1.2.1 From 8239f57ac3e9bf9ad0cf4d396ebfa721e91ac611 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Mon, 23 Apr 2018 17:21:21 +0800 Subject: drm/amdgpu: bo could be null when access in vm bo update Signed-off-by: Junwei Zhang Reviewed-by: David Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 6a372ca11ee3..1c00f1a56e8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1509,7 +1509,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct drm_mm_node *nodes; struct dma_fence *exclusive, **last_update; uint64_t flags; - uint32_t mem_type; int r; if (clear || !bo_va->base.bo) { @@ -1568,9 +1567,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, * the evicted list so that it gets validated again on the * next command submission. */ - mem_type = bo->tbo.mem.mem_type; if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv && - !(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) + !(bo->preferred_domains & + amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))) list_add_tail(&bo_va->base.vm_status, &vm->evicted); spin_unlock(&vm->status_lock); -- cgit v1.2.1 From 38610f15a7ad7a914e4fd0a9a5a6c386700b8ba0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolai=20H=C3=A4hnle?= Date: Thu, 12 Apr 2018 16:34:19 +0200 Subject: drm/amdgpu: set COMPUTE_PGM_RSRC1 for SGPR/VGPR clearing shaders MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Otherwise, the SQ may skip some of the register writes, or shader waves may be allocated where we don't expect them, so that as a result we don't actually reset all of the register SRAMs. This can lead to spurious ECC errors later on if a shader uses an uninitialized register. 
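The new values can be decoded as follows (worked sketch; the GFX8 COMPUTE_PGM_RSRC1 field layout is stated here as an assumption, and the resulting counts match the in-line comments of this patch):

	/* VGPRS = bits [5:0]: (VGPRS + 1) * 4 VGPRs per wave
	 * SGPRS = bits [9:6]: (SGPRS + 1) * 8 SGPRs per wave
	 * BULKY = bit 24
	 */
	static unsigned int rsrc1_vgprs(u32 rsrc1)
	{
		return ((rsrc1 & 0x3f) + 1) * 4;
	}

	static unsigned int rsrc1_sgprs(u32 rsrc1)
	{
		return (((rsrc1 >> 6) & 0xf) + 1) * 8;
	}

	/* rsrc1_vgprs(0x100004f) == 64, rsrc1_sgprs(0x100004f) == 16, BULKY set
	 * rsrc1_vgprs(0x0000240) == 4,  rsrc1_sgprs(0x0000240) == 80
	 * i.e. the clearing shaders are now guaranteed enough registers to
	 * actually touch every VGPR/SGPR bank they are meant to reset.
	 */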
Signed-off-by: Nicolai Hähnle Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b0e591eaa71a..e14263fca1c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] = static const u32 vgpr_init_regs[] = { mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff, - mmCOMPUTE_RESOURCE_LIMITS, 0, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ mmCOMPUTE_NUM_THREAD_X, 256*4, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, @@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] = static const u32 sgpr1_init_regs[] = { mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f, - mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */ mmCOMPUTE_NUM_THREAD_X, 256*5, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, @@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] = mmCOMPUTE_NUM_THREAD_X, 256*5, mmCOMPUTE_NUM_THREAD_Y, 1, mmCOMPUTE_NUM_THREAD_Z, 1, + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */ mmCOMPUTE_PGM_RSRC2, 20, mmCOMPUTE_USER_DATA_0, 0xedcedc00, mmCOMPUTE_USER_DATA_1, 0xedcedc01, -- cgit v1.2.1 From 48edde3959e2a538ff963e6dbdc9c9adca8b159b Mon Sep 17 00:00:00 2001 From: welu Date: Tue, 24 Apr 2018 09:13:20 -0400 Subject: drm/amdgpu: change pp_dpm clk/mclk/pcie input format. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. support more than 8 values when setting get_pp_dpm_mclk/ sclk/pcie, the former design just parse command format like "echo xxxx > pp_dpm_sclk" and current can parse "echo xx xxx xxxx > pp_dpm_sclk" whose operation is more user-friendly and convinent and can offer more values; 2. be compatible with former design like "xx". 3. add DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_pcie Bug:KFD-385 Signed-off-by: welu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 103 +++++++++++++++++++-------------- 1 file changed, 59 insertions(+), 44 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 27d8dd77860d..d9802d938e33 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -574,10 +574,10 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, * the power state and the clock information for those levels. * * To manually adjust these states, first select manual using - * power_dpm_force_performance_level. Writing a string of the level - * numbers to the file will select which levels you want to enable. - * E.g., writing 456 to the file will enable levels 4, 5, and 6. - * + * power_dpm_force_performance_level. 
+ * Secondly,Enter a new value for each level by inputing a string that + * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" + * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6. */ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, @@ -602,23 +602,27 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int ret; long level; - uint32_t i, mask = 0; - char sub_str[2]; + uint32_t mask = 0; + char *sub_str = NULL; + char *tmp; + char buf_cpy[count]; + const char delimiter[3] = {' ', '\n', '\0'}; - for (i = 0; i < strlen(buf); i++) { - if (*(buf + i) == '\n') - continue; - sub_str[0] = *(buf + i); - sub_str[1] = '\0'; - ret = kstrtol(sub_str, 0, &level); + memcpy(buf_cpy, buf, count+1); + tmp = buf_cpy; + while (tmp[0]) { + sub_str = strsep(&tmp, delimiter); + if (strlen(sub_str)) { + ret = kstrtol(sub_str, 0, &level); - if (ret) { - count = -EINVAL; - goto fail; - } - mask |= 1 << level; + if (ret) { + count = -EINVAL; + goto fail; + } + mask |= 1 << level; + } else + break; } - if (adev->powerplay.pp_funcs->force_clock_level) amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); @@ -648,21 +652,26 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int ret; long level; - uint32_t i, mask = 0; - char sub_str[2]; + uint32_t mask = 0; + char *sub_str = NULL; + char *tmp; + char buf_cpy[count]; + const char delimiter[3] = {' ', '\n', '\0'}; - for (i = 0; i < strlen(buf); i++) { - if (*(buf + i) == '\n') - continue; - sub_str[0] = *(buf + i); - sub_str[1] = '\0'; - ret = kstrtol(sub_str, 0, &level); + memcpy(buf_cpy, buf, count+1); + tmp = buf_cpy; + while (tmp[0]) { + sub_str = strsep(&tmp, delimiter); + if (strlen(sub_str)) { + ret = kstrtol(sub_str, 0, &level); - if (ret) { - count = -EINVAL; - goto fail; - } - mask |= 1 << level; + if (ret) { + count = -EINVAL; + goto fail; + } + mask |= 1 << level; + } else + break; } if (adev->powerplay.pp_funcs->force_clock_level) amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); @@ -693,21 +702,27 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, struct amdgpu_device *adev = ddev->dev_private; int ret; long level; - uint32_t i, mask = 0; - char sub_str[2]; + uint32_t mask = 0; + char *sub_str = NULL; + char *tmp; + char buf_cpy[count]; + const char delimiter[3] = {' ', '\n', '\0'}; - for (i = 0; i < strlen(buf); i++) { - if (*(buf + i) == '\n') - continue; - sub_str[0] = *(buf + i); - sub_str[1] = '\0'; - ret = kstrtol(sub_str, 0, &level); + memcpy(buf_cpy, buf, count+1); + tmp = buf_cpy; - if (ret) { - count = -EINVAL; - goto fail; - } - mask |= 1 << level; + while (tmp[0]) { + sub_str = strsep(&tmp, delimiter); + if (strlen(sub_str)) { + ret = kstrtol(sub_str, 0, &level); + + if (ret) { + count = -EINVAL; + goto fail; + } + mask |= 1 << level; + } else + break; } if (adev->powerplay.pp_funcs->force_clock_level) amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); -- cgit v1.2.1 From 09daf474d27aeb9fbd2f665b613d98c76f1e84f0 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 24 Apr 2018 15:15:34 +0200 Subject: drm/amdgpu: fix amdgpu_atpx_get_client_id()'s return type The method struct vga_switcheroo_handler::get_client_id() is defined as returning an 'enum vga_switcheroo_client_id' but the implementation in this driver, amdgpu_atpx_get_client_id(), returns an 'int'. Fix this by returning 'enum vga_switcheroo_client_id' in this driver too. 
Signed-off-by: Luc Van Oostenryck Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 1ae5ae8c45a4..1bcb2b247335 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -550,7 +550,7 @@ static int amdgpu_atpx_init(void) * look up whether we are the integrated or discrete GPU (all asics). * Returns the client id. */ -static int amdgpu_atpx_get_client_id(struct pci_dev *pdev) +static enum vga_switcheroo_client_id amdgpu_atpx_get_client_id(struct pci_dev *pdev) { if (amdgpu_atpx_priv.dhandle == ACPI_HANDLE(&pdev->dev)) return VGA_SWITCHEROO_IGD; -- cgit v1.2.1 From ba9ca0886dc0541ac1a716b3cbd43f640a1ce8c4 Mon Sep 17 00:00:00 2001 From: Luc Van Oostenryck Date: Tue, 24 Apr 2018 15:14:18 +0200 Subject: drm/admgpu: fix mode_valid's return type The method struct drm_connector_helper_funcs::mode_valid is defined as returning an 'enum drm_mode_status' but the driver implementation for this method uses an 'int' for it. Fix this by using 'enum drm_mode_status' in the driver too. Reviewed-by: Harry Wentland Signed-off-by: Luc Van Oostenryck Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 96501ff0e55b..8e66851eb427 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -691,7 +691,7 @@ static int amdgpu_connector_lvds_get_modes(struct drm_connector *connector) return ret; } -static int amdgpu_connector_lvds_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_lvds_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector); @@ -843,7 +843,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) return ret; } -static int amdgpu_connector_vga_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_vga_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1172,7 +1172,7 @@ static void amdgpu_connector_dvi_force(struct drm_connector *connector) amdgpu_connector->use_digital = true; } -static int amdgpu_connector_dvi_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_dvi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct drm_device *dev = connector->dev; @@ -1448,7 +1448,7 @@ out: return ret; } -static int amdgpu_connector_dp_mode_valid(struct drm_connector *connector, +static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 89b2286a9d6b..6454cc371f57 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -327,7 +327,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector) return 0; } -static int dce_virtual_mode_valid(struct drm_connector *connector, +static enum drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { return MODE_OK; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 28d8c08efeeb..656a01891f6c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2838,7 +2838,7 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) create_eml_sink(aconnector); } -int amdgpu_dm_connector_mode_valid(struct drm_connector *connector, +enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { int result = MODE_ERROR; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 005cf0d2dc34..d5aa89ad5571 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -247,7 +247,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, struct dc_link *link, int link_index); -int amdgpu_dm_connector_mode_valid(struct drm_connector *connector, +enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); void dm_restore_drm_connector_state(struct drm_device *dev, -- cgit v1.2.1 From c5a4484941be553b37facd681daf990d040cce81 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 18 Apr 2018 18:46:07 +0800 Subject: drm/amd/pp: Add OVERDRIVE support on Vega10 (v2) when bit14 in module parameter ppfeaturemask was set. od feature will be enabled on Vega10 except vbios not support. user can read od range by reading sysfs pp_od_clk_voltage, cat pp_od_clk_voltage OD_SCLK: 0: 852Mhz 800mV 1: 991Mhz 900mV 2: 1138Mhz 950mV 3: 1269Mhz 1000mV 4: 1348Mhz 1050mV 5: 1399Mhz 1100mV 6: 1440Mhz 1150mV 7: 1500Mhz 1200mV OD_MCLK: 0: 167Mhz 800mV 1: 500Mhz 800mV 2: 800Mhz 950mV 3: 945Mhz 1000mV OD_RANGE: SCLK: 852MHz 2200MHz MCLK: 167MHz 1500MHz VDDC: 800mV 1200mV and can configure the clock/voltage by writing pp_od_clk_voltage for example: echo "s 0 900 820">pp_od_clk_voltage to change the sclk/vddc to 900MHz and 820 mV in dpm level0. echo "r" to change the clk/voltage to default value. 
echo "c">pp_od_clk_voltage to commit the change v2: squash in warning fix (Alex) Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 719 +++++++++++---------- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 26 +- .../gpu/drm/amd/powerplay/inc/hardwaremanager.h | 6 +- 3 files changed, 390 insertions(+), 361 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 384aa07206c0..748612074d20 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -285,6 +285,48 @@ static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) return 0; } +static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct vega10_odn_vddc_lookup_table *od_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3]; + struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3]; + uint32_t i; + + od_lookup_table = &odn_table->vddc_lookup_table; + vddc_lookup_table = table_info->vddc_lookup_table; + + for (i = 0; i < vddc_lookup_table->count; i++) + od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd; + + od_lookup_table->count = vddc_lookup_table->count; + + dep_table[0] = table_info->vdd_dep_on_sclk; + dep_table[1] = table_info->vdd_dep_on_mclk; + dep_table[2] = table_info->vdd_dep_on_socclk; + od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk; + od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk; + od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk; + + for (i = 0; i < 3; i++) + smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]); + + if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000) + odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc; + if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000) + odn_table->min_vddc = dep_table[0]->entries[0].vddc; + + i = od_table[2]->count - 1; + od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock; + od_table[2]->entries[i].vddc = odn_table->max_vddc; + + return 0; +} + static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) { struct vega10_hwmgr *data = hwmgr->backend; @@ -421,7 +463,6 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) /* ACG firmware has major version 5 */ if ((hwmgr->smu_version & 0xff000000) == 0x5000000) data->smu_features[GNLD_ACG].supported = true; - if (data->registry_data.didt_support) data->smu_features[GNLD_DIDT].supported = true; @@ -1360,48 +1401,6 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) memcpy(&(data->golden_dpm_table), &(data->dpm_table), sizeof(struct vega10_dpm_table)); - if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || - PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { - data->odn_dpm_table.odn_core_clock_dpm_levels.num_of_pl = - data->dpm_table.gfx_table.count; - for (i = 0; i < data->dpm_table.gfx_table.count; i++) { - data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].clock = - 
data->dpm_table.gfx_table.dpm_levels[i].value; - data->odn_dpm_table.odn_core_clock_dpm_levels.entries[i].enabled = true; - } - - data->odn_dpm_table.vdd_dependency_on_sclk.count = - dep_gfx_table->count; - for (i = 0; i < dep_gfx_table->count; i++) { - data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk = - dep_gfx_table->entries[i].clk; - data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd = - dep_gfx_table->entries[i].vddInd; - data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable = - dep_gfx_table->entries[i].cks_enable; - data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset = - dep_gfx_table->entries[i].cks_voffset; - } - - data->odn_dpm_table.odn_memory_clock_dpm_levels.num_of_pl = - data->dpm_table.mem_table.count; - for (i = 0; i < data->dpm_table.mem_table.count; i++) { - data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].clock = - data->dpm_table.mem_table.dpm_levels[i].value; - data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[i].enabled = true; - } - - data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count; - for (i = 0; i < dep_mclk_table->count; i++) { - data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk = - dep_mclk_table->entries[i].clk; - data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd = - dep_mclk_table->entries[i].vddInd; - data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci = - dep_mclk_table->entries[i].vddci; - } - } - return 0; } @@ -1504,18 +1503,18 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr, { struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk = - table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk; struct vega10_hwmgr *data = hwmgr->backend; struct pp_atomfwctrl_clock_dividers_soc15 dividers; uint32_t gfx_max_clock = hwmgr->platform_descriptor.overdriveLimit.engineClock; uint32_t i = 0; - if (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_VDDC) + if (hwmgr->od_enabled) dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) - &(data->odn_dpm_table.vdd_dependency_on_sclk); + &(data->odn_dpm_table.vdd_dep_on_sclk); + else + dep_on_sclk = table_info->vdd_dep_on_sclk; PP_ASSERT_WITH_CODE(dep_on_sclk, "Invalid SOC_VDD-GFX_CLK Dependency Table!", @@ -1567,23 +1566,32 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr, uint32_t soc_clock, uint8_t *current_soc_did, uint8_t *current_vol_index) { + struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc = - table_info->vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc; struct pp_atomfwctrl_clock_dividers_soc15 dividers; uint32_t i; - PP_ASSERT_WITH_CODE(dep_on_soc, - "Invalid SOC_VDD-SOC_CLK Dependency Table!", - return -EINVAL); - for (i = 0; i < dep_on_soc->count; i++) { - if (dep_on_soc->entries[i].clk == soc_clock) - break; + if (hwmgr->od_enabled) { + dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *) + &data->odn_dpm_table.vdd_dep_on_socclk; + for (i = 0; i < dep_on_soc->count; i++) { + if (dep_on_soc->entries[i].clk >= soc_clock) + break; + } + } else { + dep_on_soc = table_info->vdd_dep_on_socclk; + for (i = 0; i < dep_on_soc->count; i++) { + if (dep_on_soc->entries[i].clk == soc_clock) + break; + } } + 
PP_ASSERT_WITH_CODE(dep_on_soc->count > i, "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table", return -EINVAL); + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, soc_clock, &dividers), @@ -1592,22 +1600,6 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr, *current_soc_did = (uint8_t)dividers.ulDid; *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd); - - return 0; -} - -uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr, - uint32_t clk, - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) -{ - uint16_t i; - - for (i = 0; i < dep_table->count; i++) { - if (dep_table->entries[i].clk == clk) - return dep_table->entries[i].vddc; - } - - pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!"); return 0; } @@ -1621,8 +1613,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = - table_info->vdd_dep_on_socclk; PPTable_t *pp_table = &(data->smc_state_table.pp_table); struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); int result = 0; @@ -1653,11 +1643,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) dpm_table = &(data->dpm_table.soc_table); for (i = 0; i < dpm_table->count; i++) { - pp_table->SocVid[i] = - (uint8_t)convert_to_vid( - vega10_locate_vddc_given_clock(hwmgr, - dpm_table->dpm_levels[i].value, - dep_table)); result = vega10_populate_single_soc_level(hwmgr, dpm_table->dpm_levels[i].value, &(pp_table->SocclkDid[i]), @@ -1668,7 +1653,6 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) j = i - 1; while (i < NUM_SOCCLK_DPM_LEVELS) { - pp_table->SocVid[i] = pp_table->SocVid[j]; result = vega10_populate_single_soc_level(hwmgr, dpm_table->dpm_levels[j].value, &(pp_table->SocclkDid[i]), @@ -1681,6 +1665,32 @@ static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) return result; } +static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *table_info = hwmgr->pptable; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; + + uint8_t soc_vid = 0; + uint32_t i, max_vddc_level; + + if (hwmgr->od_enabled) + vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table; + else + vddc_lookup_table = table_info->vddc_lookup_table; + + max_vddc_level = vddc_lookup_table->count; + for (i = 0; i < max_vddc_level; i++) { + soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd); + pp_table->SocVid[i] = soc_vid; + } + while (i < MAX_REGULAR_DPM_NUMBER) { + pp_table->SocVid[i] = soc_vid; + i++; + } +} + /** * @brief Populates single SMC GFXCLK structure using the provided clock.
* @@ -1695,25 +1705,25 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr, struct vega10_hwmgr *data = hwmgr->backend; struct phm_ppt_v2_information *table_info = (struct phm_ppt_v2_information *)(hwmgr->pptable); - struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk = - table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk; struct pp_atomfwctrl_clock_dividers_soc15 dividers; uint32_t mem_max_clock = hwmgr->platform_descriptor.overdriveLimit.memoryClock; uint32_t i = 0; - if (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_VDDC) + if (hwmgr->od_enabled) dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *) - &data->odn_dpm_table.vdd_dependency_on_mclk; + &data->odn_dpm_table.vdd_dep_on_mclk; + else + dep_on_mclk = table_info->vdd_dep_on_mclk; PP_ASSERT_WITH_CODE(dep_on_mclk, "Invalid SOC_VDD-UCLK Dependency Table!", return -EINVAL); - if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) { mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock; - else { + } else { for (i = 0; i < dep_on_mclk->count; i++) { if (dep_on_mclk->entries[i].clk == mem_clock) break; @@ -2057,6 +2067,9 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) if (data->smu_features[GNLD_AVFS].supported) { result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); if (!result) { + data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc; + data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc; + pp_table->MinVoltageVid = (uint8_t) convert_to_vid((uint16_t)(avfs_params.ulMinVddc)); pp_table->MaxVoltageVid = (uint8_t) @@ -2335,6 +2348,22 @@ static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable) return 0; } +static int vega10_update_avfs(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { + vega10_avfs_enable(hwmgr, false); + } else if (data->need_update_dpm_table) { + vega10_avfs_enable(hwmgr, false); + vega10_avfs_enable(hwmgr, true); + } else { + vega10_avfs_enable(hwmgr, true); + } + + return 0; +} + static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr) { int result = 0; @@ -2396,6 +2425,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to setup default DPM tables!", return result); + /* initialize ODN table */ + if (hwmgr->od_enabled) + vega10_odn_initial_default_setting(hwmgr); + pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2, &voltage_table); pp_table->MaxVidStep = voltage_table.max_vid_step; @@ -2442,6 +2475,8 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to initialize Memory Level!", return result); + vega10_populate_vddc_soc_levels(hwmgr); + result = vega10_populate_all_display_clock_levels(hwmgr); PP_ASSERT_WITH_CODE(!result, "Failed to initialize Display Level!", @@ -3164,82 +3199,11 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) { - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct vega10_power_state *vega10_ps = - cast_const_phw_vega10_power_state(states->pnew_state); struct vega10_hwmgr *data = hwmgr->backend; - struct vega10_single_dpm_table *sclk_table = - &(data->dpm_table.gfx_table); - uint32_t sclk = vega10_ps->performance_levels - 
[vega10_ps->performance_level_count - 1].gfx_clock; - struct vega10_single_dpm_table *mclk_table = - &(data->dpm_table.mem_table); - uint32_t mclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].mem_clock; - struct PP_Clocks min_clocks = {0}; - uint32_t i; - - data->need_update_dpm_table = 0; - - if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || - PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (!(data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) { - /* Check SCLK in DAL's minimum clocks - * in case DeepSleep divider update is required. - */ - if (data->display_timing.min_clock_in_sr != - min_clocks.engineClockInSR && - (min_clocks.engineClockInSR >= - VEGA10_MINIMUM_ENGINE_CLOCK || - data->display_timing.min_clock_in_sr >= - VEGA10_MINIMUM_ENGINE_CLOCK)) - data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - - if (data->display_timing.num_existing_displays != - hwmgr->display_config->num_display) - data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; - } else { - for (i = 0; i < sclk_table->count; i++) { - if (sclk == sclk_table->dpm_levels[i].value) - break; - } - - if (i >= sclk_table->count) - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; - else { - /* Check SCLK in DAL's minimum clocks - * in case DeepSleep divider update is required. - */ - if (data->display_timing.min_clock_in_sr != - min_clocks.engineClockInSR && - (min_clocks.engineClockInSR >= - VEGA10_MINIMUM_ENGINE_CLOCK || - data->display_timing.min_clock_in_sr >= - VEGA10_MINIMUM_ENGINE_CLOCK)) - data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; - } - for (i = 0; i < mclk_table->count; i++) { - if (mclk == mclk_table->dpm_levels[i].value) - break; - } - - if (i >= mclk_table->count) - data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display) + data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; - if (data->display_timing.num_existing_displays != - hwmgr->display_config->num_display || - i >= mclk_table->count) - data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; - } return 0; } @@ -3247,194 +3211,29 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels( struct pp_hwmgr *hwmgr, const void *input) { int result = 0; - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; - const struct vega10_power_state *vega10_ps = - cast_const_phw_vega10_power_state(states->pnew_state); struct vega10_hwmgr *data = hwmgr->backend; - uint32_t sclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].gfx_clock; - uint32_t mclk = vega10_ps->performance_levels - [vega10_ps->performance_level_count - 1].mem_clock; - struct vega10_dpm_table *dpm_table = &data->dpm_table; - struct vega10_dpm_table *golden_dpm_table = - &data->golden_dpm_table; - uint32_t dpm_count, clock_percent; - uint32_t i; - - if (PP_CAP(PHM_PlatformCaps_ODNinACSupport) || - PP_CAP(PHM_PlatformCaps_ODNinDCSupport)) { - - if (!data->need_update_dpm_table && - !data->apply_optimized_settings && - !data->apply_overdrive_next_settings_mask) - return 0; - if (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_SCLK) { - for (dpm_count = 0; - dpm_count < dpm_table->gfx_table.count; - dpm_count++) { - dpm_table->gfx_table.dpm_levels[dpm_count].enabled = - 
data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].enabled; - dpm_table->gfx_table.dpm_levels[dpm_count].value = - data->odn_dpm_table.odn_core_clock_dpm_levels.entries[dpm_count].clock; - } - } - - if (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_MCLK) { - for (dpm_count = 0; - dpm_count < dpm_table->mem_table.count; - dpm_count++) { - dpm_table->mem_table.dpm_levels[dpm_count].enabled = - data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].enabled; - dpm_table->mem_table.dpm_levels[dpm_count].value = - data->odn_dpm_table.odn_memory_clock_dpm_levels.entries[dpm_count].clock; - } - } - - if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) || - data->apply_optimized_settings || - (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_SCLK)) { - result = vega10_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } - - if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) || - (data->apply_overdrive_next_settings_mask & - DPMTABLE_OD_UPDATE_MCLK)){ - result = vega10_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - } else { - if (!data->need_update_dpm_table && - !data->apply_optimized_settings) - return 0; - - if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK && - data->smu_features[GNLD_DPM_GFXCLK].supported) { - dpm_table-> - gfx_table.dpm_levels[dpm_table->gfx_table.count - 1]. - value = sclk; - if (hwmgr->od_enabled) { - /* Need to do calculation based on the golden DPM table - * as the Heatmap GPU Clock axis is also based on - * the default values - */ - PP_ASSERT_WITH_CODE( - golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value, - "Divide by 0!", - return -1); - - dpm_count = dpm_table->gfx_table.count < 2 ? - 0 : dpm_table->gfx_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (sclk > golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value) { - clock_percent = - ((sclk - golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value) * - 100) / - golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value; - - dpm_table->gfx_table.dpm_levels[i].value = - golden_dpm_table->gfx_table.dpm_levels[i].value + - (golden_dpm_table->gfx_table.dpm_levels[i].value * - clock_percent) / 100; - } else if (golden_dpm_table-> - gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value > - sclk) { - clock_percent = - ((golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count - 1].value - - sclk) * 100) / - golden_dpm_table->gfx_table.dpm_levels - [golden_dpm_table->gfx_table.count-1].value; - - dpm_table->gfx_table.dpm_levels[i].value = - golden_dpm_table->gfx_table.dpm_levels[i].value - - (golden_dpm_table->gfx_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->gfx_table.dpm_levels[i].value = - golden_dpm_table->gfx_table.dpm_levels[i].value; - } - } - } - - if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK && - data->smu_features[GNLD_DPM_UCLK].supported) { - dpm_table-> - mem_table.dpm_levels[dpm_table->mem_table.count - 1]. 
- value = mclk; + if (!data->need_update_dpm_table) + return 0; - if (hwmgr->od_enabled) { - PP_ASSERT_WITH_CODE( - golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count - 1].value, - "Divide by 0!", - return -1); + if (data->need_update_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) { + result = vega10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", + return result); + } - dpm_count = dpm_table->mem_table.count < 2 ? - 0 : dpm_table->mem_table.count - 2; - for (i = dpm_count; i > 1; i--) { - if (mclk > golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value) { - clock_percent = ((mclk - - golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value) * - 100) / - golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value; - - dpm_table->mem_table.dpm_levels[i].value = - golden_dpm_table->mem_table.dpm_levels[i].value + - (golden_dpm_table->mem_table.dpm_levels[i].value * - clock_percent) / 100; - } else if (golden_dpm_table->mem_table.dpm_levels - [dpm_table->mem_table.count-1].value > mclk) { - clock_percent = ((golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value - mclk) * - 100) / - golden_dpm_table->mem_table.dpm_levels - [golden_dpm_table->mem_table.count-1].value; - - dpm_table->mem_table.dpm_levels[i].value = - golden_dpm_table->mem_table.dpm_levels[i].value - - (golden_dpm_table->mem_table.dpm_levels[i].value * - clock_percent) / 100; - } else - dpm_table->mem_table.dpm_levels[i].value = - golden_dpm_table->mem_table.dpm_levels[i].value; - } - } - } + if (data->need_update_dpm_table & + (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + result = vega10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE((0 == result), + "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", + return result); + } - if ((data->need_update_dpm_table & - (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) || - data->apply_optimized_settings) { - result = vega10_populate_all_graphic_levels(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "Failed to populate SCLK during PopulateNewDPMClocksStates Function!", - return result); - } + vega10_populate_vddc_soc_levels(hwmgr); - if (data->need_update_dpm_table & - (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { - result = vega10_populate_all_memory_levels(hwmgr); - PP_ASSERT_WITH_CODE(!result, - "Failed to populate MCLK during PopulateNewDPMClocksStates Function!", - return result); - } - } return result; } @@ -3730,8 +3529,9 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); - data->apply_optimized_settings = false; - data->apply_overdrive_next_settings_mask = 0; + vega10_update_avfs(hwmgr); + + data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC; return 0; } @@ -4383,6 +4183,8 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); + struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; + int i, now, size = 0; switch (type) { @@ -4421,6 +4223,40 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, (pcie_table->pcie_gen[i] == 2) ? 
"8.0GT/s, x16" : "", (i == now) ? "*" : ""); break; + case OD_SCLK: + if (hwmgr->od_enabled) { + size = sprintf(buf, "%s:\n", "OD_SCLK"); + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; + for (i = 0; i < podn_vdd_dep->count; i++) + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", + i, podn_vdd_dep->entries[i].clk / 100, + podn_vdd_dep->entries[i].vddc); + } + break; + case OD_MCLK: + if (hwmgr->od_enabled) { + size = sprintf(buf, "%s:\n", "OD_MCLK"); + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; + for (i = 0; i < podn_vdd_dep->count; i++) + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", + i, podn_vdd_dep->entries[i].clk/100, + podn_vdd_dep->entries[i].vddc); + } + break; + case OD_RANGE: + if (hwmgr->od_enabled) { + size = sprintf(buf, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", + data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.engineClock/100); + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", + data->golden_dpm_table.mem_table.dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", + data->odn_dpm_table.min_vddc, + data->odn_dpm_table.max_vddc); + } + break; default: break; } @@ -4808,6 +4644,200 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui return 0; } + +static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type, + uint32_t clk, + uint32_t voltage) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct vega10_single_dpm_table *golden_table; + + if (voltage < odn_table->min_vddc || voltage > odn_table->max_vddc) { + pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table->min_vddc, odn_table->max_vddc); + return false; + } + + if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { + golden_table = &(data->golden_dpm_table.gfx_table); + if (golden_table->dpm_levels[0].value > clk || + hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) { + pr_info("OD engine clock is out of range [%d - %d] MHz\n", + golden_table->dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.engineClock/100); + return false; + } + } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { + golden_table = &(data->golden_dpm_table.mem_table); + if (golden_table->dpm_levels[0].value > clk || + hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) { + pr_info("OD memory clock is out of range [%d - %d] MHz\n", + golden_table->dpm_levels[0].value/100, + hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); + return false; + } + } else { + return false; + } + + return true; +} + +static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table); + struct phm_ppt_v2_information *table_info = hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table; + uint32_t i; + + dep_table = table_info->vdd_dep_on_mclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk); + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; + } + } + + dep_table = 
table_info->vdd_dep_on_sclk; + odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk); + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; + } + } + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { + data->need_update_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK; + } +} + +static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type) +{ + struct vega10_hwmgr *data = hwmgr->backend; + struct phm_ppt_v2_information *table_info = hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = table_info->vdd_dep_on_socclk; + struct vega10_single_dpm_table *dpm_table = &data->golden_dpm_table.soc_table; + + struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_on_socclk = + &data->odn_dpm_table.vdd_dep_on_socclk; + struct vega10_odn_vddc_lookup_table *od_vddc_lookup_table = &data->odn_dpm_table.vddc_lookup_table; + + struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep; + uint8_t i, j; + + if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) { + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; + for (i = 0; i < podn_vdd_dep->count - 1; i++) + od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; + if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc) + od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc; + } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) { + podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; + for (i = 0; i < dpm_table->count; i++) { + for (j = 0; j < od_vddc_lookup_table->count; j++) { + if (od_vddc_lookup_table->entries[j].us_vdd > + podn_vdd_dep->entries[i].vddc) + break; + } + if (j == od_vddc_lookup_table->count) { + od_vddc_lookup_table->entries[j-1].us_vdd = + podn_vdd_dep->entries[i].vddc; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + } + podn_vdd_dep->entries[i].vddInd = j; + } + dpm_table = &data->dpm_table.soc_table; + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vddInd == podn_vdd_dep->entries[dep_table->count-1].vddInd && + dep_table->entries[i].clk < podn_vdd_dep->entries[dep_table->count-1].clk) { + data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK; + podn_vdd_dep_on_socclk->entries[i].clk = podn_vdd_dep->entries[dep_table->count-1].clk; + dpm_table->dpm_levels[i].value = podn_vdd_dep_on_socclk->entries[i].clk; + } + } + if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk < + podn_vdd_dep->entries[dep_table->count-1].clk) { + data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK; + podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].clk = podn_vdd_dep->entries[dep_table->count-1].clk; + dpm_table->dpm_levels[podn_vdd_dep_on_socclk->count - 1].value = podn_vdd_dep->entries[dep_table->count-1].clk; + } + if (podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd < + podn_vdd_dep->entries[dep_table->count-1].vddInd) { + data->need_update_dpm_table |= DPMTABLE_UPDATE_SOCCLK; + podn_vdd_dep_on_socclk->entries[podn_vdd_dep_on_socclk->count - 1].vddInd = podn_vdd_dep->entries[dep_table->count-1].vddInd; + } + } +} + +static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) +{ + struct 
vega10_hwmgr *data = hwmgr->backend; + struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep_table; + struct vega10_single_dpm_table *dpm_table; + + uint32_t input_clk; + uint32_t input_vol; + uint32_t input_level; + uint32_t i; + + PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage", + return -EINVAL); + + if (!hwmgr->od_enabled) { + pr_info("OverDrive feature not enabled\n"); + return -EINVAL; + } + + if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) { + dpm_table = &data->dpm_table.gfx_table; + podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_sclk; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) { + dpm_table = &data->dpm_table.mem_table; + podn_vdd_dep_table = &data->odn_dpm_table.vdd_dep_on_mclk; + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) { + memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table)); + vega10_odn_initial_default_setting(hwmgr); + return 0; + } else if (PP_OD_COMMIT_DPM_TABLE == type) { + vega10_check_dpm_table_updated(hwmgr); + return 0; + } else { + return -EINVAL; + } + + for (i = 0; i < size; i += 3) { + if (i + 3 > size || input[i] >= podn_vdd_dep_table->count) { + pr_info("invalid clock voltage input\n"); + return 0; + } + input_level = input[i]; + input_clk = input[i+1] * 100; + input_vol = input[i+2]; + + if (vega10_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) { + dpm_table->dpm_levels[input_level].value = input_clk; + podn_vdd_dep_table->entries[input_level].clk = input_clk; + podn_vdd_dep_table->entries[input_level].vddc = input_vol; + } else { + return -EINVAL; + } + } + vega10_odn_update_soc_table(hwmgr, type); + return 0; +} + static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .backend_init = vega10_hwmgr_backend_init, .backend_fini = vega10_hwmgr_backend_fini, @@ -4866,6 +4896,7 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .get_power_profile_mode = vega10_get_power_profile_mode, .set_power_profile_mode = vega10_set_power_profile_mode, .set_power_limit = vega10_set_power_limit, + .odn_edit_dpm_table = vega10_odn_edit_dpm_table, }; int vega10_enable_smc_features(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index 5339ea1f3dce..aadd6cbc7e85 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -282,15 +282,21 @@ struct vega10_registry_data { struct vega10_odn_clock_voltage_dependency_table { uint32_t count; - struct phm_ppt_v1_clock_voltage_dependency_record - entries[MAX_REGULAR_DPM_NUMBER]; + struct phm_ppt_v1_clock_voltage_dependency_record entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega10_odn_vddc_lookup_table { + uint32_t count; + struct phm_ppt_v1_voltage_lookup_record entries[MAX_REGULAR_DPM_NUMBER]; }; struct vega10_odn_dpm_table { - struct phm_odn_clock_levels odn_core_clock_dpm_levels; - struct phm_odn_clock_levels odn_memory_clock_dpm_levels; - struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_sclk; - struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_mclk; + struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_sclk; + struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_mclk; + struct vega10_odn_clock_voltage_dependency_table vdd_dep_on_socclk; + struct vega10_odn_vddc_lookup_table vddc_lookup_table; + uint32_t max_vddc; + uint32_t 
min_vddc; }; struct vega10_odn_fan_table { @@ -301,8 +307,8 @@ struct vega10_odn_fan_table { }; struct vega10_hwmgr { - struct vega10_dpm_table dpm_table; - struct vega10_dpm_table golden_dpm_table; + struct vega10_dpm_table dpm_table; + struct vega10_dpm_table golden_dpm_table; struct vega10_registry_data registry_data; struct vega10_vbios_boot_state vbios_boot_state; struct vega10_mclk_latency_table mclk_latency_table; @@ -368,12 +374,8 @@ struct vega10_hwmgr { bool need_long_memory_training; /* Internal settings to apply the application power optimization parameters */ - bool apply_optimized_settings; uint32_t disable_dpm_mask; - /* ---- Overdrive next setting ---- */ - uint32_t apply_overdrive_next_settings_mask; - /* ---- SMU9 ---- */ struct smu_features smu_features[GNLD_FEATURES_MAX]; struct vega10_smc_state_table smc_state_table; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 8b78bbecd1bc..9bb87857a20f 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -377,11 +377,7 @@ struct phm_clocks { #define DPMTABLE_UPDATE_SCLK 0x00000004 #define DPMTABLE_UPDATE_MCLK 0x00000008 #define DPMTABLE_OD_UPDATE_VDDC 0x00000010 - -/* To determine if sclk and mclk are in overdrive state */ -#define SCLK_OVERDRIVE_ENABLED 0x00000001 -#define MCLK_OVERDRIVE_ENABLED 0x00000002 -#define VDDC_OVERDRIVE_ENABLED 0x00000010 +#define DPMTABLE_UPDATE_SOCCLK 0x00000020 struct phm_odn_performance_level { uint32_t clock; -- cgit v1.2.1 From 48ff108d9dc42bf92256484c50cdb3697f5ccb04 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 9 Nov 2017 13:18:24 -0500 Subject: drm/amdgpu: add VEGAM ASIC type Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9e917f53f357..8ce60e6e2614 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -83,6 +83,7 @@ static const char *amdgpu_asic_name[] = { "POLARIS10", "POLARIS11", "POLARIS12", + "VEGAM", "VEGA10", "VEGA12", "RAVEN", -- cgit v1.2.1 From cc07f18ddb618af5ad28669dcb32b27e2f2312af Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 9 Nov 2017 13:19:58 -0500 Subject: drm/amdgpu: bypass GPU info firmware load for VEGAM Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 8ce60e6e2614..47b65f3a1927 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1367,9 +1367,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_TOPAZ: case CHIP_TONGA: case CHIP_FIJI: - case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: case CHIP_CARRIZO: case CHIP_STONEY: #ifdef CONFIG_DRM_AMDGPU_SI -- cgit v1.2.1 From 32cc7e536a546e4e2ad9ac75d02ce07d9d2327f2 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 9 Nov 2017 13:22:54 -0500 Subject: drm/amdgpu: set VEGAM to ASIC family and ip blocks Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: 
Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 47b65f3a1927..7929ff83f3ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1476,9 +1476,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) case CHIP_TOPAZ: case CHIP_TONGA: case CHIP_FIJI: - case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: case CHIP_CARRIZO: case CHIP_STONEY: if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) -- cgit v1.2.1 From 34fd54bc0891b0d835de73978ba5277665814be3 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 9 Nov 2017 13:26:54 -0500 Subject: drm/amdgpu: specify VEGAM ucode SMU load method Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 0c74c09ef3b0..ee71c40b3920 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -295,6 +295,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_POLARIS10: case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else -- cgit v1.2.1 From 5830bb986dcd6aea290ef54446e077c09cc8498e Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 9 Nov 2017 13:24:47 -0500 Subject: drm/amdgpu: add VEGAM SMU firmware support Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3 +++ drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1 + 2 files changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a8a942c60ea2..5b3d3bf5b599 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -385,6 +385,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_POLARIS12: strcpy(fw_name, "amdgpu/polaris12_smc.bin"); break; + case CHIP_VEGAM: + strcpy(fw_name, "amdgpu/vegam_smc.bin"); + break; case CHIP_VEGA10: if ((adev->pdev->device == 0x687f) && ((adev->pdev->revision == 0xc0) || diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c28b60aae5f8..ee236dfbf1d6 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -41,6 +41,7 @@ MODULE_FIRMWARE("amdgpu/polaris11_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin"); MODULE_FIRMWARE("amdgpu/polaris11_k_smc.bin"); MODULE_FIRMWARE("amdgpu/polaris12_smc.bin"); +MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); MODULE_FIRMWARE("amdgpu/vega12_smc.bin"); -- cgit v1.2.1 From be2c8cde0b867033914fc48d51b0cca0481b39b6 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 3 Nov 2017 14:22:16 -0400 Subject: drm/amdgpu/virtual_dce: add VEGAM support Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 6454cc371f57..de7be3de0f41 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -460,8 +460,9 @@ static int dce_virtual_hw_init(void *handle) break; case CHIP_CARRIZO: case CHIP_STONEY: - case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_VEGAM: dce_v11_0_disable_dce(adev); break; case CHIP_TOPAZ: -- cgit v1.2.1 From 675fd32b2730f362b425a65f99fcc1eae8898fc5 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 8 Nov 2017 18:07:12 -0500 Subject: drm/amdgpu: add VEGAM dc support check Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7929ff83f3ed..e6657ec363b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2147,9 +2147,10 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_MULLINS: case CHIP_CARRIZO: case CHIP_STONEY: - case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: case CHIP_TONGA: case CHIP_FIJI: case CHIP_VEGA10: -- cgit v1.2.1 From 589ecd753aa9e69ea40e307d2a0c013b03e418f1 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 16 Nov 2017 13:15:12 -0500 Subject: drm/amdgpu: skip VEGAM MC firmware load Directly loaded by VBIOS Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 4d970daa65f4..97fcca805d48 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -231,6 +231,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_FIJI: case CHIP_CARRIZO: case CHIP_STONEY: + case CHIP_VEGAM: return 0; default: BUG(); } -- cgit v1.2.1 From 13b75aac5dd9a6448417769c43d21b2343ce1cc8 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 11 Apr 2018 15:18:20 -0500 Subject: drm/amdgpu: add VEGAM GMC golden settings Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 97fcca805d48..6721b04b7796 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -138,6 +138,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) break; case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: amdgpu_device_program_register_sequence(adev, golden_settings_polaris11_a11, ARRAY_SIZE(golden_settings_polaris11_a11)); -- cgit v1.2.1 From f43c72ba03152920c52f1921e45100c6c090faef Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 11 Apr 2018 15:20:35 -0500 Subject: drm/amdgpu: initialize VEGAM GMC (v2) v2: use proper register rather than hardcoding. 
Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 6721b04b7796..1edbe6b477b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -569,9 +569,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev) /* set the gart size */ if (amdgpu_gart_size == -1) { switch (adev->asic_type) { - case CHIP_POLARIS11: /* all engines support GPUVM */ case CHIP_POLARIS10: /* all engines support GPUVM */ + case CHIP_POLARIS11: /* all engines support GPUVM */ case CHIP_POLARIS12: /* all engines support GPUVM */ + case CHIP_VEGAM: /* all engines support GPUVM */ default: adev->gmc.gart_size = 256ULL << 20; break; @@ -1091,7 +1092,8 @@ static int gmc_v8_0_sw_init(void *handle) } else { u32 tmp; - if (adev->asic_type == CHIP_FIJI) + if ((adev->asic_type == CHIP_FIJI) || + (adev->asic_type == CHIP_VEGAM)) tmp = RREG32(mmMC_SEQ_MISC0_FIJI); else tmp = RREG32(mmMC_SEQ_MISC0); -- cgit v1.2.1 From 2267e26241d6cd0c8d92614a4a70562b009354c9 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 9 Nov 2017 13:56:12 -0500 Subject: drm/amdgpu: add VEGAM SDMA firmware support Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index be20a387d961..add0b80a5355 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -62,6 +62,8 @@ MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin"); MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin"); MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin"); MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/vegam_sdma.bin"); +MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin"); static const u32 sdma_offsets[SDMA_MAX_INSTANCE] = @@ -275,15 +277,18 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev) case CHIP_FIJI: chip_name = "fiji"; break; - case CHIP_POLARIS11: - chip_name = "polaris11"; - break; case CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS11: + chip_name = "polaris11"; + break; case CHIP_POLARIS12: chip_name = "polaris12"; break; + case CHIP_VEGAM: + chip_name = "vegam"; + break; case CHIP_CARRIZO: chip_name = "carrizo"; break; -- cgit v1.2.1 From c3f27c08ec15b61bed2a1af592ab5bdc89fe7dee Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 11 Apr 2018 15:22:20 -0500 Subject: drm/amdgpu: add VEGAM SDMA golden settings Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index add0b80a5355..aa9ab299fd32 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -211,6 +211,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) break; case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: amdgpu_device_program_register_sequence(adev, golden_settings_polaris11_a11, ARRAY_SIZE(golden_settings_polaris11_a11)); -- cgit v1.2.1 From 62aac2010de1d233739c30a168d0bbff31b3cb43 Mon Sep 
17 00:00:00 2001 From: Leo Liu Date: Fri, 10 Nov 2017 11:04:09 -0500 Subject: drm/amdgpu: add VEGAM GFX firmware support Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 42 ++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index e14263fca1c9..2be287078ec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -125,18 +125,6 @@ MODULE_FIRMWARE("amdgpu/fiji_mec.bin"); MODULE_FIRMWARE("amdgpu/fiji_mec2.bin"); MODULE_FIRMWARE("amdgpu/fiji_rlc.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_ce.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_me.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_mec.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin"); -MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin"); - MODULE_FIRMWARE("amdgpu/polaris10_ce.bin"); MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin"); @@ -149,6 +137,18 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin"); MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_ce.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_me.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mec.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin"); +MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin"); + MODULE_FIRMWARE("amdgpu/polaris12_ce.bin"); MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin"); MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin"); @@ -161,6 +161,13 @@ MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin"); MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin"); MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin"); +MODULE_FIRMWARE("amdgpu/vegam_ce.bin"); +MODULE_FIRMWARE("amdgpu/vegam_pfp.bin"); +MODULE_FIRMWARE("amdgpu/vegam_me.bin"); +MODULE_FIRMWARE("amdgpu/vegam_mec.bin"); +MODULE_FIRMWARE("amdgpu/vegam_mec2.bin"); +MODULE_FIRMWARE("amdgpu/vegam_rlc.bin"); + static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] = { {mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0}, @@ -918,17 +925,20 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) case CHIP_FIJI: chip_name = "fiji"; break; - case CHIP_POLARIS11: - chip_name = "polaris11"; + case CHIP_STONEY: + chip_name = "stoney"; break; case CHIP_POLARIS10: chip_name = "polaris10"; break; + case CHIP_POLARIS11: + chip_name = "polaris11"; + break; case CHIP_POLARIS12: chip_name = "polaris12"; break; - case CHIP_STONEY: - chip_name = "stoney"; + case CHIP_VEGAM: + chip_name = "vegam"; break; default: BUG(); -- cgit v1.2.1 From aefbbd6cc55cba823fecd0231116a8e1073e4892 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 16 Nov 2017 13:41:03 -0500 Subject: drm/amdgpu: add VEGAM GFX golden settings Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c 
| 39 +++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 2be287078ec6..d789723f0478 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -299,6 +299,37 @@ static const u32 tonga_mgcg_cgcg_init[] = mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001, }; +static const u32 golden_settings_vegam_a11[] = +{ + mmCB_HW_CONTROL, 0x0001f3cf, 0x00007208, + mmCB_HW_CONTROL_2, 0x0f000000, 0x0d000000, + mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, + mmDB_DEBUG2, 0xf00fffff, 0x00000400, + mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, + mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000, + mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x3a00161a, + mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x0000002e, + mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c, + mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c, + mmSQ_CONFIG, 0x07f80000, 0x01180000, + mmTA_CNTL_AUX, 0x000f000f, 0x000b0000, + mmTCC_CTRL, 0x00100000, 0xf31fff7f, + mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000f7, + mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000, + mmTCP_CHAN_STEER_LO, 0xffffffff, 0x32761054, + mmVGT_RESET_DEBUG, 0x00000004, 0x00000004, +}; + +static const u32 vegam_golden_common_all[] = +{ + mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000, + mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003, + mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800, + mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00FF7FBF, + mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00FF7FAF, +}; + static const u32 golden_settings_polaris11_a11[] = { mmCB_HW_CONTROL, 0x0000f3cf, 0x00007208, @@ -719,6 +750,14 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) tonga_golden_common_all, ARRAY_SIZE(tonga_golden_common_all)); break; + case CHIP_VEGAM: + amdgpu_device_program_register_sequence(adev, + golden_settings_vegam_a11, + ARRAY_SIZE(golden_settings_vegam_a11)); + amdgpu_device_program_register_sequence(adev, + vegam_golden_common_all, + ARRAY_SIZE(vegam_golden_common_all)); + break; case CHIP_POLARIS11: case CHIP_POLARIS12: amdgpu_device_program_register_sequence(adev, -- cgit v1.2.1 From 7176546958ddd7d4732d2a19692a3b14e3519caa Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 16 Nov 2017 13:49:56 -0500 Subject: drm/amdgpu: initialize VEGAM GFX Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d789723f0478..818874b13c99 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1819,6 +1819,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = POLARIS11_GB_ADDR_CONFIG_GOLDEN; break; case CHIP_POLARIS10: + case CHIP_VEGAM: ret = amdgpu_atombios_get_gfx_info(adev); if (ret) return ret; @@ -2006,12 +2007,13 @@ static int gfx_v8_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; switch (adev->asic_type) { - case CHIP_FIJI: case CHIP_TONGA: + case CHIP_CARRIZO: + case CHIP_FIJI: + case CHIP_POLARIS10: case CHIP_POLARIS11: case CHIP_POLARIS12: - case CHIP_POLARIS10: - case CHIP_CARRIZO: + case CHIP_VEGAM: adev->gfx.mec.num_mec = 2; break; case CHIP_TOPAZ: @@ -2372,6 +2374,7 @@ static void 
gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) break; case CHIP_FIJI: + case CHIP_VEGAM: modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | @@ -3553,6 +3556,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1) { switch (adev->asic_type) { case CHIP_FIJI: + case CHIP_VEGAM: *rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) | RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) | PKR_YSEL(1) | @@ -4120,7 +4124,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) gfx_v8_0_init_power_gating(adev); WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); } else if ((adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12)) { + (adev->asic_type == CHIP_POLARIS12) || + (adev->asic_type == CHIP_VEGAM)) { gfx_v8_0_init_csb(adev); gfx_v8_0_init_save_restore_list(adev); gfx_v8_0_enable_save_restore_machine(adev); @@ -4195,7 +4200,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_CGCG_CGLS_CTRL, tmp); if (adev->asic_type == CHIP_POLARIS11 || adev->asic_type == CHIP_POLARIS10 || - adev->asic_type == CHIP_POLARIS12) { + adev->asic_type == CHIP_POLARIS12 || + adev->asic_type == CHIP_VEGAM) { tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D); tmp &= ~0x3; WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp); @@ -5547,7 +5553,8 @@ static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade bool enable) { if ((adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12)) + (adev->asic_type == CHIP_POLARIS12) || + (adev->asic_type == CHIP_VEGAM)) /* Send msg to SMU via Powerplay */ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_SMC, @@ -5637,6 +5644,7 @@ static int gfx_v8_0_set_powergating_state(void *handle, break; case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); else @@ -6203,6 +6211,7 @@ static int gfx_v8_0_set_clockgating_state(void *handle, case CHIP_POLARIS10: case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: gfx_v8_0_polaris_update_gfx_clock_gating(adev, state); break; default: -- cgit v1.2.1 From ba8f7ad0e5b25851299cd45a63b57d843e50b577 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 10 Nov 2017 12:27:40 -0500 Subject: drm/amdgpu: add VEGAM UVD firmware support Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 627542b22ae4..d8dd4028c2bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -66,6 +66,7 @@ #define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin" #define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin" #define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin" +#define FIRMWARE_VEGAM "amdgpu/vegam_uvd.bin" #define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin" #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin" @@ -109,6 +110,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY); MODULE_FIRMWARE(FIRMWARE_POLARIS10); MODULE_FIRMWARE(FIRMWARE_POLARIS11); MODULE_FIRMWARE(FIRMWARE_POLARIS12); +MODULE_FIRMWARE(FIRMWARE_VEGAM); MODULE_FIRMWARE(FIRMWARE_VEGA10); MODULE_FIRMWARE(FIRMWARE_VEGA12); @@ -172,6 +174,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_VEGA12: fw_name = FIRMWARE_VEGA12; break; + 
case CHIP_VEGAM: + fw_name = FIRMWARE_VEGAM; + break; default: return -EINVAL; } -- cgit v1.2.1 From 136b10ad9b515a7ffdfbf4df01941856682bf94e Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 11 Apr 2018 15:24:01 -0500 Subject: drm/amdgpu: add VEGAM UVD encode support Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index f26f515db2fb..6d3359889c0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -62,7 +62,7 @@ static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev, static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev) { return ((adev->asic_type >= CHIP_POLARIS10) && - (adev->asic_type <= CHIP_POLARIS12) && + (adev->asic_type <= CHIP_VEGAM) && (!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16)); } -- cgit v1.2.1 From f11ded5ec23602d651cab3381243c527ad8c55a9 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 11 Apr 2018 15:25:57 -0500 Subject: drm/amdgpu: add VEGAM VCE firmware support Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index d7261e01ff8a..e2186eda3271 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -51,8 +51,9 @@ #define FIRMWARE_FIJI "amdgpu/fiji_vce.bin" #define FIRMWARE_STONEY "amdgpu/stoney_vce.bin" #define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin" -#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin" -#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin" +#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin" +#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin" +#define FIRMWARE_VEGAM "amdgpu/vegam_vce.bin" #define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin" #define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin" @@ -71,6 +72,7 @@ MODULE_FIRMWARE(FIRMWARE_STONEY); MODULE_FIRMWARE(FIRMWARE_POLARIS10); MODULE_FIRMWARE(FIRMWARE_POLARIS11); MODULE_FIRMWARE(FIRMWARE_POLARIS12); +MODULE_FIRMWARE(FIRMWARE_VEGAM); MODULE_FIRMWARE(FIRMWARE_VEGA10); MODULE_FIRMWARE(FIRMWARE_VEGA12); @@ -132,6 +134,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) case CHIP_POLARIS12: fw_name = FIRMWARE_POLARIS12; break; + case CHIP_VEGAM: + fw_name = FIRMWARE_VEGAM; + break; case CHIP_VEGA10: fw_name = FIRMWARE_VEGA10; break; -- cgit v1.2.1 From a771289786824f15d4d4307242389d0499e83e59 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 10 Nov 2017 12:32:04 -0500 Subject: drm/amdgpu: add VEGAM to VCE harvest config Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index ac9617269a2f..0999c843f623 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -388,7 +388,8 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) default: if ((adev->asic_type == CHIP_POLARIS10) || (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == 
CHIP_POLARIS12)) + (adev->asic_type == CHIP_POLARIS12) || + (adev->asic_type == CHIP_VEGAM)) return AMDGPU_VCE_HARVEST_VCE1; return 0; -- cgit v1.2.1 From b51c5194a5b8d781e45a86776f2eec234f7567fe Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 11 Apr 2018 15:28:28 -0500 Subject: drm/amdgpu: add VEGAM support to vi Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vi.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 4034a2863226..4ac1288ab7df 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -305,9 +305,10 @@ static void vi_init_golden_registers(struct amdgpu_device *adev) stoney_mgcg_cgcg_init, ARRAY_SIZE(stoney_mgcg_cgcg_init)); break; - case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: default: break; } @@ -1096,6 +1097,30 @@ static int vi_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x64; break; + case CHIP_VEGAM: + adev->cg_flags = 0; + /*AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_DRM_LS | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_VCE_MGCG;*/ + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x6E; + break; case CHIP_CARRIZO: adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_GFX_MGCG | @@ -1487,6 +1512,7 @@ static int vi_common_set_clockgating_state(void *handle, case CHIP_POLARIS10: case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: vi_common_set_clockgating_state_by_smu(adev, state); default: break; @@ -1616,9 +1642,10 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vce_v3_0_ip_block); } break; - case CHIP_POLARIS11: case CHIP_POLARIS10: + case CHIP_POLARIS11: case CHIP_POLARIS12: + case CHIP_VEGAM: amdgpu_device_ip_block_add(adev, &vi_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v8_1_ip_block); amdgpu_device_ip_block_add(adev, &tonga_ih_ip_block); -- cgit v1.2.1 From e930793280799e66c3197e2ee6e70b1129f8aa12 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 9 Nov 2017 13:25:31 -0500 Subject: drm/amdgpu: add VEGAM pci ids Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 998ba8e710de..739e7e09c8b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -541,6 +541,9 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, + /* VEGAM */ + {0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, + {0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM}, /* Vega 10 */ 
{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, {0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, -- cgit v1.2.1 From 0c75d5acc80dc5247962370c9f555922197b1ec3 Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Wed, 11 Apr 2018 15:39:35 -0500 Subject: drm/amd/display: Implement VEGAM device IDs in DC Implement device IDs for VEGAM Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 6 ++++++ drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 3 +++ drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 3 +++ drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 9 +++++++++ drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 7 +++++++ drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 6 ++++++ drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 3 +++ drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 3 +++ drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 3 +++ drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h | 3 +++ drivers/gpu/drm/amd/display/include/dal_asic_id.h | 7 +++++++ drivers/gpu/drm/amd/display/include/dal_types.h | 3 +++ 12 files changed, 56 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index d5d4586e6176..e6ca72c0d347 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -34,4 +34,10 @@ config DEBUG_KERNEL_DC if you want to hit kdgb_break in assert. +config DRM_AMD_DC_VEGAM + bool "VEGAM support" + depends on DRM_AMD_DC + help + Choose this option if you want to have + VEGAM support for display engine endmenu diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c index 2979358c6a55..be066c49b984 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c @@ -51,6 +51,9 @@ bool dal_bios_parser_init_cmd_tbl_helper( return true; case DCE_VERSION_11_2: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case DCE_VERSION_11_22: +#endif *h = dal_cmd_tbl_helper_dce112_get_table(); return true; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 9a4d30dd4969..9b9e06995805 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -52,6 +52,9 @@ bool dal_bios_parser_init_cmd_tbl_helper2( return true; case DCE_VERSION_11_2: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case DCE_VERSION_11_22: +#endif *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 56f46a065a93..4ee3c26f7c13 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -59,6 +59,10 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi return BW_CALCS_VERSION_POLARIS10; if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev)) return BW_CALCS_VERSION_POLARIS11; +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) + return BW_CALCS_VERSION_VEGAM; +#endif return BW_CALCS_VERSION_INVALID; case FAMILY_AI: @@ -2147,6 +2151,11 @@ void 
bw_calcs_init(struct bw_calcs_dceip *bw_dceip, dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/ break; case BW_CALCS_VERSION_POLARIS10: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + /* TODO: Treat VEGAM the same as P10 for now + * Need to tune the para for VEGAM if needed */ + case BW_CALCS_VERSION_VEGAM: +#endif vbios.memory_type = bw_def_gddr5; vbios.dram_channel_width_in_bits = 32; vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index d7a92eca8a27..447729cd29f0 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -79,6 +79,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) { dc_version = DCE_VERSION_11_2; } +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) + dc_version = DCE_VERSION_11_22; +#endif break; case FAMILY_AI: dc_version = DCE_VERSION_12_0; @@ -125,6 +129,9 @@ struct resource_pool *dc_create_resource_pool( num_virtual_links, dc, asic_id); break; case DCE_VERSION_11_2: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case DCE_VERSION_11_22: +#endif res_pool = dce112_create_resource_pool( num_virtual_links, dc); break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 67dad7f1e643..223db98a568a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -590,6 +590,9 @@ static uint32_t dce110_get_pix_clk_dividers( pll_settings, pix_clk_params); break; case DCE_VERSION_11_2: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case DCE_VERSION_11_22: +#endif case DCE_VERSION_12_0: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case DCN_VERSION_1_0: @@ -979,6 +982,9 @@ static bool dce110_program_pix_clk( break; case DCE_VERSION_11_2: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case DCE_VERSION_11_22: +#endif case DCE_VERSION_12_0: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case DCN_VERSION_1_0: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index 87b580fa4bc9..61fe484da1a0 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -75,6 +75,9 @@ bool dal_hw_factory_init( return true; case DCE_VERSION_11_0: case DCE_VERSION_11_2: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case DCE_VERSION_11_22: +#endif dal_hw_factory_dce110_init(factory); return true; case DCE_VERSION_12_0: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 0ae8ace25739..910ae2b7bf64 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -72,6 +72,9 @@ bool dal_hw_translate_init( case DCE_VERSION_10_0: case DCE_VERSION_11_0: case DCE_VERSION_11_2: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case DCE_VERSION_11_22: +#endif dal_hw_translate_dce110_init(translate); return true; case DCE_VERSION_12_0: diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c index 5cbf6626b8d4..c3d7c320fdba 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c @@ -83,6 +83,9 @@ struct i2caux 
*dal_i2caux_create( case DCE_VERSION_8_3: return dal_i2caux_dce80_create(ctx); case DCE_VERSION_11_2: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case DCE_VERSION_11_22: +#endif return dal_i2caux_dce112_create(ctx); case DCE_VERSION_11_0: return dal_i2caux_dce110_create(ctx); diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h index 0bd87f24fc06..933ea7a1e18b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h @@ -43,6 +43,9 @@ enum bw_calcs_version { BW_CALCS_VERSION_POLARIS10, BW_CALCS_VERSION_POLARIS11, BW_CALCS_VERSION_POLARIS12, +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + BW_CALCS_VERSION_VEGAM, +#endif BW_CALCS_VERSION_STONEY, BW_CALCS_VERSION_VEGA10 }; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 9831cb5eaa7c..3e8e535e08f2 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -98,7 +98,14 @@ (eChipRev < VI_POLARIS11_M_A0)) #define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \ (eChipRev < VI_POLARIS12_V_A0)) +#if defined(CONFIG_DRM_AMD_DC_VEGAM) +#define VI_VEGAM_A0 110 +#define ASIC_REV_IS_POLARIS12_V(eChipRev) ((eChipRev >= VI_POLARIS12_V_A0) && \ + (eChipRev < VI_VEGAM_A0)) +#define ASIC_REV_IS_VEGAM(eChipRev) (eChipRev >= VI_VEGAM_A0) +#else #define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0) +#endif /* DCE11 */ #define CZ_CARRIZO_A0 0x01 diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index fa543965feb5..5b1f8cef0c22 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -40,6 +40,9 @@ enum dce_version { DCE_VERSION_10_0, DCE_VERSION_11_0, DCE_VERSION_11_2, +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + DCE_VERSION_11_22, +#endif DCE_VERSION_12_0, DCE_VERSION_MAX, DCN_VERSION_1_0, -- cgit v1.2.1 From 7737de91633b1cd6b3a0b15347a633667a9bc2fc Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Thu, 9 Nov 2017 11:51:13 -0500 Subject: drm/amd/display: Implement VEGAM device IDs in DM Add CHIP_VEGAM Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 656a01891f6c..8379a3705f2d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1524,6 +1524,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case CHIP_VEGAM: +#endif case CHIP_VEGA10: case CHIP_VEGA12: if (dce110_register_irq_handlers(dm->adev)) { @@ -1716,6 +1719,9 @@ static int dm_early_init(void *handle) adev->mode_info.plane_type = dm_plane_type_default; break; case CHIP_POLARIS10: +#if defined(CONFIG_DRM_AMD_DC_VEGAM) + case CHIP_VEGAM: +#endif adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; -- cgit v1.2.1 From 221adb2172f10ebc3a1f86c18923692a58cff1de Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Apr 2018 16:38:46 -0500 
Subject: drm/amdgpu: Add VEGAM support to the legacy DCE 11 module DC is preferred. Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index d3ae508b2a92..a5b96eac3033 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -173,6 +173,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) ARRAY_SIZE(polaris11_golden_settings_a11)); break; case CHIP_POLARIS10: + case CHIP_VEGAM: amdgpu_device_program_register_sequence(adev, polaris10_golden_settings_a11, ARRAY_SIZE(polaris10_golden_settings_a11)); @@ -473,6 +474,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev) num_crtc = 2; break; case CHIP_POLARIS10: + case CHIP_VEGAM: num_crtc = 6; break; case CHIP_POLARIS11: @@ -1445,6 +1447,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev) adev->mode_info.audio.num_pins = 7; break; case CHIP_POLARIS10: + case CHIP_VEGAM: adev->mode_info.audio.num_pins = 8; break; case CHIP_POLARIS11: @@ -2248,7 +2251,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) if ((adev->asic_type == CHIP_POLARIS10) || (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12)) { + (adev->asic_type == CHIP_POLARIS12) || + (adev->asic_type == CHIP_VEGAM)) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(amdgpu_crtc->encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; @@ -2666,7 +2670,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, if ((adev->asic_type == CHIP_POLARIS10) || (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12)) { + (adev->asic_type == CHIP_POLARIS12) || + (adev->asic_type == CHIP_VEGAM)) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(amdgpu_crtc->encoder); int encoder_mode = @@ -2823,6 +2828,7 @@ static int dce_v11_0_early_init(void *handle) adev->mode_info.num_dig = 9; break; case CHIP_POLARIS10: + case CHIP_VEGAM: adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; break; @@ -2942,7 +2948,8 @@ static int dce_v11_0_hw_init(void *handle) amdgpu_atombios_encoder_init_dig(adev); if ((adev->asic_type == CHIP_POLARIS10) || (adev->asic_type == CHIP_POLARIS11) || - (adev->asic_type == CHIP_POLARIS12)) { + (adev->asic_type == CHIP_POLARIS12) || + (adev->asic_type == CHIP_VEGAM)) { amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk, DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS); amdgpu_atombios_crtc_set_dce_clock(adev, 0, -- cgit v1.2.1 From 48231fd51667a89514d0eaba893ae0743fd0877d Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 21 Nov 2017 13:34:48 -0500 Subject: drm/amd/display: Use HBR2 if eDP monitor it doesn't advertise link rate Some eDP displays use the extra link rate table to advertise link rate support. If they do that they don't need to provide link rate through the usual registers. Since we don't currently have support for the extra link rate table default to HBR2 for the display in this. Note that this is a HACK. Ultimately we need to teach DC to use the extra link rate table. 
Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 07cc4385a7c1..0a190c2b6898 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2393,6 +2393,10 @@ bool detect_dp_sink_caps(struct dc_link *link) void detect_edp_sink_caps(struct dc_link *link) { retrieve_link_cap(link); + + if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN) + link->reported_link_cap.link_rate = LINK_RATE_HIGH2; + link->verified_link_cap = link->reported_link_cap; } -- cgit v1.2.1 From f4ad6fa99772969c16c3fc8877e450b48e93e102 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Thu, 9 Nov 2017 16:29:28 -0500 Subject: drm/amd/powerplay: add smu75 header files Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/smu75.h | 760 ++++++++++++++++++ drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h | 886 +++++++++++++++++++++ 2 files changed, 1646 insertions(+) create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu75.h create mode 100644 drivers/gpu/drm/amd/powerplay/inc/smu75_discrete.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu75.h b/drivers/gpu/drm/amd/powerplay/inc/smu75.h new file mode 100644 index 000000000000..771523001533 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/smu75.h @@ -0,0 +1,760 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef SMU75_H +#define SMU75_H + +#pragma pack(push, 1) + +typedef struct { + uint32_t high; + uint32_t low; +} data_64_t; + +typedef struct { + data_64_t high; + data_64_t low; +} data_128_t; + +#define SMU__DGPU_ONLY + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 8 + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU75_MAX_LEVELS_VDDC 16 +#define SMU75_MAX_LEVELS_VDDGFX 16 +#define SMU75_MAX_LEVELS_VDDCI 8 +#define SMU75_MAX_LEVELS_MVDD 4 + +#define SMU_MAX_SMIO_LEVELS 4 + +#define SMU75_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE +#define SMU75_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS +#define SMU75_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS +#define SMU75_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS +#define SMU75_MAX_LEVELS_UVD 8 +#define SMU75_MAX_LEVELS_VCE 8 +#define SMU75_MAX_LEVELS_ACP 8 +#define SMU75_MAX_LEVELS_SAMU 8 +#define SMU75_MAX_ENTRIES_SMIO 32 + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7< Date: Fri, 17 Nov 2017 11:17:48 -0500 Subject: drm/amd: add a new struct in atombios.h Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atombios.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index f696bbb643ef..7931502fa54f 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h @@ -632,6 +632,13 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 ULONG ulReserved; }COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2; +typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 +{ + COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock; + USHORT usMclk_fcw_frac; //fractional divider of fcw = usSclk_fcw_frac/65536 + USHORT usMclk_fcw_int; //integer divider of fcwc +}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3; + //Input parameter of DynamicMemorySettingsTable //when ATOM_COMPUTE_CLOCK_FREQ.ulComputeClockFlag = COMPUTE_MEMORY_PLL_PARAM typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER -- cgit v1.2.1 From 4eeed17e713b9e6494a08ab37623283723596b5a Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Fri, 17 Nov 2017 11:21:02 -0500 Subject: drm/amd/powerplay: update ppatomctrl.c (v2) used for calculating memory clocks in powerplay. 
v2: handle endian swapping of atom data (Alex) Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 31 ++++++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h | 10 ++++++++ 2 files changed, 41 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index d58be7eb8256..cf99c5eaf080 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -23,6 +23,7 @@ #include "pp_debug.h" #include #include +#include #include "atom.h" #include "ppatomctrl.h" #include "atombios.h" @@ -314,6 +315,36 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, return result; } +int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr, + uint32_t clock_value, + pp_atomctrl_memory_clock_param_ai *mpll_param) +{ + struct amdgpu_device *adev = hwmgr->adev; + COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {0}; + int result; + + mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value); + + result = amdgpu_atom_execute_table(adev->mode_info.atom_context, + GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), + (uint32_t *)&mpll_parameters); + + /* VEGAM's mpll takes sometime to finish computing */ + udelay(10); + + if (!result) { + mpll_param->ulMclk_fcw_int = + le16_to_cpu(mpll_parameters.usMclk_fcw_int); + mpll_param->ulMclk_fcw_frac = + le16_to_cpu(mpll_parameters.usMclk_fcw_frac); + mpll_param->ulClock = + le32_to_cpu(mpll_parameters.ulClock.ulClock); + mpll_param->ulPostDiv = mpll_parameters.ulClock.ucPostDiv; + } + + return result; +} + int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_kong *dividers) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index e1b5d6b0b548..3ee54f182943 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -146,6 +146,14 @@ struct pp_atomctrl_memory_clock_param { }; typedef struct pp_atomctrl_memory_clock_param pp_atomctrl_memory_clock_param; +struct pp_atomctrl_memory_clock_param_ai { + uint32_t ulClock; + uint32_t ulPostDiv; + uint16_t ulMclk_fcw_frac; + uint16_t ulMclk_fcw_int; +}; +typedef struct pp_atomctrl_memory_clock_param_ai pp_atomctrl_memory_clock_param_ai; + struct pp_atomctrl_internal_ss_info { uint32_t speed_spectrum_percentage; /* in 1/100 percentage */ uint32_t speed_spectrum_rate; /* in KHz */ @@ -295,6 +303,8 @@ extern bool atomctrl_is_voltage_controlled_by_gpio_v3(struct pp_hwmgr *hwmgr, ui extern int atomctrl_get_voltage_table_v3(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint8_t voltage_mode, pp_atomctrl_voltage_table *voltage_table); extern int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_memory_clock_param *mpll_param); +extern int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr, + uint32_t clock_value, pp_atomctrl_memory_clock_param_ai *mpll_param); extern int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr, uint32_t clock_value, pp_atomctrl_clock_dividers_kong *dividers); -- cgit v1.2.1 From 4dc1a2d9288dbba903696d2dd5d83b5311f2d026 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Fri, 17 Nov 2017 11:31:09 -0500 Subject: drm/amd/powerplay: update process pptables Add functionality to fetch gpio 
table from vbios. Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- .../amd/powerplay/hwmgr/process_pptables_v1_0.c | 37 ++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index 8516516eb6cc..f0d48b183d22 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -728,6 +728,32 @@ static int get_mm_clock_voltage_table( return 0; } +static int get_gpio_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_gpio_table **pp_tonga_gpio_table, + const ATOM_Tonga_GPIO_Table *atom_gpio_table) +{ + uint32_t table_size; + struct phm_ppt_v1_gpio_table *pp_gpio_table; + struct phm_ppt_v1_information *pp_table_information = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + table_size = sizeof(struct phm_ppt_v1_gpio_table); + pp_gpio_table = kzalloc(table_size, GFP_KERNEL); + if (!pp_gpio_table) + return -ENOMEM; + + if (pp_table_information->vdd_dep_on_sclk->count < + atom_gpio_table->ucVRHotTriggeredSclkDpmIndex) + PP_ASSERT_WITH_CODE(false, + "SCLK DPM index for VRHot cannot exceed the total sclk level count!",); + else + pp_gpio_table->vrhot_triggered_sclk_dpm_index = + atom_gpio_table->ucVRHotTriggeredSclkDpmIndex; + + *pp_tonga_gpio_table = pp_gpio_table; + + return 0; +} /** * Private Function used during initialization. * Initialize clock voltage dependency @@ -761,11 +787,15 @@ static int init_clock_voltage_dependency( const PPTable_Generic_SubTable_Header *pcie_table = (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + le16_to_cpu(powerplay_table->usPCIETableOffset)); + const ATOM_Tonga_GPIO_Table *gpio_table = + (const ATOM_Tonga_GPIO_Table *)(((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usGPIOTableOffset)); pp_table_information->vdd_dep_on_sclk = NULL; pp_table_information->vdd_dep_on_mclk = NULL; pp_table_information->mm_dep_table = NULL; pp_table_information->pcie_table = NULL; + pp_table_information->gpio_table = NULL; if (powerplay_table->usMMDependencyTableOffset != 0) result = get_mm_clock_voltage_table(hwmgr, @@ -810,6 +840,10 @@ static int init_clock_voltage_dependency( result = get_valid_clk(hwmgr, &pp_table_information->valid_sclk_values, pp_table_information->vdd_dep_on_sclk); + if (!result && gpio_table) + result = get_gpio_table(hwmgr, &pp_table_information->gpio_table, + gpio_table); + return result; } @@ -1116,6 +1150,9 @@ static int pp_tables_v1_0_uninitialize(struct pp_hwmgr *hwmgr) kfree(pp_table_information->pcie_table); pp_table_information->pcie_table = NULL; + kfree(pp_table_information->gpio_table); + pp_table_information->gpio_table = NULL; + kfree(hwmgr->pptable); hwmgr->pptable = NULL; -- cgit v1.2.1 From ac7822b0026fbc33f82023b155542426b1bd211b Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Wed, 11 Apr 2018 15:32:58 -0500 Subject: drm/amd/powerplay: add smumgr support for VEGAM (v2) The smumgr handles communication between the driver and the SMU for power management. 
v2: fix typo (Alex) Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 6 + drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +- .../gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 2382 ++++++++++++++++++++ .../gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h | 75 + 4 files changed, 2464 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index d1052b5e0ca8..eecb11824412 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -40,6 +40,7 @@ extern const struct pp_smumgr_func iceland_smu_funcs; extern const struct pp_smumgr_func tonga_smu_funcs; extern const struct pp_smumgr_func fiji_smu_funcs; extern const struct pp_smumgr_func polaris10_smu_funcs; +extern const struct pp_smumgr_func vegam_smu_funcs; extern const struct pp_smumgr_func vega10_smu_funcs; extern const struct pp_smumgr_func vega12_smu_funcs; extern const struct pp_smumgr_func smu10_smu_funcs; @@ -136,6 +137,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) polaris_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); break; + case CHIP_VEGAM: + hwmgr->smumgr_funcs = &vegam_smu_funcs; + polaris_set_asic_special_caps(hwmgr); + hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 958755075421..0a200406a1ec 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -26,7 +26,7 @@ SMU_MGR = smumgr.o smu8_smumgr.o tonga_smumgr.o fiji_smumgr.o \ polaris10_smumgr.o iceland_smumgr.o \ smu7_smumgr.o vega10_smumgr.o smu10_smumgr.o ci_smumgr.o \ - vega12_smumgr.o + vega12_smumgr.o vegam_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c new file mode 100644 index 000000000000..c9a563399330 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -0,0 +1,2382 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "pp_debug.h" +#include "smumgr.h" +#include "smu_ucode_xfer_vi.h" +#include "vegam_smumgr.h" +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" +#include "oss/oss_3_0_d.h" +#include "gca/gfx_8_0_d.h" +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" +#include "ppatomctrl.h" +#include "cgs_common.h" +#include "smu7_ppsmc.h" + +#include "smu7_dyn_defaults.h" + +#include "smu7_hwmgr.h" +#include "hardwaremanager.h" +#include "ppatomctrl.h" +#include "atombios.h" +#include "pppcielanes.h" + +#include "dce/dce_11_2_d.h" +#include "dce/dce_11_2_sh_mask.h" + +#define PPVEGAM_TARGETACTIVITY_DFLT 50 + +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 +#define POWERTUNE_DEFAULT_SET_MAX 1 +#define VDDC_VDDCI_DELTA 200 +#define MC_CG_ARB_FREQ_F1 0x0b + +#define STRAP_ASIC_RO_LSB 2168 +#define STRAP_ASIC_RO_MSB 2175 + +#define PPSMC_MSG_ApplyAvfsCksOffVoltage ((uint16_t) 0x415) +#define PPSMC_MSG_EnableModeSwitchRLCNotification ((uint16_t) 0x305) + +static const struct vegam_pt_defaults +vegam_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { + /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, + * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ + { 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000, + { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61}, + { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } }, +}; + +static const sclkFcwRange_t Range_Table[NUM_SCLK_RANGE] = { + {VCO_2_4, POSTDIV_DIV_BY_16, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_16, 112, 224, 160}, + {VCO_2_4, POSTDIV_DIV_BY_8, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_8, 112, 224, 160}, + {VCO_2_4, POSTDIV_DIV_BY_4, 75, 160, 112}, + {VCO_3_6, POSTDIV_DIV_BY_4, 112, 216, 160}, + {VCO_2_4, POSTDIV_DIV_BY_2, 75, 160, 108}, + {VCO_3_6, POSTDIV_DIV_BY_2, 112, 216, 160} }; + +static int vegam_smu_init(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data; + + smu_data = kzalloc(sizeof(struct vegam_smumgr), GFP_KERNEL); + if (smu_data == NULL) + return -ENOMEM; + + hwmgr->smu_backend = smu_data; + + if (smu7_init(hwmgr)) { + kfree(smu_data); + return -EINVAL; + } + + return 0; +} + +static int vegam_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr) +{ + int result = 0; + + /* Wait for smc boot up */ + /* PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(smumgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0) */ + + /* Assert reset */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 1); + + result = smu7_upload_smu_firmware_image(hwmgr); + if (result != 0) + return result; + + /* Clear status */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMU_STATUS, 0); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + /* De-assert reset */ + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS, INTERRUPTS_ENABLED, 1); + + + /* Call Test SMU message with 0x20000 offset to trigger 
SMU start */ + smu7_send_msg_to_smc_offset(hwmgr); + + /* Wait done bit to be set */ + /* Check pass/failed indicator */ + + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, SMU_STATUS, SMU_DONE, 0); + + if (1 != PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMU_STATUS, SMU_PASS)) + PP_ASSERT_WITH_CODE(false, "SMU Firmware start failed!", return -1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixFIRMWARE_FLAGS, 0); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 1); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + /* Wait for firmware to initialize */ + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + + return result; +} + +static int vegam_start_smu_in_non_protection_mode(struct pp_hwmgr *hwmgr) +{ + int result = 0; + + /* wait for smc boot up */ + PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND, RCU_UC_EVENTS, boot_seq_done, 0); + + /* Clear firmware interrupt enable flag */ + /* PHM_WRITE_VFPF_INDIRECT_FIELD(pSmuMgr, SMC_IND, SMC_SYSCON_MISC_CNTL, pre_fetcher_en, 1); */ + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixFIRMWARE_FLAGS, 0); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, + rst_reg, 1); + + result = smu7_upload_smu_firmware_image(hwmgr); + if (result != 0) + return result; + + /* Set smc instruct start point at 0x0 */ + smu7_program_jump_on_start(hwmgr); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_CLOCK_CNTL_0, ck_disable, 0); + + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SMC_SYSCON_RESET_CNTL, rst_reg, 0); + + /* Wait for firmware to initialize */ + + PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, + FIRMWARE_FLAGS, INTERRUPTS_ENABLED, 1); + + return result; +} + +static int vegam_start_smu(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + /* Only start SMC if SMC RAM is not running */ + if (!smu7_is_smc_ram_running(hwmgr) && hwmgr->not_vf) { + smu_data->protected_mode = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_MODE)); + smu_data->smu7_data.security_hard_key = (uint8_t)(PHM_READ_VFPF_INDIRECT_FIELD( + hwmgr->device, CGS_IND_REG__SMC, SMU_FIRMWARE, SMU_SEL)); + + /* Check if SMU is running in protected mode */ + if (smu_data->protected_mode == 0) + result = vegam_start_smu_in_non_protection_mode(hwmgr); + else + result = vegam_start_smu_in_protection_mode(hwmgr); + + if (result != 0) + PP_ASSERT_WITH_CODE(0, "Failed to load SMU ucode.", return result); + } + + /* Setup SoftRegsStart here for register lookup in case DummyBackEnd is used and ProcessFirmwareHeader is not executed */ + smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU75_Firmware_Header, SoftRegisters), + &(smu_data->smu7_data.soft_regs_start), + 0x40000); + + result = smu7_request_smu_load_fw(hwmgr); + + return result; +} + +static int vegam_process_firmware_header(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t tmp; + int result; + bool error = false; + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, DpmTable), + &tmp, SMC_RAM_END); + + if (0 == result) + 
smu_data->smu7_data.dpm_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, SoftRegisters), + &tmp, SMC_RAM_END); + + if (!result) { + data->soft_regs_start = tmp; + smu_data->smu7_data.soft_regs_start = tmp; + } + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, mcRegisterTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.mc_reg_table_start = tmp; + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, FanTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.fan_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, mcArbDramTimingTable), + &tmp, SMC_RAM_END); + + if (!result) + smu_data->smu7_data.arb_table_start = tmp; + + error |= (0 != result); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, Version), + &tmp, SMC_RAM_END); + + if (!result) + hwmgr->microcode_version_info.SMC = tmp; + + error |= (0 != result); + + return error ? -1 : 0; +} + +static bool vegam_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, VOLTAGE_CONTROLLER_ON)) + ? true : false; +} + +static uint32_t vegam_get_mac_definition(uint32_t value) +{ + switch (value) { + case SMU_MAX_LEVELS_GRAPHICS: + return SMU75_MAX_LEVELS_GRAPHICS; + case SMU_MAX_LEVELS_MEMORY: + return SMU75_MAX_LEVELS_MEMORY; + case SMU_MAX_LEVELS_LINK: + return SMU75_MAX_LEVELS_LINK; + case SMU_MAX_ENTRIES_SMIO: + return SMU75_MAX_ENTRIES_SMIO; + case SMU_MAX_LEVELS_VDDC: + return SMU75_MAX_LEVELS_VDDC; + case SMU_MAX_LEVELS_VDDGFX: + return SMU75_MAX_LEVELS_VDDGFX; + case SMU_MAX_LEVELS_VDDCI: + return SMU75_MAX_LEVELS_VDDCI; + case SMU_MAX_LEVELS_MVDD: + return SMU75_MAX_LEVELS_MVDD; + case SMU_UVD_MCLK_HANDSHAKE_DISABLE: + return SMU7_UVD_MCLK_HANDSHAKE_DISABLE | + SMU7_VCE_MCLK_HANDSHAKE_DISABLE; + } + + pr_warn("can't get the mac of %x\n", value); + return 0; +} + +static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + smu_data->smc_state_table.UvdBootLevel = 0; + if (table_info->mm_dep_table->count > 0) + smu_data->smc_state_table.UvdBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + offsetof(SMU75_Discrete_DpmTable, + UvdBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0x00FFFFFF; + mm_boot_level_value |= smu_data->smc_state_table.UvdBootLevel << 24; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_UVDDPM_SetEnabledMask, + (uint32_t)(1 << 
smu_data->smc_state_table.UvdBootLevel)); + return 0; +} + +static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smu_data->smc_state_table.VceBootLevel = + (uint8_t) (table_info->mm_dep_table->count - 1); + else + smu_data->smc_state_table.VceBootLevel = 0; + + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, VceBootLevel); + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFF00FFFF; + mm_boot_level_value |= smu_data->smc_state_table.VceBootLevel << 16; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (uint32_t)1 << smu_data->smc_state_table.VceBootLevel); + return 0; +} + +static int vegam_update_samu_smc_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t mm_boot_level_offset, mm_boot_level_value; + + + smu_data->smc_state_table.SamuBootLevel = 0; + mm_boot_level_offset = smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, SamuBootLevel); + + mm_boot_level_offset /= 4; + mm_boot_level_offset *= 4; + mm_boot_level_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset); + mm_boot_level_value &= 0xFFFFFF00; + mm_boot_level_value |= smu_data->smc_state_table.SamuBootLevel << 0; + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SAMUDPM_SetEnabledMask, + (uint32_t)(1 << smu_data->smc_state_table.SamuBootLevel)); + return 0; +} + + +static int vegam_update_bif_smc_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + int max_entry, i; + + max_entry = (SMU75_MAX_LEVELS_LINK < pcie_table->count) ? 
+ SMU75_MAX_LEVELS_LINK : + pcie_table->count; + /* Setup BIF_SCLK levels */ + for (i = 0; i < max_entry; i++) + smu_data->bif_sclk_table[i] = pcie_table->entries[i].pcie_sclk; + return 0; +} + +static int vegam_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type) +{ + switch (type) { + case SMU_UVD_TABLE: + vegam_update_uvd_smc_table(hwmgr); + break; + case SMU_VCE_TABLE: + vegam_update_vce_smc_table(hwmgr); + break; + case SMU_SAMU_TABLE: + vegam_update_samu_smc_table(hwmgr); + break; + case SMU_BIF_TABLE: + vegam_update_bif_smc_table(hwmgr); + break; + default: + break; + } + return 0; +} + +static void vegam_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + if (table_info && + table_info->cac_dtp_table->usPowerTuneDataSetID <= POWERTUNE_DEFAULT_SET_MAX && + table_info->cac_dtp_table->usPowerTuneDataSetID) + smu_data->power_tune_defaults = + &vegam_power_tune_data_set_array + [table_info->cac_dtp_table->usPowerTuneDataSetID - 1]; + else + smu_data->power_tune_defaults = &vegam_power_tune_data_set_array[0]; + +} + +static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t count, level; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + count = data->mvdd_voltage_table.count; + if (count > SMU_MAX_SMIO_LEVELS) + count = SMU_MAX_SMIO_LEVELS; + for (level = 0; level < count; level++) { + table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US( + data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE); + /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/ + table->SmioTable2.Pattern[level].Smio = + (uint8_t) level; + table->Smio[level] |= + data->mvdd_voltage_table.entries[level].smio_low; + } + table->SmioMask2 = data->mvdd_voltage_table.mask_low; + + table->MvddLevelCount = (uint32_t) PP_HOST_TO_SMC_UL(count); + } + + return 0; +} + +static int vegam_populate_smc_vddci_table(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + uint32_t count, level; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + count = data->vddci_voltage_table.count; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + if (count > SMU_MAX_SMIO_LEVELS) + count = SMU_MAX_SMIO_LEVELS; + for (level = 0; level < count; ++level) { + table->SmioTable1.Pattern[level].Voltage = PP_HOST_TO_SMC_US( + data->vddci_voltage_table.entries[level].value * VOLTAGE_SCALE); + table->SmioTable1.Pattern[level].Smio = (uint8_t) level; + + table->Smio[level] |= data->vddci_voltage_table.entries[level].smio_low; + } + } + + table->SmioMask1 = data->vddci_voltage_table.mask_low; + + return 0; +} + +static int vegam_populate_cac_table(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + uint32_t count; + uint8_t index; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_voltage_lookup_table *lookup_table = + table_info->vddc_lookup_table; + /* tables is already swapped, so in order to use the value from it, + * we need to swap it back. 
+ * We are populating vddc CAC data to BapmVddc table + * in split and merged mode + */ + for (count = 0; count < lookup_table->count; count++) { + index = phm_get_voltage_index(lookup_table, + data->vddc_voltage_table.entries[count].value); + table->BapmVddcVidLoSidd[count] = + convert_to_vid(lookup_table->entries[index].us_cac_low); + table->BapmVddcVidHiSidd[count] = + convert_to_vid(lookup_table->entries[index].us_cac_mid); + table->BapmVddcVidHiSidd2[count] = + convert_to_vid(lookup_table->entries[index].us_cac_high); + } + + return 0; +} + +static int vegam_populate_smc_voltage_tables(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + vegam_populate_smc_vddci_table(hwmgr, table); + vegam_populate_smc_mvdd_table(hwmgr, table); + vegam_populate_cac_table(hwmgr, table); + + return 0; +} + +static int vegam_populate_ulv_level(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_Ulv *state) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + + state->CcPwrDynRm = 0; + state->CcPwrDynRm1 = 0; + + state->VddcOffset = (uint16_t) table_info->us_ulv_voltage_offset; + state->VddcOffsetVid = (uint8_t)(table_info->us_ulv_voltage_offset * + VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1); + + state->VddcPhase = data->vddc_phase_shed_control ^ 0x3; + + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(state->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(state->VddcOffset); + + return 0; +} + +static int vegam_populate_ulv_state(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + return vegam_populate_ulv_level(hwmgr, &table->Ulv); +} + +static int vegam_populate_smc_link_level(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = + (struct vegam_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &data->dpm_table; + int i; + + /* Index (dpm_table->pcie_speed_table.count) + * is reserved for PCIE boot level. 
*/ + for (i = 0; i <= dpm_table->pcie_speed_table.count; i++) { + table->LinkLevel[i].PcieGenSpeed = + (uint8_t)dpm_table->pcie_speed_table.dpm_levels[i].value; + table->LinkLevel[i].PcieLaneCount = (uint8_t)encode_pcie_lane_width( + dpm_table->pcie_speed_table.dpm_levels[i].param1); + table->LinkLevel[i].EnabledForActivity = 1; + table->LinkLevel[i].SPC = (uint8_t)(data->pcie_spc_cap & 0xff); + table->LinkLevel[i].DownThreshold = PP_HOST_TO_SMC_UL(5); + table->LinkLevel[i].UpThreshold = PP_HOST_TO_SMC_UL(30); + } + + smu_data->smc_state_table.LinkLevelCount = + (uint8_t)dpm_table->pcie_speed_table.count; + +/* To Do move to hwmgr */ + data->dpm_level_enable_mask.pcie_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table); + + return 0; +} + +static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table, + uint32_t clock, SMU_VoltageLevel *voltage, uint32_t *mvdd) +{ + uint32_t i; + uint16_t vddci; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + *voltage = *mvdd = 0; + + /* clock - voltage dependency table is empty table */ + if (dep_table->count == 0) + return -EINVAL; + + for (i = 0; i < dep_table->count; i++) { + /* find first sclk bigger than request */ + if (dep_table->entries[i].clk >= clock) { + *voltage |= (dep_table->entries[i].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i].vddci) + *voltage |= (dep_table->entries[i].vddci * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else { + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + } + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i].mvdd * + VOLTAGE_SCALE; + + *voltage |= 1 << PHASES_SHIFT; + return 0; + } + } + + /* sclk is bigger than max sclk in the dependence table */ + *voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), + (dep_table->entries[i - 1].vddc - + (uint16_t)VDDC_VDDCI_DELTA)); + + if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control) + *voltage |= (data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE) << VDDCI_SHIFT; + else if (dep_table->entries[i - 1].vddci) + *voltage |= (dep_table->entries[i - 1].vddci * + VOLTAGE_SCALE) << VDDC_SHIFT; + else + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) + *mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE; + else if (dep_table->entries[i].mvdd) + *mvdd = (uint32_t) dep_table->entries[i - 1].mvdd * VOLTAGE_SCALE; + + return 0; +} + +static void vegam_get_sclk_range_table(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t i, ref_clk; + + struct pp_atom_ctrl_sclk_range_table range_table_from_vbios = { { {0} } }; + + ref_clk = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); + + if (0 == atomctrl_get_smc_sclk_range_table(hwmgr, &range_table_from_vbios)) { + for (i = 0; i < NUM_SCLK_RANGE; i++) { + table->SclkFcwRangeTable[i].vco_setting = + 
range_table_from_vbios.entry[i].ucVco_setting; + table->SclkFcwRangeTable[i].postdiv = + range_table_from_vbios.entry[i].ucPostdiv; + table->SclkFcwRangeTable[i].fcw_pcc = + range_table_from_vbios.entry[i].usFcw_pcc; + + table->SclkFcwRangeTable[i].fcw_trans_upper = + range_table_from_vbios.entry[i].usFcw_trans_upper; + table->SclkFcwRangeTable[i].fcw_trans_lower = + range_table_from_vbios.entry[i].usRcw_trans_lower; + + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); + } + return; + } + + for (i = 0; i < NUM_SCLK_RANGE; i++) { + smu_data->range_table[i].trans_lower_frequency = + (ref_clk * Range_Table[i].fcw_trans_lower) >> Range_Table[i].postdiv; + smu_data->range_table[i].trans_upper_frequency = + (ref_clk * Range_Table[i].fcw_trans_upper) >> Range_Table[i].postdiv; + + table->SclkFcwRangeTable[i].vco_setting = Range_Table[i].vco_setting; + table->SclkFcwRangeTable[i].postdiv = Range_Table[i].postdiv; + table->SclkFcwRangeTable[i].fcw_pcc = Range_Table[i].fcw_pcc; + + table->SclkFcwRangeTable[i].fcw_trans_upper = Range_Table[i].fcw_trans_upper; + table->SclkFcwRangeTable[i].fcw_trans_lower = Range_Table[i].fcw_trans_lower; + + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_pcc); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_upper); + CONVERT_FROM_HOST_TO_SMC_US(table->SclkFcwRangeTable[i].fcw_trans_lower); + } +} + +static int vegam_calculate_sclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, SMU_SclkSetting *sclk_setting) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + const SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct pp_atomctrl_clock_dividers_ai dividers; + uint32_t ref_clock; + uint32_t pcc_target_percent, pcc_target_freq, ss_target_percent, ss_target_freq; + uint8_t i; + int result; + uint64_t temp; + + sclk_setting->SclkFrequency = clock; + /* get the engine clock dividers for this clock value */ + result = atomctrl_get_engine_pll_dividers_ai(hwmgr, clock, ÷rs); + if (result == 0) { + sclk_setting->Fcw_int = dividers.usSclk_fcw_int; + sclk_setting->Fcw_frac = dividers.usSclk_fcw_frac; + sclk_setting->Pcc_fcw_int = dividers.usPcc_fcw_int; + sclk_setting->PllRange = dividers.ucSclkPllRange; + sclk_setting->Sclk_slew_rate = 0x400; + sclk_setting->Pcc_up_slew_rate = dividers.usPcc_fcw_slew_frac; + sclk_setting->Pcc_down_slew_rate = 0xffff; + sclk_setting->SSc_En = dividers.ucSscEnable; + sclk_setting->Fcw1_int = dividers.usSsc_fcw1_int; + sclk_setting->Fcw1_frac = dividers.usSsc_fcw1_frac; + sclk_setting->Sclk_ss_slew_rate = dividers.usSsc_fcw_slew_frac; + return result; + } + + ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev); + + for (i = 0; i < NUM_SCLK_RANGE; i++) { + if (clock > smu_data->range_table[i].trans_lower_frequency + && clock <= smu_data->range_table[i].trans_upper_frequency) { + sclk_setting->PllRange = i; + break; + } + } + + sclk_setting->Fcw_int = (uint16_t) + ((clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / + ref_clock); + temp = clock << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; + temp <<= 0x10; + do_div(temp, ref_clock); + sclk_setting->Fcw_frac = temp & 0xffff; + + pcc_target_percent = 10; /* Hardcode 10% for now. 
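+ * Pcc_fcw_int below is computed from the clock reduced by this percentage.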
*/ + pcc_target_freq = clock - (clock * pcc_target_percent / 100); + sclk_setting->Pcc_fcw_int = (uint16_t) + ((pcc_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / + ref_clock); + + ss_target_percent = 2; /* Hardcode 2% for now. */ + sclk_setting->SSc_En = 0; + if (ss_target_percent) { + sclk_setting->SSc_En = 1; + ss_target_freq = clock - (clock * ss_target_percent / 100); + sclk_setting->Fcw1_int = (uint16_t) + ((ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv) / + ref_clock); + temp = ss_target_freq << table->SclkFcwRangeTable[sclk_setting->PllRange].postdiv; + temp <<= 0x10; + do_div(temp, ref_clock); + sclk_setting->Fcw1_frac = temp & 0xffff; + } + + return 0; +} + +static uint8_t vegam_get_sleep_divider_id_from_clock(uint32_t clock, + uint32_t clock_insr) +{ + uint8_t i; + uint32_t temp; + uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK); + + PP_ASSERT_WITH_CODE((clock >= min), + "Engine clock can't satisfy stutter requirement!", + return 0); + for (i = 31; ; i--) { + temp = clock / (i + 1); + + if (temp >= min || i == 0) + break; + } + return i; +} + +static int vegam_populate_single_graphic_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU75_Discrete_GraphicsLevel *level) +{ + int result; + /* PP_Clocks minClocks; */ + uint32_t mvdd; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMU_SclkSetting curr_sclk_setting = { 0 }; + + result = vegam_calculate_sclk_params(hwmgr, clock, &curr_sclk_setting); + + /* populate graphics levels */ + result = vegam_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, clock, + &level->MinVoltage, &mvdd); + + PP_ASSERT_WITH_CODE((0 == result), + "can not find VDDC voltage value for " + "VDDC engine clock dependency table", + return result); + level->ActivityLevel = (uint16_t)(SclkDPMTuning_VEGAM >> DPMTuning_Activity_Shift); + + level->CcPwrDynRm = 0; + level->CcPwrDynRm1 = 0; + level->EnabledForActivity = 0; + level->EnabledForThrottle = 1; + level->VoltageDownHyst = 0; + level->PowerThrottle = 0; + data->display_timing.min_clock_in_sr = hwmgr->display_config->min_core_set_clock_in_sr; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) + level->DeepSleepDivId = vegam_get_sleep_divider_id_from_clock(clock, + hwmgr->display_config->min_core_set_clock_in_sr); + + level->SclkSetting = curr_sclk_setting; + + CONVERT_FROM_HOST_TO_SMC_UL(level->MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(level->CcPwrDynRm1); + CONVERT_FROM_HOST_TO_SMC_US(level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(level->SclkSetting.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_up_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Pcc_down_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_int); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Fcw1_frac); + CONVERT_FROM_HOST_TO_SMC_US(level->SclkSetting.Sclk_ss_slew_rate); + return 0; +} + +static int vegam_populate_all_graphic_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + 
struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *pcie_table = table_info->pcie_table; + uint8_t pcie_entry_cnt = (uint8_t) hw_data->dpm_table.pcie_speed_table.count; + int result = 0; + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, GraphicsLevel); + uint32_t array_size = sizeof(struct SMU75_Discrete_GraphicsLevel) * + SMU75_MAX_LEVELS_GRAPHICS; + struct SMU75_Discrete_GraphicsLevel *levels = + smu_data->smc_state_table.GraphicsLevel; + uint32_t i, max_entry; + uint8_t hightest_pcie_level_enabled = 0, + lowest_pcie_level_enabled = 0, + mid_pcie_level_enabled = 0, + count = 0; + + vegam_get_sclk_range_table(hwmgr, &(smu_data->smc_state_table)); + + for (i = 0; i < dpm_table->sclk_table.count; i++) { + + result = vegam_populate_single_graphic_level(hwmgr, + dpm_table->sclk_table.dpm_levels[i].value, + &(smu_data->smc_state_table.GraphicsLevel[i])); + if (result) + return result; + + levels[i].UpHyst = (uint8_t) + (SclkDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift); + levels[i].DownHyst = (uint8_t) + (SclkDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift); + /* Making sure only DPM level 0-1 have Deep Sleep Div ID populated. */ + if (i > 1) + levels[i].DeepSleepDivId = 0; + } + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SPLLShutdownSupport)) + smu_data->smc_state_table.GraphicsLevel[0].SclkSetting.SSc_En = 0; + + smu_data->smc_state_table.GraphicsDpmLevelCount = + (uint8_t)dpm_table->sclk_table.count; + hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->sclk_table); + + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].EnabledForActivity = + (hw_data->dpm_level_enable_mask.sclk_dpm_enable_mask >> i) & 0x1; + + if (pcie_table != NULL) { + PP_ASSERT_WITH_CODE((1 <= pcie_entry_cnt), + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + max_entry = pcie_entry_cnt - 1; + for (i = 0; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = + (uint8_t) ((i < max_entry) ? i : max_entry); + } else { + while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (hightest_pcie_level_enabled + 1))) != 0)) + hightest_pcie_level_enabled++; + + while (hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << lowest_pcie_level_enabled)) == 0)) + lowest_pcie_level_enabled++; + + while ((count < hightest_pcie_level_enabled) && + ((hw_data->dpm_level_enable_mask.pcie_dpm_enable_mask & + (1 << (lowest_pcie_level_enabled + 1 + count))) == 0)) + count++; + + mid_pcie_level_enabled = (lowest_pcie_level_enabled + 1 + count) < + hightest_pcie_level_enabled ? 
+ (lowest_pcie_level_enabled + 1 + count) : + hightest_pcie_level_enabled; + + /* set pcieDpmLevel to hightest_pcie_level_enabled */ + for (i = 2; i < dpm_table->sclk_table.count; i++) + levels[i].pcieDpmLevel = hightest_pcie_level_enabled; + + /* set pcieDpmLevel to lowest_pcie_level_enabled */ + levels[0].pcieDpmLevel = lowest_pcie_level_enabled; + + /* set pcieDpmLevel to mid_pcie_level_enabled */ + levels[1].pcieDpmLevel = mid_pcie_level_enabled; + } + /* level count will send to smc once at init smc table and never change */ + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + +static int vegam_calculate_mclk_params(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level) +{ + struct pp_atomctrl_memory_clock_param_ai mpll_param; + + PP_ASSERT_WITH_CODE(!atomctrl_get_memory_pll_dividers_ai(hwmgr, + clock, &mpll_param), + "Failed to retrieve memory pll parameter.", + return -EINVAL); + + mem_level->MclkFrequency = (uint32_t)mpll_param.ulClock; + mem_level->Fcw_int = (uint16_t)mpll_param.ulMclk_fcw_int; + mem_level->Fcw_frac = (uint16_t)mpll_param.ulMclk_fcw_frac; + mem_level->Postdiv = (uint8_t)mpll_param.ulPostDiv; + + return 0; +} + +static int vegam_populate_single_memory_level(struct pp_hwmgr *hwmgr, + uint32_t clock, struct SMU75_Discrete_MemoryLevel *mem_level) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + int result = 0; + uint32_t mclk_stutter_mode_threshold = 60000; + + + if (table_info->vdd_dep_on_mclk) { + result = vegam_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, clock, + &mem_level->MinVoltage, &mem_level->MinMvdd); + PP_ASSERT_WITH_CODE(!result, + "can not find MinVddc voltage value from memory " + "VDDC voltage dependency table", return result); + } + + result = vegam_calculate_mclk_params(hwmgr, clock, mem_level); + PP_ASSERT_WITH_CODE(!result, + "Failed to calculate mclk params.", + return -EINVAL); + + mem_level->EnabledForThrottle = 1; + mem_level->EnabledForActivity = 0; + mem_level->VoltageDownHyst = 0; + mem_level->ActivityLevel = (uint16_t) + (MemoryDPMTuning_VEGAM >> DPMTuning_Activity_Shift); + mem_level->StutterEnable = false; + mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; + + data->display_timing.num_existing_displays = hwmgr->display_config->num_display; + + if (mclk_stutter_mode_threshold && + (clock <= mclk_stutter_mode_threshold) && + (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, + STUTTER_ENABLE) & 0x1)) + mem_level->StutterEnable = true; + + if (!result) { + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinMvdd); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(mem_level->ActivityLevel); + CONVERT_FROM_HOST_TO_SMC_UL(mem_level->MinVoltage); + } + + return result; +} + +static int vegam_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct smu7_dpm_table *dpm_table = &hw_data->dpm_table; + int result; + /* populate MCLK dpm table to SMU7 */ + uint32_t array = smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, MemoryLevel); + uint32_t array_size = 
sizeof(SMU75_Discrete_MemoryLevel) * + SMU75_MAX_LEVELS_MEMORY; + struct SMU75_Discrete_MemoryLevel *levels = + smu_data->smc_state_table.MemoryLevel; + uint32_t i; + + for (i = 0; i < dpm_table->mclk_table.count; i++) { + PP_ASSERT_WITH_CODE((0 != dpm_table->mclk_table.dpm_levels[i].value), + "can not populate memory level as memory clock is zero", + return -EINVAL); + result = vegam_populate_single_memory_level(hwmgr, + dpm_table->mclk_table.dpm_levels[i].value, + &levels[i]); + + if (result) + return result; + + levels[i].UpHyst = (uint8_t) + (MemoryDPMTuning_VEGAM >> DPMTuning_Uphyst_Shift); + levels[i].DownHyst = (uint8_t) + (MemoryDPMTuning_VEGAM >> DPMTuning_Downhyst_Shift); + } + + smu_data->smc_state_table.MemoryDpmLevelCount = + (uint8_t)dpm_table->mclk_table.count; + hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask = + phm_get_dpm_level_enable_mask_value(&dpm_table->mclk_table); + + for (i = 0; i < dpm_table->mclk_table.count; i++) + levels[i].EnabledForActivity = + (hw_data->dpm_level_enable_mask.mclk_dpm_enable_mask >> i) & 0x1; + + levels[dpm_table->mclk_table.count - 1].DisplayWatermark = + PPSMC_DISPLAY_WATERMARK_HIGH; + + /* level count will send to smc once at init smc table and never change */ + result = smu7_copy_bytes_to_smc(hwmgr, array, (uint8_t *)levels, + (uint32_t)array_size, SMC_RAM_END); + + return result; +} + +static int vegam_populate_mvdd_value(struct pp_hwmgr *hwmgr, + uint32_t mclk, SMIO_Pattern *smio_pat) +{ + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint32_t i = 0; + + if (SMU7_VOLTAGE_CONTROL_NONE != data->mvdd_control) { + /* find mvdd value which clock is more than request */ + for (i = 0; i < table_info->vdd_dep_on_mclk->count; i++) { + if (mclk <= table_info->vdd_dep_on_mclk->entries[i].clk) { + smio_pat->Voltage = data->mvdd_voltage_table.entries[i].value; + break; + } + } + PP_ASSERT_WITH_CODE(i < table_info->vdd_dep_on_mclk->count, + "MVDD Voltage is outside the supported range.", + return -EINVAL); + } else + return -EINVAL; + + return 0; +} + +static int vegam_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + int result = 0; + uint32_t sclk_frequency; + const struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + SMIO_Pattern vol_level; + uint32_t mvdd; + uint16_t us_mvdd; + + table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC; + + /* Get MinVoltage and Frequency from DPM0, + * already converted to SMC_UL */ + sclk_frequency = data->vbios_boot_state.sclk_bootup_value; + result = vegam_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_sclk, + sclk_frequency, + &table->ACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE(!result, + "Cannot find ACPI VDDC voltage value " + "in Clock Dependency Table", + ); + + result = vegam_calculate_sclk_params(hwmgr, sclk_frequency, + &(table->ACPILevel.SclkSetting)); + PP_ASSERT_WITH_CODE(!result, + "Error retrieving Engine Clock dividers from VBIOS.", + return result); + + table->ACPILevel.DeepSleepDivId = 0; + table->ACPILevel.CcPwrDynRm = 0; + table->ACPILevel.CcPwrDynRm1 = 0; + + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.Flags); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.MinVoltage); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm); + CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.CcPwrDynRm1); + + 
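+ /* Convert the ACPI level SCLK PLL settings to SMC byte order */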
CONVERT_FROM_HOST_TO_SMC_UL(table->ACPILevel.SclkSetting.SclkFrequency); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw_frac); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_fcw_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_up_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Pcc_down_slew_rate); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_int); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Fcw1_frac); + CONVERT_FROM_HOST_TO_SMC_US(table->ACPILevel.SclkSetting.Sclk_ss_slew_rate); + + + /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ + table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; + result = vegam_get_dependency_volt_by_clk(hwmgr, + table_info->vdd_dep_on_mclk, + table->MemoryACPILevel.MclkFrequency, + &table->MemoryACPILevel.MinVoltage, &mvdd); + PP_ASSERT_WITH_CODE((0 == result), + "Cannot find ACPI VDDCI voltage value " + "in Clock Dependency Table", + ); + + us_mvdd = 0; + if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) || + (data->mclk_dpm_key_disabled)) + us_mvdd = data->vbios_boot_state.mvdd_bootup_value; + else { + if (!vegam_populate_mvdd_value(hwmgr, + data->dpm_table.mclk_table.dpm_levels[0].value, + &vol_level)) + us_mvdd = vol_level.Voltage; + } + + if (!vegam_populate_mvdd_value(hwmgr, 0, &vol_level)) + table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage); + else + table->MemoryACPILevel.MinMvdd = 0; + + table->MemoryACPILevel.StutterEnable = false; + + table->MemoryACPILevel.EnabledForThrottle = 0; + table->MemoryACPILevel.EnabledForActivity = 0; + table->MemoryACPILevel.UpHyst = 0; + table->MemoryACPILevel.DownHyst = 100; + table->MemoryACPILevel.VoltageDownHyst = 0; + table->MemoryACPILevel.ActivityLevel = + PP_HOST_TO_SMC_US(data->current_profile_setting.mclk_activity); + + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->MemoryACPILevel.MinVoltage); + + return result; +} + +static int vegam_populate_smc_vce_level(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->VceLevelCount = (uint8_t)(mm_table->count); + table->VceBootLevel = 0; + + for (count = 0; count < table->VceLevelCount; count++) { + table->VceLevel[count].Frequency = mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage = 0; + table->VceLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + + table->VceLevel[count].MinVoltage |= + (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + 
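+ /* MinVoltage packs VDDC, VDDCI and the phase count into a single dword */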
table->VceLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /*retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->VceLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for VCE engine clock", + return result); + + table->VceLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->VceLevel[count].MinVoltage); + } + return result; +} + +static int vegam_populate_smc_samu_level(struct pp_hwmgr *hwmgr, + SMU75_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->SamuBootLevel = 0; + table->SamuLevelCount = (uint8_t)(mm_table->count); + + for (count = 0; count < table->SamuLevelCount; count++) { + /* not sure whether we need evclk or not */ + table->SamuLevel[count].MinVoltage = 0; + table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; + table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * + VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->SamuLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->SamuLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->SamuLevel[count].Frequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for samu clock", return result); + + table->SamuLevel[count].Divider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].Frequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->SamuLevel[count].MinVoltage); + } + return result; +} + +static int vegam_populate_memory_timing_parameters(struct pp_hwmgr *hwmgr, + int32_t eng_clock, int32_t mem_clock, + SMU75_Discrete_MCArbDramTimingTableEntry *arb_regs) +{ + uint32_t dram_timing; + uint32_t dram_timing2; + uint32_t burst_time; + uint32_t rfsh_rate; + uint32_t misc3; + + int result; + + result = atomctrl_set_engine_dram_timings_rv770(hwmgr, + eng_clock, mem_clock); + PP_ASSERT_WITH_CODE(result == 0, + "Error calling VBIOS to set DRAM_TIMING.", + return result); + + dram_timing = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING); + dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2); + burst_time = cgs_read_register(hwmgr->device, mmMC_ARB_BURST_TIME); + rfsh_rate = cgs_read_register(hwmgr->device, mmMC_ARB_RFSH_RATE); + misc3 = cgs_read_register(hwmgr->device, mmMC_ARB_MISC3); + + arb_regs->McArbDramTiming = PP_HOST_TO_SMC_UL(dram_timing); + arb_regs->McArbDramTiming2 = PP_HOST_TO_SMC_UL(dram_timing2); + arb_regs->McArbBurstTime = PP_HOST_TO_SMC_UL(burst_time); + arb_regs->McArbRfshRate = PP_HOST_TO_SMC_UL(rfsh_rate); + arb_regs->McArbMisc3 = PP_HOST_TO_SMC_UL(misc3); + + 
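+ /* One entry per SCLK/MCLK pair; the caller uploads the complete ARB table to the SMC */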
return 0; +} + +static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct SMU75_Discrete_MCArbDramTimingTable arb_regs = {0}; + uint32_t i, j; + int result = 0; + + for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) { + for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) { + result = vegam_populate_memory_timing_parameters(hwmgr, + hw_data->dpm_table.sclk_table.dpm_levels[i].value, + hw_data->dpm_table.mclk_table.dpm_levels[j].value, + &arb_regs.entries[i][j]); + if (result) + return result; + } + } + + result = smu7_copy_bytes_to_smc( + hwmgr, + smu_data->smu7_data.arb_table_start, + (uint8_t *)&arb_regs, + sizeof(SMU75_Discrete_MCArbDramTimingTable), + SMC_RAM_END); + return result; +} + +static int vegam_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + int result = -EINVAL; + uint8_t count; + struct pp_atomctrl_clock_dividers_vi dividers; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t vddci; + + table->UvdLevelCount = (uint8_t)(mm_table->count); + table->UvdBootLevel = 0; + + for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].MinVoltage = 0; + table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; + table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; + table->UvdLevel[count].MinVoltage |= + (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; + + if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) + vddci = (uint32_t)phm_find_closest_vddci(&(data->vddci_voltage_table), + mm_table->entries[count].vddc - VDDC_VDDCI_DELTA); + else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) + vddci = mm_table->entries[count].vddc - VDDC_VDDCI_DELTA; + else + vddci = (data->vbios_boot_state.vddci_bootup_value * VOLTAGE_SCALE) << VDDCI_SHIFT; + + table->UvdLevel[count].MinVoltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + table->UvdLevel[count].MinVoltage |= 1 << PHASES_SHIFT; + + /* retrieve divider value for VBIOS */ + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].VclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Vclk clock", return result); + + table->UvdLevel[count].VclkDivider = (uint8_t)dividers.pll_post_divider; + + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + table->UvdLevel[count].DclkFrequency, ÷rs); + PP_ASSERT_WITH_CODE((0 == result), + "can not find divide id for Dclk clock", return result); + + table->UvdLevel[count].DclkDivider = (uint8_t)dividers.pll_post_divider; + + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].VclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].DclkFrequency); + CONVERT_FROM_HOST_TO_SMC_UL(table->UvdLevel[count].MinVoltage); + } + + return result; +} + +static int vegam_populate_smc_boot_level(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + int result = 0; + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + table->GraphicsBootLevel = 0; + table->MemoryBootLevel = 0; + + /* find boot level from dpm table */ + result = phm_find_boot_level(&(data->dpm_table.sclk_table), + 
data->vbios_boot_state.sclk_bootup_value, + (uint32_t *)&(table->GraphicsBootLevel)); + + result = phm_find_boot_level(&(data->dpm_table.mclk_table), + data->vbios_boot_state.mclk_bootup_value, + (uint32_t *)&(table->MemoryBootLevel)); + + table->BootVddc = data->vbios_boot_state.vddc_bootup_value * + VOLTAGE_SCALE; + table->BootVddci = data->vbios_boot_state.vddci_bootup_value * + VOLTAGE_SCALE; + table->BootMVdd = data->vbios_boot_state.mvdd_bootup_value * + VOLTAGE_SCALE; + + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddc); + CONVERT_FROM_HOST_TO_SMC_US(table->BootVddci); + CONVERT_FROM_HOST_TO_SMC_US(table->BootMVdd); + + return 0; +} + +static int vegam_populate_smc_initial_state(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint8_t count, level; + + count = (uint8_t)(table_info->vdd_dep_on_sclk->count); + + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_sclk->entries[level].clk >= + hw_data->vbios_boot_state.sclk_bootup_value) { + smu_data->smc_state_table.GraphicsBootLevel = level; + break; + } + } + + count = (uint8_t)(table_info->vdd_dep_on_mclk->count); + for (level = 0; level < count; level++) { + if (table_info->vdd_dep_on_mclk->entries[level].clk >= + hw_data->vbios_boot_state.mclk_bootup_value) { + smu_data->smc_state_table.MemoryBootLevel = level; + break; + } + } + + return 0; +} + +static uint16_t scale_fan_gain_settings(uint16_t raw_setting) +{ + uint32_t tmp; + tmp = raw_setting * 4096 / 100; + return (uint16_t)tmp; +} + +static int vegam_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults; + SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_cac_tdp_table *cac_dtp_table = table_info->cac_dtp_table; + struct pp_advance_fan_control_parameters *fan_table = + &hwmgr->thermal_controller.advanceFanControlParameters; + int i, j, k; + const uint16_t *pdef1; + const uint16_t *pdef2; + + table->DefaultTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); + table->TargetTdp = PP_HOST_TO_SMC_US((uint16_t)(cac_dtp_table->usTDP * 128)); + + PP_ASSERT_WITH_CODE(cac_dtp_table->usTargetOperatingTemp <= 255, + "Target Operating Temp is out of Range!", + ); + + table->TemperatureLimitEdge = PP_HOST_TO_SMC_US( + cac_dtp_table->usTargetOperatingTemp * 256); + table->TemperatureLimitHotspot = PP_HOST_TO_SMC_US( + cac_dtp_table->usTemperatureLimitHotspot * 256); + table->FanGainEdge = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainEdge)); + table->FanGainHotspot = PP_HOST_TO_SMC_US( + scale_fan_gain_settings(fan_table->usFanGainHotspot)); + + pdef1 = defaults->BAPMTI_R; + pdef2 = defaults->BAPMTI_RC; + + for (i = 0; i < SMU75_DTE_ITERATIONS; i++) { + for (j = 0; j < SMU75_DTE_SOURCES; j++) { + for (k = 0; k < SMU75_DTE_SINKS; k++) { + table->BAPMTI_R[i][j][k] = PP_HOST_TO_SMC_US(*pdef1); + table->BAPMTI_RC[i][j][k] = PP_HOST_TO_SMC_US(*pdef2); + pdef1++; + pdef2++; + } + } + } + + return 0; +} + +static int vegam_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) +{ + uint32_t ro, efuse, volt_without_cks, volt_with_cks, 
value, max, min; + struct vegam_smumgr *smu_data = + (struct vegam_smumgr *)(hwmgr->smu_backend); + + uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + uint32_t mask = (1 << ((STRAP_ASIC_RO_MSB - STRAP_ASIC_RO_LSB) + 1)) - 1; + + stretch_amount = (uint8_t)table_info->cac_dtp_table->usClockStretchAmount; + + atomctrl_read_efuse(hwmgr, STRAP_ASIC_RO_LSB, STRAP_ASIC_RO_MSB, + mask, &efuse); + + min = 1200; + max = 2500; + + ro = efuse * (max - min) / 255 + min; + + /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ + for (i = 0; i < sclk_table->count; i++) { + smu_data->smc_state_table.Sclk_CKS_masterEn0_7 |= + sclk_table->entries[i].cks_enable << i; + volt_without_cks = (uint32_t)((2753594000U + (sclk_table->entries[i].clk/100) * + 136418 - (ro - 70) * 1000000) / + (2424180 - (sclk_table->entries[i].clk/100) * 1132925/1000)); + volt_with_cks = (uint32_t)((2797202000U + sclk_table->entries[i].clk/100 * + 3232 - (ro - 65) * 1000000) / + (2522480 - sclk_table->entries[i].clk/100 * 115764/100)); + + if (volt_without_cks >= volt_with_cks) + volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + + sclk_table->entries[i].cks_voffset) * 100 + 624) / 625); + + smu_data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; + } + + smu_data->smc_state_table.LdoRefSel = + (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? + table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 5; + /* Populate CKS Lookup Table */ + if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) + stretch_amount2 = 0; + else if (stretch_amount == 3 || stretch_amount == 4) + stretch_amount2 = 1; + else { + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + PP_ASSERT_WITH_CODE(false, + "Stretch Amount in PPTable not supported\n", + return -EINVAL); + } + + value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); + value &= 0xFFFFFFFE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); + + return 0; +} + +static bool vegam_is_hw_avfs_present(struct pp_hwmgr *hwmgr) +{ + uint32_t efuse; + + efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixSMU_EFUSE_0 + (49 * 4)); + efuse &= 0x00000001; + + if (efuse) + return true; + + return false; +} + +static int vegam_populate_avfs_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); + int result = 0; + struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; + AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; + AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; + uint32_t tmp, i; + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + if (!hwmgr->avfs_supported) + return 0; + + result = atomctrl_get_avfs_information(hwmgr, &avfs_params); + + if (0 == result) { + table->BTCGB_VDROOP_TABLE[0].a0 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); + table->BTCGB_VDROOP_TABLE[0].a1 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); + table->BTCGB_VDROOP_TABLE[0].a2 = + 
PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); + table->BTCGB_VDROOP_TABLE[1].a0 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); + table->BTCGB_VDROOP_TABLE[1].a1 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); + table->BTCGB_VDROOP_TABLE[1].a2 = + PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); + table->AVFSGB_FUSE_TABLE[0].m1 = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); + table->AVFSGB_FUSE_TABLE[0].m2 = + PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); + table->AVFSGB_FUSE_TABLE[0].b = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); + table->AVFSGB_FUSE_TABLE[0].m1_shift = 24; + table->AVFSGB_FUSE_TABLE[0].m2_shift = 12; + table->AVFSGB_FUSE_TABLE[1].m1 = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); + table->AVFSGB_FUSE_TABLE[1].m2 = + PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); + table->AVFSGB_FUSE_TABLE[1].b = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); + table->AVFSGB_FUSE_TABLE[1].m1_shift = 24; + table->AVFSGB_FUSE_TABLE[1].m2_shift = 12; + table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); + AVFS_meanNsigma.Aconstant[0] = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); + AVFS_meanNsigma.Aconstant[1] = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); + AVFS_meanNsigma.Aconstant[2] = + PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); + AVFS_meanNsigma.DC_tol_sigma = + PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); + AVFS_meanNsigma.Platform_mean = + PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); + AVFS_meanNsigma.PSM_Age_CompFactor = + PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); + AVFS_meanNsigma.Platform_sigma = + PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); + + for (i = 0; i < sclk_table->count; i++) { + AVFS_meanNsigma.Static_Voltage_Offset[i] = + (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); + AVFS_SclkOffset.Sclk_Offset[i] = + PP_HOST_TO_SMC_US((uint16_t) + (sclk_table->entries[i].sclk_offset) / 100); + } + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, AvfsMeanNSigma), + &tmp, SMC_RAM_END); + smu7_copy_bytes_to_smc(hwmgr, + tmp, + (uint8_t *)&AVFS_meanNsigma, + sizeof(AVFS_meanNsigma_t), + SMC_RAM_END); + + result = smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, AvfsSclkOffsetTable), + &tmp, SMC_RAM_END); + smu7_copy_bytes_to_smc(hwmgr, + tmp, + (uint8_t *)&AVFS_SclkOffset, + sizeof(AVFS_Sclk_Offset_t), + SMC_RAM_END); + + data->avfs_vdroop_override_setting = + (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); + data->apply_avfs_cks_off_voltage = + (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? 
true : false; + } + return result; +} + +static int vegam_populate_vr_config(struct pp_hwmgr *hwmgr, + struct SMU75_Discrete_DpmTable *table) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = + (struct vegam_smumgr *)(hwmgr->smu_backend); + uint16_t config; + + config = VR_MERGED_WITH_VDDC; + table->VRConfig |= (config << VRCONF_VDDGFX_SHIFT); + + /* Set Vddc Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) { + config = VR_SVI2_PLANE_1; + table->VRConfig |= config; + } else { + PP_ASSERT_WITH_CODE(false, + "VDDC should be on SVI2 control in merged mode!", + ); + } + /* Set Vddci Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) { + config = VR_SVI2_PLANE_2; /* only in merged mode */ + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) { + config = VR_SMIO_PATTERN_1; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_VDDCI_SHIFT); + } + /* Set Mvdd Voltage Controller */ + if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { + if (config != VR_SVI2_PLANE_2) { + config = VR_SVI2_PLANE_2; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + smu_data->smu7_data.soft_regs_start + + offsetof(SMU75_SoftRegisters, AllowMvddSwitch), + 0x1); + } else { + PP_ASSERT_WITH_CODE(false, + "SVI2 Plane 2 is already taken, set MVDD as Static",); + config = VR_STATIC_VOLTAGE; + table->VRConfig = (config << VRCONF_MVDD_SHIFT); + } + } else if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { + config = VR_SMIO_PATTERN_2; + table->VRConfig = (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, + CGS_IND_REG__SMC, + smu_data->smu7_data.soft_regs_start + + offsetof(SMU75_SoftRegisters, AllowMvddSwitch), + 0x1); + } else { + config = VR_STATIC_VOLTAGE; + table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + } + + return 0; +} + +static int vegam_populate_svi_load_line(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults; + + smu_data->power_tune_table.SviLoadLineEn = defaults->SviLoadLineEn; + smu_data->power_tune_table.SviLoadLineVddC = defaults->SviLoadLineVddC; + smu_data->power_tune_table.SviLoadLineTrimVddC = 3; + smu_data->power_tune_table.SviLoadLineOffsetVddC = 0; + + return 0; +} + +static int vegam_populate_tdc_limit(struct pp_hwmgr *hwmgr) +{ + uint16_t tdc_limit; + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults; + + tdc_limit = (uint16_t)(table_info->cac_dtp_table->usTDC * 128); + smu_data->power_tune_table.TDC_VDDC_PkgLimit = + CONVERT_FROM_HOST_TO_SMC_US(tdc_limit); + smu_data->power_tune_table.TDC_VDDC_ThrottleReleaseLimitPerc = + defaults->TDC_VDDC_ThrottleReleaseLimitPerc; + smu_data->power_tune_table.TDC_MAWt = defaults->TDC_MAWt; + + return 0; +} + +static int vegam_populate_dw8(struct pp_hwmgr *hwmgr, uint32_t fuse_table_offset) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + const struct vegam_pt_defaults *defaults = smu_data->power_tune_defaults; + uint32_t temp; + + if 
(smu7_read_smc_sram_dword(hwmgr,
+ fuse_table_offset +
+ offsetof(SMU75_Discrete_PmFuses, TdcWaterfallCtl),
+ (uint32_t *)&temp, SMC_RAM_END))
+ PP_ASSERT_WITH_CODE(false,
+ "Attempt to read PmFuses.TdcWaterfallCtl from SMC Failed!",
+ return -EINVAL);
+ else {
+ smu_data->power_tune_table.TdcWaterfallCtl = defaults->TdcWaterfallCtl;
+ smu_data->power_tune_table.LPMLTemperatureMin =
+ (uint8_t)((temp >> 16) & 0xff);
+ smu_data->power_tune_table.LPMLTemperatureMax =
+ (uint8_t)((temp >> 8) & 0xff);
+ smu_data->power_tune_table.Reserved = (uint8_t)(temp & 0xff);
+ }
+ return 0;
+}
+
+static int vegam_populate_temperature_scaler(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero. */
+ for (i = 0; i < 16; i++)
+ smu_data->power_tune_table.LPMLTemperatureScaler[i] = 0;
+
+ return 0;
+}
+
+static int vegam_populate_fuzzy_fan(struct pp_hwmgr *hwmgr)
+{
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+/* TODO: move to hwmgr */
+ if ((hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity & (1 << 15))
+ || 0 == hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity)
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ hwmgr->thermal_controller.advanceFanControlParameters.usDefaultFanOutputSensitivity;
+
+ smu_data->power_tune_table.FuzzyFan_PwmSetDelta = PP_HOST_TO_SMC_US(
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity);
+ return 0;
+}
+
+static int vegam_populate_gnb_lpml(struct pp_hwmgr *hwmgr)
+{
+ int i;
+ struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend);
+
+ /* Currently not used. Set all to zero.
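+ * The array is still uploaded to the SMC as part of power_tune_table, so zero it explicitly.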
*/ + for (i = 0; i < 16; i++) + smu_data->power_tune_table.GnbLPML[i] = 0; + + return 0; +} + +static int vegam_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + uint16_t hi_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd; + uint16_t lo_sidd = smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd; + struct phm_cac_tdp_table *cac_table = table_info->cac_dtp_table; + + hi_sidd = (uint16_t)(cac_table->usHighCACLeakage / 100 * 256); + lo_sidd = (uint16_t)(cac_table->usLowCACLeakage / 100 * 256); + + smu_data->power_tune_table.BapmVddCBaseLeakageHiSidd = + CONVERT_FROM_HOST_TO_SMC_US(hi_sidd); + smu_data->power_tune_table.BapmVddCBaseLeakageLoSidd = + CONVERT_FROM_HOST_TO_SMC_US(lo_sidd); + + return 0; +} + +static int vegam_populate_pm_fuses(struct pp_hwmgr *hwmgr) +{ + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + uint32_t pm_fuse_table_offset; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (smu7_read_smc_sram_dword(hwmgr, + SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU75_Firmware_Header, PmFuseTable), + &pm_fuse_table_offset, SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to get pm_fuse_table_offset Failed!", + return -EINVAL); + + if (vegam_populate_svi_load_line(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate SviLoadLine Failed!", + return -EINVAL); + + if (vegam_populate_tdc_limit(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TDCLimit Failed!", return -EINVAL); + + if (vegam_populate_dw8(hwmgr, pm_fuse_table_offset)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate TdcWaterfallCtl, " + "LPMLTemperature Min and Max Failed!", + return -EINVAL); + + if (0 != vegam_populate_temperature_scaler(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate LPMLTemperatureScaler Failed!", + return -EINVAL); + + if (vegam_populate_fuzzy_fan(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate Fuzzy Fan Control parameters Failed!", + return -EINVAL); + + if (vegam_populate_gnb_lpml(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate GnbLPML Failed!", + return -EINVAL); + + if (vegam_populate_bapm_vddc_base_leakage_sidd(hwmgr)) + PP_ASSERT_WITH_CODE(false, + "Attempt to populate BapmVddCBaseLeakage Hi and Lo " + "Sidd Failed!", return -EINVAL); + + if (smu7_copy_bytes_to_smc(hwmgr, pm_fuse_table_offset, + (uint8_t *)&smu_data->power_tune_table, + (sizeof(struct SMU75_Discrete_PmFuses) - PMFUSES_AVFSSIZE), + SMC_RAM_END)) + PP_ASSERT_WITH_CODE(false, + "Attempt to download PmFuseTable Failed!", + return -EINVAL); + } + return 0; +} + +static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr) +{ + struct amdgpu_device *adev = hwmgr->adev; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnableModeSwitchRLCNotification, + adev->gfx.cu_info.number); + + return 0; +} + +static int vegam_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)(hwmgr->pptable); + struct SMU75_Discrete_DpmTable *table = &(smu_data->smc_state_table); + uint8_t i; + struct pp_atomctrl_gpio_pin_assignment gpio_pin; + struct 
phm_ppt_v1_gpio_table *gpio_table = + (struct phm_ppt_v1_gpio_table *)table_info->gpio_table; + pp_atomctrl_clock_dividers_vi dividers; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + + vegam_initialize_power_tune_defaults(hwmgr); + + if (SMU7_VOLTAGE_CONTROL_NONE != hw_data->voltage_control) + vegam_populate_smc_voltage_tables(hwmgr, table); + + table->SystemFlags = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StepVddc)) + table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (hw_data->is_memory_gddr5) + table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (hw_data->ulv_supported && table_info->us_ulv_voltage_offset) { + result = vegam_populate_ulv_state(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ULV state!", return result); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_ULV_PARAMETER, SMU7_CGULVPARAMETER_DFLT); + } + + result = vegam_populate_smc_link_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Link Level!", return result); + + result = vegam_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Graphics Level!", return result); + + result = vegam_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Memory Level!", return result); + + result = vegam_populate_smc_acpi_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ACPI Level!", return result); + + result = vegam_populate_smc_vce_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize VCE Level!", return result); + + result = vegam_populate_smc_samu_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize SAMU Level!", return result); + + /* Since only the initial state is completely set up at this point + * (the other states are just copies of the boot state) we only + * need to populate the ARB settings for the initial state. 
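+ * (the MC ARB table holds one entry per SCLK/MCLK combination)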
+ */ + result = vegam_program_memory_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to Write ARB settings for the initial state.", return result); + + result = vegam_populate_smc_uvd_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize UVD Level!", return result); + + result = vegam_populate_smc_boot_level(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Boot Level!", return result); + + result = vegam_populate_smc_initial_state(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Boot State!", return result); + + result = vegam_populate_bapm_parameters_in_dpm_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate BAPM Parameters!", return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = vegam_populate_clock_stretcher_data_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate Clock Stretcher Data Table!", + return result); + } + + result = vegam_populate_avfs_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate AVFS Parameters!", return result;); + + table->CurrSclkPllRange = 0xff; + table->GraphicsVoltageChangeEnable = 1; + table->GraphicsThermThrottleEnable = 1; + table->GraphicsInterval = 1; + table->VoltageInterval = 1; + table->ThermalInterval = 1; + table->TemperatureLimitHigh = + table_info->cac_dtp_table->usTargetOperatingTemp * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->TemperatureLimitLow = + (table_info->cac_dtp_table->usTargetOperatingTemp - 1) * + SMU7_Q88_FORMAT_CONVERSION_UNIT; + table->MemoryVoltageChangeEnable = 1; + table->MemoryInterval = 1; + table->VoltageResponseTime = 0; + table->PhaseResponseTime = 0; + table->MemoryThermThrottleEnable = 1; + + PP_ASSERT_WITH_CODE(hw_data->dpm_table.pcie_speed_table.count >= 1, + "There must be 1 or more PCIE levels defined in PPTable.", + return -EINVAL); + table->PCIeBootLinkLevel = + hw_data->dpm_table.pcie_speed_table.count; + table->PCIeGenInterval = 1; + table->VRConfig = 0; + + result = vegam_populate_vr_config(hwmgr, table); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate VRConfig setting!", return result); + + table->ThermGpio = 17; + table->SclkStepSize = 0x4000; + + if (atomctrl_get_pp_assign_pin(hwmgr, + VDDC_VRHOT_GPIO_PINID, &gpio_pin)) { + table->VRHotGpio = gpio_pin.uc_gpio_pin_bit_shift; + if (gpio_table) + table->VRHotLevel = + table_info->gpio_table->vrhot_triggered_sclk_dpm_index; + } else { + table->VRHotGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + } + + if (atomctrl_get_pp_assign_pin(hwmgr, + PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin)) { + table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition) && + !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme); + } else { + table->AcDcGpio = SMU7_UNUSED_GPIO_PIN; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + } + + /* Thermal Output GPIO */ + if (atomctrl_get_pp_assign_pin(hwmgr, + THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin)) { + table->ThermOutGpio = gpio_pin.uc_gpio_pin_bit_shift; + + /* For porlarity read GPIOPAD_A with assigned Gpio pin + * since VBIOS will program this register to set 'inactive state', + * driver can then determine 'active state' 
from this and + * program SMU with correct polarity + */ + table->ThermOutPolarity = + (0 == (cgs_read_register(hwmgr->device, mmGPIOPAD_A) & + (1 << gpio_pin.uc_gpio_pin_bit_shift))) ? 1:0; + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_ONLY; + + /* if required, combine VRHot/PCC with thermal out GPIO */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CombinePCCWithThermalSignal)) + table->ThermOutMode = SMU7_THERM_OUT_MODE_THERM_VRHOT; + } else { + table->ThermOutGpio = 17; + table->ThermOutPolarity = 1; + table->ThermOutMode = SMU7_THERM_OUT_MODE_DISABLE; + } + + /* Populate BIF_SCLK levels into SMC DPM table */ + for (i = 0; i <= hw_data->dpm_table.pcie_speed_table.count; i++) { + result = atomctrl_get_dfs_pll_dividers_vi(hwmgr, + smu_data->bif_sclk_table[i], ÷rs); + PP_ASSERT_WITH_CODE(!result, + "Can not find DFS divide id for Sclk", + return result); + + if (i == 0) + table->Ulv.BifSclkDfs = + PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider)); + else + table->LinkLevel[i - 1].BifSclkDfs = + PP_HOST_TO_SMC_US((uint16_t)(dividers.pll_post_divider)); + } + + for (i = 0; i < SMU75_MAX_ENTRIES_SMIO; i++) + table->Smio[i] = PP_HOST_TO_SMC_UL(table->Smio[i]); + + CONVERT_FROM_HOST_TO_SMC_UL(table->SystemFlags); + CONVERT_FROM_HOST_TO_SMC_UL(table->VRConfig); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1); + CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2); + CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize); + CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh); + CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow); + CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime); + CONVERT_FROM_HOST_TO_SMC_US(table->PhaseResponseTime); + + /* Upload all dpm data to SMC memory.(dpm level, dpm level count etc) */ + result = smu7_copy_bytes_to_smc(hwmgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, SystemFlags), + (uint8_t *)&(table->SystemFlags), + sizeof(SMU75_Discrete_DpmTable) - 3 * sizeof(SMU75_PIDController), + SMC_RAM_END); + PP_ASSERT_WITH_CODE(!result, + "Failed to upload dpm data to SMC memory!", return result); + + result = vegam_populate_pm_fuses(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate PM fuses to SMC memory!", return result); + + result = vegam_enable_reconfig_cus(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to enable reconfigurable CUs!", return result); + + return 0; +} + +static uint32_t vegam_get_offsetof(uint32_t type, uint32_t member) +{ + switch (type) { + case SMU_SoftRegisters: + switch (member) { + case HandshakeDisables: + return offsetof(SMU75_SoftRegisters, HandshakeDisables); + case VoltageChangeTimeout: + return offsetof(SMU75_SoftRegisters, VoltageChangeTimeout); + case AverageGraphicsActivity: + return offsetof(SMU75_SoftRegisters, AverageGraphicsActivity); + case PreVBlankGap: + return offsetof(SMU75_SoftRegisters, PreVBlankGap); + case VBlankTimeout: + return offsetof(SMU75_SoftRegisters, VBlankTimeout); + case UcodeLoadStatus: + return offsetof(SMU75_SoftRegisters, UcodeLoadStatus); + case DRAM_LOG_ADDR_H: + return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_H); + case DRAM_LOG_ADDR_L: + return offsetof(SMU75_SoftRegisters, DRAM_LOG_ADDR_L); + case DRAM_LOG_PHY_ADDR_H: + return offsetof(SMU75_SoftRegisters, DRAM_LOG_PHY_ADDR_H); + case DRAM_LOG_PHY_ADDR_L: + return offsetof(SMU75_SoftRegisters, 
DRAM_LOG_PHY_ADDR_L); + case DRAM_LOG_BUFF_SIZE: + return offsetof(SMU75_SoftRegisters, DRAM_LOG_BUFF_SIZE); + } + case SMU_Discrete_DpmTable: + switch (member) { + case UvdBootLevel: + return offsetof(SMU75_Discrete_DpmTable, UvdBootLevel); + case VceBootLevel: + return offsetof(SMU75_Discrete_DpmTable, VceBootLevel); + case SamuBootLevel: + return offsetof(SMU75_Discrete_DpmTable, SamuBootLevel); + case LowSclkInterruptThreshold: + return offsetof(SMU75_Discrete_DpmTable, LowSclkInterruptThreshold); + } + } + pr_warn("can't get the offset of type %x member %x\n", type, member); + return 0; +} + +static int vegam_program_mem_timing_parameters(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + + if (data->need_update_smu7_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + + DPMTABLE_UPDATE_SCLK + + DPMTABLE_UPDATE_MCLK)) + return vegam_program_memory_timing_parameters(hwmgr); + + return 0; +} + +static int vegam_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + struct vegam_smumgr *smu_data = + (struct vegam_smumgr *)(hwmgr->smu_backend); + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (data->low_sclk_interrupt_threshold != 0)) { + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + CONVERT_FROM_HOST_TO_SMC_UL(low_sclk_interrupt_threshold); + + result = smu7_copy_bytes_to_smc( + hwmgr, + smu_data->smu7_data.dpm_table_start + + offsetof(SMU75_Discrete_DpmTable, + LowSclkInterruptThreshold), + (uint8_t *)&low_sclk_interrupt_threshold, + sizeof(uint32_t), + SMC_RAM_END); + } + PP_ASSERT_WITH_CODE((result == 0), + "Failed to update SCLK threshold!", return result); + + result = vegam_program_mem_timing_parameters(hwmgr); + PP_ASSERT_WITH_CODE((result == 0), + "Failed to program memory timing parameters!", + ); + + return result; +} + +int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + int ret; + + if (!hwmgr->avfs_supported) + return 0; + + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs); + if (!ret) { + if (data->apply_avfs_cks_off_voltage) + ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage); + } + + return ret; +} + +static int vegam_thermal_setup_fan_table(struct pp_hwmgr *hwmgr) +{ + PP_ASSERT_WITH_CODE(hwmgr->thermal_controller.fanInfo.bNoFan, + "VBIOS fan info is not correct!", + ); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + return 0; +} + +const struct pp_smumgr_func vegam_smu_funcs = { + .smu_init = vegam_smu_init, + .smu_fini = smu7_smu_fini, + .start_smu = vegam_start_smu, + .check_fw_load_finish = smu7_check_fw_load_finish, + .request_smu_load_fw = smu7_reload_firmware, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = smu7_send_msg_to_smc, + .send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter, + .process_firmware_header = vegam_process_firmware_header, + .is_dpm_running = vegam_is_dpm_running, + .get_mac_definition = vegam_get_mac_definition, + .update_smc_table = vegam_update_smc_table, + .init_smc_table = vegam_init_smc_table, + .get_offsetof = vegam_get_offsetof, + .populate_all_graphic_levels = vegam_populate_all_graphic_levels, + .populate_all_memory_levels = vegam_populate_all_memory_levels, + .update_sclk_threshold = 
vegam_update_sclk_threshold, + .is_hw_avfs_present = vegam_is_hw_avfs_present, + .thermal_avfs_enable = vegam_thermal_avfs_enable, + .is_dpm_running = vegam_is_dpm_running, + .thermal_setup_fan_table = vegam_thermal_setup_fan_table, +}; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h new file mode 100644 index 000000000000..2b6558238500 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.h @@ -0,0 +1,75 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _VEGAM_SMUMANAGER_H +#define _VEGAM_SMUMANAGER_H + + +#include +#include "smu75_discrete.h" +#include "smu7_smumgr.h" + +#define SMC_RAM_END 0x40000 + +#define DPMTuning_Uphyst_Shift 0 +#define DPMTuning_Downhyst_Shift 8 +#define DPMTuning_Activity_Shift 16 + +#define GraphicsDPMTuning_VEGAM 0x001e6400 +#define MemoryDPMTuning_VEGAM 0x000f3c0a +#define SclkDPMTuning_VEGAM 0x002d000a +#define MclkDPMTuning_VEGAM 0x001f100a + + +struct vegam_pt_defaults { + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + uint8_t TdcWaterfallCtl; + uint8_t DTEAmbientTempBase; + + uint32_t DisplayCac; + uint32_t BAPM_TEMP_GRADIENT; + uint16_t BAPMTI_R[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS]; + uint16_t BAPMTI_RC[SMU75_DTE_ITERATIONS * SMU75_DTE_SOURCES * SMU75_DTE_SINKS]; +}; + +struct vegam_range_table { + uint32_t trans_lower_frequency; /* in 10khz */ + uint32_t trans_upper_frequency; +}; + +struct vegam_smumgr { + struct smu7_smumgr smu7_data; + uint8_t protected_mode; + SMU75_Discrete_DpmTable smc_state_table; + struct SMU75_Discrete_Ulv ulv_setting; + struct SMU75_Discrete_PmFuses power_tune_table; + struct vegam_range_table range_table[NUM_SCLK_RANGE]; + const struct vegam_pt_defaults *power_tune_defaults; + uint32_t bif_sclk_table[SMU75_MAX_LEVELS_LINK]; +}; + + +#endif -- cgit v1.2.1 From 0c24e7ef233b528699798a3db3ab57ee0317f2f0 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Wed, 11 Apr 2018 15:38:11 -0500 Subject: drm/amd/powerplay: add specific changes for VEGAM in smu7_hwmgr.c VEGAM-specific changes for smu7: 1. add avfs control. 2. add an SMC message different from smu7. 3. don't switch mc arb memory timing. 4. update LCAC_MC0/1_CNTL value. 
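The hunks that follow gate each of these behaviours on hwmgr->chip_id. As a reduced, standalone illustration of the pattern behind item 2 (every name here is a stand-in for the real PPSMC plumbing; only the VEGAM message value 0x310 comes from the patch itself), the chip-specific message selection looks roughly like this:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the driver's chip IDs and SMC message values; only the
 * VEGAM value (PPSMC_MSG_SetVBITimeout_VEGAM, 0x310) is taken from the
 * patch, the non-VEGAM value is a placeholder for the example. */
enum chip_id { CHIP_OTHER_SMU7, CHIP_VEGAM };

#define MSG_SET_VBI_TIMEOUT_SMU7   0x100  /* placeholder */
#define MSG_SET_VBI_TIMEOUT_VEGAM  0x310

static int send_msg_to_smc(uint16_t msg, uint32_t parameter)
{
	printf("SMC message 0x%03x, parameter 0x%08x\n",
	       (unsigned int)msg, (unsigned int)parameter);
	return 0;
}

/* Mirrors the shape of the smu7_notify_smc_display() change: pick the
 * message ID by chip, keep the rest of the call identical. */
static int notify_smc_display(enum chip_id chip, uint32_t frame_time_x2)
{
	uint16_t msg = (chip == CHIP_VEGAM) ? MSG_SET_VBI_TIMEOUT_VEGAM
					    : MSG_SET_VBI_TIMEOUT_SMU7;

	return send_msg_to_smc(msg, frame_time_x2);
}

int main(void)
{
	notify_smc_display(CHIP_VEGAM, 0x2000);
	notify_smc_display(CHIP_OTHER_SMU7, 0x2000);
	return 0;
}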
Signed-off-by: Eric Huang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 71 ++++++++++++++++++++---- 1 file changed, 61 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index c9dd0bec1e24..4c94e7a057e9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -83,6 +83,14 @@ static const struct profile_mode_setting smu7_profiling[5] = {1, 0, 5, 30, 0, 0, 0, 0}, }; +#define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310) + +#define ixPWR_SVI2_PLANE1_LOAD 0xC0200280 +#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK 0x00000020L +#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK 0x00000040L +#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT 0x00000005 +#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT 0x00000006 + /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */ enum DPM_EVENT_SRC { DPM_EVENT_SRC_ANALOG = 0, @@ -164,6 +172,13 @@ static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr) */ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr) { + if (hwmgr->chip_id == CHIP_VEGAM) { + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0); + PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0); + } + if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable); @@ -964,6 +979,22 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) return 0; } +static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr) +{ + struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); + uint32_t soft_register_value = 0; + uint32_t handshake_disables_offset = data->soft_regs_start + + smum_get_offsetof(hwmgr, + SMU_SoftRegisters, HandshakeDisables); + + soft_register_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, handshake_disables_offset); + soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + handshake_disables_offset, soft_register_value); + return 0; +} + static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); @@ -987,6 +1018,9 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) /* enable SCLK dpm */ if (!data->sclk_dpm_key_disabled) + if (hwmgr->chip_id == CHIP_VEGAM) + smu7_disable_sclk_vce_handshake(hwmgr); + PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)), "Failed to enable SCLK DPM during DPM Start Function!", @@ -996,13 +1030,15 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (0 == data->mclk_dpm_key_disabled) { if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK)) smu7_disable_handshake_uvd(hwmgr); + PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Enable)), "Failed to enable MCLK DPM during DPM Start Function!", return -EINVAL); - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); + if (hwmgr->chip_family != CHIP_VEGAM) + PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); if (hwmgr->chip_family == AMDGPU_FAMILY_CI) { @@ -1018,8 +1054,13 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5); cgs_write_ind_register(hwmgr->device, 
CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005); udelay(10); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); + if (hwmgr->chip_id == CHIP_VEGAM) { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009); + } else { + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005); + } cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005); } } @@ -1260,10 +1301,12 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to process firmware header!", result = tmp_result); - tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to initialize switch from ArbF0 to F1!", - result = tmp_result); + if (hwmgr->chip_id != CHIP_VEGAM) { + tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr); + PP_ASSERT_WITH_CODE((0 == tmp_result), + "Failed to initialize switch from ArbF0 to F1!", + result = tmp_result); + } result = smu7_setup_default_dpm_tables(hwmgr); PP_ASSERT_WITH_CODE(0 == result, @@ -2753,6 +2796,9 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr, case CHIP_POLARIS12: switch_limit_us = data->is_memory_gddr5 ? 190 : 150; break; + case CHIP_VEGAM: + switch_limit_us = 30; + break; default: switch_limit_us = data->is_memory_gddr5 ? 450 : 150; break; @@ -3801,9 +3847,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) - smum_send_msg_to_smc_with_parameter(hwmgr, - (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) { + if (hwmgr->chip_id == CHIP_VEGAM) + smum_send_msg_to_smc_with_parameter(hwmgr, + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2); + else + smum_send_msg_to_smc_with_parameter(hwmgr, + (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2); + } return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; } -- cgit v1.2.1 From eda8377d1628caa07b642fce8fcd1938010e949f Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Wed, 11 Apr 2018 18:23:54 -0500 Subject: drm/powerplay: Add powertune table for VEGAM Add the powertune table for VEGAM. 
Signed-off-by: Eric Huang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 189 +++++++++++++++++++++ 1 file changed, 189 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index a55ee166ce9f..a264e0c35f45 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -623,6 +623,190 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris11_Kicker[] = { 0xFFFFFFFF } /* End of list */ }; +static const struct gpu_pt_config_reg GCCACConfig_VegaM[] = +{ +// --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +// Offset Mask Shift Value Type +// --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + // DIDT_SQ + // + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, GPU_CONFIGREG_GC_CAC_IND }, + + // DIDT_TD + // + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, GPU_CONFIGREG_GC_CAC_IND }, + + // DIDT_TCP + // + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, GPU_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, GPU_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } // End of list +}; + +static const struct gpu_pt_config_reg DIDTConfig_VegaM[] = +{ +// --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +// Offset Mask Shift Value Type +// --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + // DIDT_SQ + // + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, 
DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, 
DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + // DIDT_TD + // + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { 
ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + // DIDT_TCP + // + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, 
DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT,0x01aa, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, 
DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } // End of list +}; static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) { uint32_t en = enable ? 1 : 0; @@ -768,6 +952,11 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12); PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } else if (hwmgr->chip_id == CHIP_VEGAM) { + result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); } } cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2); -- cgit v1.2.1 From abc342538cc4670a107e45037c39d1dda8174563 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Thu, 26 Apr 2018 18:02:55 +0800 Subject: drm/amdgpu: For sriov reset, move IB test into exclusive mode When put the IB test out of exclusive mode, and do sriov reset, the IB test will randomly fail. As out of exclusive mode it uses kiq to do read and write registers, but as it has world switch, the kiq read and write time will be random, sometimes it will beyond the MAX_KIQ_REG_WAIT and then the read or write register will fail, which will result the IB test fail. Signed-off-by: Emily Deng Reviewed-by: Monk Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index e6657ec363b8..9fb20a53d5b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3143,20 +3143,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, /* now we are okay to resume SMC/CP/SDMA */ r = amdgpu_device_ip_reinit_late_sriov(adev); - amdgpu_virt_release_full_gpu(adev, true); if (r) goto error; amdgpu_irq_gpu_reset_resume_helper(adev); r = amdgpu_ib_ring_tests(adev); +error: + amdgpu_virt_release_full_gpu(adev, true); if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { atomic_inc(&adev->vram_lost_counter); r = amdgpu_device_handle_vram_lost(adev); } -error: - return r; } -- cgit v1.2.1 From 6e9c2b88eb42bdda6ba1f1a39238c446782d443e Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Thu, 26 Apr 2018 18:02:14 +0800 Subject: drm/amdgpu/sriov: Need to set in_gpu_reset flag to back after gpu reset After host os reset gpu reset, need to set flag in_gpu_reset to zero. 
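In other words, once the FLR work completes, the reset flag is cleared before the reset lock is dropped, so any path that later takes the lock sees in_gpu_reset already back at zero. A minimal standalone model of that ordering (plain pthreads here, not the amdgpu locking primitives; the flag and lock names are illustrative):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for adev->lock_reset and adev->in_gpu_reset. */
static pthread_mutex_t lock_reset = PTHREAD_MUTEX_INITIALIZER;
static int in_gpu_reset;

static void flr_work(void)
{
	int locked = pthread_mutex_trylock(&lock_reset) == 0;

	if (locked)
		in_gpu_reset = 1;

	/* ... wait for the host to finish the function-level reset ... */

	if (locked) {
		/* Clear the flag while still holding the lock, as in the
		 * hunk below, so no other path observes a stale value. */
		in_gpu_reset = 0;
		pthread_mutex_unlock(&lock_reset);
	}
}

int main(void)
{
	flr_work();
	printf("in_gpu_reset = %d\n", in_gpu_reset);
	return 0;
}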
Signed-off-by: Emily Deng Reviewed-by: Monk Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 493348672475..078f70faedcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -260,8 +260,10 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) } while (timeout > 1); flr_done: - if (locked) + if (locked) { + adev->in_gpu_reset = 0; mutex_unlock(&adev->lock_reset); + } /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_lockup_timeout == 0) -- cgit v1.2.1 From 40c21ed6b372c58fb214307f8186f7a0c1885bfc Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Wed, 10 Jan 2018 10:01:38 -0500 Subject: drm/amd/display: Fix deadlock when flushing irq Lock irq table when reading a work in queue, unlock to flush the work, lock again till all tasks are cleared Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 490017df371d..4be21bf54749 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -329,14 +329,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev) { int src; struct irq_list_head *lh; + unsigned long irq_table_flags; DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); - for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { - + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); /* The handler was removed from the table, * it means it is safe to flush all the 'work' * (because no code can schedule a new one). */ lh = &adev->dm.irq_handler_list_low_tab[src]; + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); flush_work(&lh->work); } } -- cgit v1.2.1 From a80aa93de1a0e69fdb83e04a9aca7c33bfb18941 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Sat, 3 Feb 2018 14:18:07 -0500 Subject: drm/amd/display: Unify dm resume sequence into a single call Merge amdgpu_dm_display_resume function into dm_resume, as it is not called anywhere else anymore. Initially the call was broken down into 2 functions for cursor corruption issue. Now the issue is not visible, hence the dm_resume will perform dm_display_resume in it. 
Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ++++++++--------------- 1 file changed, 10 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8379a3705f2d..cc105f1f93b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -644,18 +644,6 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, static int dm_resume(void *handle) { struct amdgpu_device *adev = handle; - struct amdgpu_display_manager *dm = &adev->dm; - int ret = 0; - - /* power on hardware */ - dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); - - ret = amdgpu_dm_display_resume(adev); - return ret; -} - -int amdgpu_dm_display_resume(struct amdgpu_device *adev) -{ struct drm_device *ddev = adev->ddev; struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; @@ -666,10 +654,12 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev) struct drm_plane *plane; struct drm_plane_state *new_plane_state; struct dm_plane_state *dm_new_plane_state; - - int ret = 0; + int ret; int i; + /* power on hardware */ + dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); + /* program HPD filter */ dc_resume(dm->dc); @@ -683,8 +673,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev) amdgpu_dm_irq_resume_early(adev); /* Do detection*/ - list_for_each_entry(connector, - &ddev->mode_config.connector_list, head) { + list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { aconnector = to_amdgpu_dm_connector(connector); /* @@ -706,7 +695,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev) } /* Force mode set in atomic comit */ - for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) new_crtc_state->active_changed = true; /* @@ -714,7 +703,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev) * them here, since they were duplicated as part of the suspend * procedure. 
*/ - for_each_new_crtc_in_state(adev->dm.cached_state, crtc, new_crtc_state, i) { + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (dm_new_crtc_state->stream) { WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); @@ -723,7 +712,7 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev) } } - for_each_new_plane_in_state(adev->dm.cached_state, plane, new_plane_state, i) { + for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { dm_new_plane_state = to_dm_plane_state(new_plane_state); if (dm_new_plane_state->dc_state) { WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); @@ -732,9 +721,9 @@ int amdgpu_dm_display_resume(struct amdgpu_device *adev) } } - ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state); + ret = drm_atomic_helper_resume(ddev, dm->cached_state); - adev->dm.cached_state = NULL; + dm->cached_state = NULL; amdgpu_dm_irq_resume_late(adev); -- cgit v1.2.1 From 3d777c82bda9a82cc304de5ba51122d248f7a584 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Mon, 16 Apr 2018 17:28:11 -0400 Subject: drm/amd/display: Disallow enabling CRTC without primary plane with FB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The below commit "drm/atomic: Try to preserve the crtc enabled state in drm_atomic_remove_fb, v2" introduces a slight behavioral change to rmfb. Instead of disabling a crtc when the primary plane is disabled, it now preserves it. Since DC is currently not equipped to handle this we need to fail such a commit, otherwise we might see a corrupted screen. This is based on Shirish's previous approach but avoids adding all planes to the new atomic state which leads to a full update in DC for any commit, and is not what we intend. Theoretically DM should be able to deal with states with fully populated planes, even for simple updates, such as cursor updates. This should still be addressed in the future. 
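The rule the hunk below enforces is simple: a CRTC may not be enabled while its primary plane has no framebuffer attached. A reduced standalone model of that check (the structs here are illustrative, not the DRM types):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for drm_crtc_state / drm_plane_state. */
struct crtc_state  { bool enable; };
struct plane_state { const void *fb; };

/* Returns 0 when the state is acceptable, -EINVAL when the CRTC would be
 * enabled with a primary plane that carries no framebuffer. */
static int validate_crtc(const struct crtc_state *crtc,
			 const struct plane_state *primary)
{
	if (crtc->enable && primary && !primary->fb)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct crtc_state crtc = { .enable = true };
	struct plane_state primary = { .fb = NULL };

	printf("enabled, no fb:  %d\n", validate_crtc(&crtc, &primary)); /* -22 */
	primary.fb = "dummy-fb";
	printf("enabled, has fb: %d\n", validate_crtc(&crtc, &primary)); /*   0 */
	return 0;
}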
Signed-off-by: Harry Wentland Tested-by: Michel Dänzer Reviewed-by: Tony Cheng Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index cc105f1f93b8..96a57be3ceb6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4628,6 +4628,7 @@ static int dm_update_crtcs_state(struct dc *dc, struct amdgpu_dm_connector *aconnector = NULL; struct drm_connector_state *new_con_state = NULL; struct dm_connector_state *dm_conn_state = NULL; + struct drm_plane_state *new_plane_state = NULL; new_stream = NULL; @@ -4635,6 +4636,13 @@ static int dm_update_crtcs_state(struct dc *dc, dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); acrtc = to_amdgpu_crtc(crtc); + new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary); + + if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) { + ret = -EINVAL; + goto fail; + } + aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); /* TODO This hack should go away */ @@ -4831,7 +4839,7 @@ static int dm_update_planes_state(struct dc *dc, if (!dm_old_crtc_state->stream) continue; - DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n", + DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", plane->base.id, old_plane_crtc->base.id); if (!dc_remove_plane_from_context( -- cgit v1.2.1 From 388277b17023e69fdbf6ed68cec42fa7616ce454 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Wed, 11 Apr 2018 13:19:56 -0400 Subject: drm/amd/display: fix issue related to infopacket was not transmitted Check in code was incorrect, and infopacket is only transmitted after update function is called multiple times. Purpose of the function was to check if infopackets are being enabled, and then enable global control. Fix the code to do this. 
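Concretely, the problem was that DP_SEC_CNTL was sampled at the top of each function, before the per-packet enable fields were programmed, so the master DP_SEC_STREAM_ENABLE decision was based on stale register contents. The fix below moves the read to just before that decision. A standalone model of the corrected ordering (the register and its bit layout are simulated for the example; the real field definitions live in the register headers):

#include <stdint.h>
#include <stdio.h>

/* Simulated DP_SEC_CNTL: bit 0 stands in for the master stream enable,
 * higher bits for the per-packet enable fields. */
static uint32_t dp_sec_cntl;

#define SEC_STREAM_ENABLE  (1u << 0)

static void set_packet_enable(unsigned int bit, int valid)
{
	if (valid)
		dp_sec_cntl |= 1u << bit;
	else
		dp_sec_cntl &= ~(1u << bit);
}

static void update_info_packets(int vsc_valid, int spd_valid, int hdrsmd_valid)
{
	set_packet_enable(1, vsc_valid);
	set_packet_enable(2, spd_valid);
	set_packet_enable(3, hdrsmd_valid);

	/* Only now, after the fields above were programmed, inspect the
	 * register and raise the master enable if any packet is active. */
	if (dp_sec_cntl)
		dp_sec_cntl |= SEC_STREAM_ENABLE;
}

int main(void)
{
	update_info_packets(1, 0, 0);
	printf("DP_SEC_CNTL = 0x%02x\n", (unsigned int)dp_sec_cntl);
	return 0;
}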
Signed-off-by: Anthony Koo Reviewed-by: Tony Cheng Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dce/dce_stream_encoder.c | 25 ++++++---------------- .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 11 ++++++---- 2 files changed, 13 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 84e26c894046..e265a0abe361 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -819,7 +819,7 @@ static void dce110_stream_encoder_update_dp_info_packets( const struct encoder_info_frame *info_frame) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); - uint32_t value = REG_READ(DP_SEC_CNTL); + uint32_t value = 0; if (info_frame->vsc.valid) dce110_update_generic_info_packet( @@ -853,6 +853,7 @@ static void dce110_stream_encoder_update_dp_info_packets( * Therefore we need to enable master bit * if at least on of the fields is not 0 */ + value = REG_READ(DP_SEC_CNTL); if (value) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } @@ -862,7 +863,7 @@ static void dce110_stream_encoder_stop_dp_info_packets( { /* stop generic packets on DP */ struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); - uint32_t value = REG_READ(DP_SEC_CNTL); + uint32_t value = 0; if (enc110->se_mask->DP_SEC_AVI_ENABLE) { REG_SET_7(DP_SEC_CNTL, 0, @@ -875,25 +876,10 @@ static void dce110_stream_encoder_stop_dp_info_packets( DP_SEC_STREAM_ENABLE, 0); } -#if defined(CONFIG_DRM_AMD_DC_DCN1_0) - if (enc110->se_mask->DP_SEC_GSP7_ENABLE) { - REG_SET_10(DP_SEC_CNTL, 0, - DP_SEC_GSP0_ENABLE, 0, - DP_SEC_GSP1_ENABLE, 0, - DP_SEC_GSP2_ENABLE, 0, - DP_SEC_GSP3_ENABLE, 0, - DP_SEC_GSP4_ENABLE, 0, - DP_SEC_GSP5_ENABLE, 0, - DP_SEC_GSP6_ENABLE, 0, - DP_SEC_GSP7_ENABLE, 0, - DP_SEC_MPG_ENABLE, 0, - DP_SEC_STREAM_ENABLE, 0); - } -#endif /* this register shared with audio info frame. * therefore we need to keep master enabled * if at least one of the fields is not 0 */ - + value = REG_READ(DP_SEC_CNTL); if (value) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); @@ -1496,7 +1482,7 @@ static void dce110_se_disable_dp_audio( struct stream_encoder *enc) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); - uint32_t value = REG_READ(DP_SEC_CNTL); + uint32_t value = 0; /* Disable Audio packets */ REG_UPDATE_5(DP_SEC_CNTL, @@ -1508,6 +1494,7 @@ static void dce110_se_disable_dp_audio( /* This register shared with encoder info frame. 
Therefore we need to keep master enabled if at least on of the fields is not 0 */ + value = REG_READ(DP_SEC_CNTL); if (value != 0) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 9ec46f8fc7cc..befd8639ad55 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -686,7 +686,7 @@ void enc1_stream_encoder_update_dp_info_packets( const struct encoder_info_frame *info_frame) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - uint32_t value = REG_READ(DP_SEC_CNTL); + uint32_t value = 0; if (info_frame->vsc.valid) enc1_update_generic_info_packet( @@ -713,6 +713,7 @@ void enc1_stream_encoder_update_dp_info_packets( REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid); REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid); + /* This bit is the master enable bit. * When enabling secondary stream engine, * this master bit must also be set. @@ -720,6 +721,7 @@ void enc1_stream_encoder_update_dp_info_packets( * Therefore we need to enable master bit * if at least on of the fields is not 0 */ + value = REG_READ(DP_SEC_CNTL); if (value) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); } @@ -729,7 +731,7 @@ void enc1_stream_encoder_stop_dp_info_packets( { /* stop generic packets on DP */ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - uint32_t value = REG_READ(DP_SEC_CNTL); + uint32_t value = 0; REG_SET_10(DP_SEC_CNTL, 0, DP_SEC_GSP0_ENABLE, 0, @@ -746,7 +748,7 @@ void enc1_stream_encoder_stop_dp_info_packets( /* this register shared with audio info frame. * therefore we need to keep master enabled * if at least one of the fields is not 0 */ - + value = REG_READ(DP_SEC_CNTL); if (value) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); @@ -1356,7 +1358,7 @@ static void enc1_se_disable_dp_audio( struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - uint32_t value = REG_READ(DP_SEC_CNTL); + uint32_t value = 0; /* Disable Audio packets */ REG_UPDATE_5(DP_SEC_CNTL, @@ -1369,6 +1371,7 @@ static void enc1_se_disable_dp_audio( /* This register shared with encoder info frame. 
Therefore we need to * keep master enabled if at least on of the fields is not 0 */ + value = REG_READ(DP_SEC_CNTL); if (value != 0) REG_UPDATE(DP_SEC_CNTL, DP_SEC_STREAM_ENABLE, 1); -- cgit v1.2.1 From 8e357610ca0cc44a875df68f608a756fa56b1797 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Mon, 9 Apr 2018 15:47:42 -0400 Subject: drm/amd/display: Make program_output_csc HWSS interface function Signed-off-by: Eric Bernstein Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 5 +++-- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 7 +++++++ 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index e547f46d3516..7cecab0ce297 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1564,7 +1564,7 @@ static void program_csc_matrix(struct pipe_ctx *pipe_ctx, } } -static void program_output_csc(struct dc *dc, +static void dcn10_program_output_csc(struct dc *dc, struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace, uint16_t *matrix, @@ -1917,7 +1917,7 @@ static void update_dchubp_dpp( /*gamut remap*/ program_gamut_remap(pipe_ctx); - program_output_csc(dc, + dc->hwss.program_output_csc(dc, pipe_ctx, pipe_ctx->stream->output_color_space, pipe_ctx->stream->csc_color_matrix.matrix, @@ -2667,6 +2667,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .update_pending_status = dcn10_update_pending_status, .set_input_transfer_func = dcn10_set_input_transfer_func, .set_output_transfer_func = dcn10_set_output_transfer_func, + .program_output_csc = dcn10_program_output_csc, .power_down = dce110_power_down, .enable_accelerated_mode = dce110_enable_accelerated_mode, .enable_timing_synchronization = dcn10_enable_timing_synchronization, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index f54d478ffc5c..be6cf7ee1468 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -95,6 +95,12 @@ struct hw_sequencer_funcs { enum dc_color_space colorspace, uint16_t *matrix); + void (*program_output_csc)(struct dc *dc, + struct pipe_ctx *pipe_ctx, + enum dc_color_space colorspace, + uint16_t *matrix, + int opp_id); + void (*update_plane_addr)( const struct dc *dc, struct pipe_ctx *pipe_ctx); @@ -203,6 +209,7 @@ struct hw_sequencer_funcs { void (*set_cursor_position)(struct pipe_ctx *pipe); void (*set_cursor_attribute)(struct pipe_ctx *pipe); + }; void color_space_to_black_color( -- cgit v1.2.1 From 3158223efde597521505b586a88a6d43c8f2324f Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Mon, 9 Apr 2018 17:19:27 -0400 Subject: drm/amd/display: Refactor otg_blank sequence Also rename otg_blank to blank_pixel_data. 
Signed-off-by: Eric Bernstein Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 6 ++-- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 35 +++++++++++----------- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 8 ++++- 3 files changed, 28 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index e70ccb9b6afe..51c6c70a4a30 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1233,7 +1233,7 @@ static void program_scaler(const struct dc *dc, &pipe_ctx->plane_res.scl_data); } -static enum dc_status dce110_prog_pixclk_crtc_otg( +static enum dc_status dce110_enable_stream_timing( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -1299,7 +1299,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx[pipe_ctx->pipe_idx]; /* */ - dc->hwss.prog_pixclk_crtc_otg(pipe_ctx, context, dc); + dc->hwss.enable_stream_timing(pipe_ctx, context, dc); /* FPGA does not program backend */ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { @@ -3041,7 +3041,7 @@ static const struct hw_sequencer_funcs dce110_funcs = { .get_position = get_position, .set_static_screen_control = set_static_screen_control, .reset_hw_ctx_wrap = dce110_reset_hw_ctx_wrap, - .prog_pixclk_crtc_otg = dce110_prog_pixclk_crtc_otg, + .enable_stream_timing = dce110_enable_stream_timing, .setup_stereo = NULL, .set_avmute = dce110_set_avmute, .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 7cecab0ce297..8eea38b9e32b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -593,7 +593,7 @@ static void false_optc_underflow_wa( tg->funcs->clear_optc_underflow(tg); } -static enum dc_status dcn10_prog_pixclk_crtc_otg( +static enum dc_status dcn10_enable_stream_timing( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc) @@ -1950,9 +1950,9 @@ static void update_dchubp_dpp( hubp->funcs->set_blank(hubp, false); } -static void dcn10_otg_blank( +static void dcn10_blank_pixel_data( struct dc *dc, - struct stream_resource stream_res, + struct stream_resource *stream_res, struct dc_stream_state *stream, bool blank) { @@ -1963,21 +1963,21 @@ static void dcn10_otg_blank( color_space = stream->output_color_space; color_space_to_black_color(dc, color_space, &black_color); - if (stream_res.tg->funcs->set_blank_color) - stream_res.tg->funcs->set_blank_color( - stream_res.tg, + if (stream_res->tg->funcs->set_blank_color) + stream_res->tg->funcs->set_blank_color( + stream_res->tg, &black_color); if (!blank) { - if (stream_res.tg->funcs->set_blank) - stream_res.tg->funcs->set_blank(stream_res.tg, blank); - if (stream_res.abm) - stream_res.abm->funcs->set_abm_level(stream_res.abm, stream->abm_level); + if (stream_res->tg->funcs->set_blank) + stream_res->tg->funcs->set_blank(stream_res->tg, blank); + if (stream_res->abm) + stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level); } else if (blank) { - if (stream_res.abm) - stream_res.abm->funcs->set_abm_immediate_disable(stream_res.abm); - if (stream_res.tg->funcs->set_blank) - 
stream_res.tg->funcs->set_blank(stream_res.tg, blank); + if (stream_res->abm) + stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm); + if (stream_res->tg->funcs->set_blank) + stream_res->tg->funcs->set_blank(stream_res->tg, blank); } } @@ -2016,7 +2016,7 @@ static void program_all_pipe_in_tree( pipe_ctx->stream_res.tg->funcs->program_global_sync( pipe_ctx->stream_res.tg); - dcn10_otg_blank(dc, pipe_ctx->stream_res, + dc->hwss.blank_pixel_data(dc, &pipe_ctx->stream_res, pipe_ctx->stream, blank); } @@ -2136,7 +2136,7 @@ static void dcn10_apply_ctx_for_surface( if (num_planes == 0) { /* OTG blank before remove all front end */ - dcn10_otg_blank(dc, top_pipe_to_program->stream_res, top_pipe_to_program->stream, true); + dc->hwss.blank_pixel_data(dc, &top_pipe_to_program->stream_res, top_pipe_to_program->stream, true); } /* Disconnect unused mpcc */ @@ -2679,10 +2679,11 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .blank_stream = dce110_blank_stream, .enable_display_power_gating = dcn10_dummy_display_power_gating, .disable_plane = dcn10_disable_plane, + .blank_pixel_data = dcn10_blank_pixel_data, .pipe_control_lock = dcn10_pipe_control_lock, .set_bandwidth = dcn10_set_bandwidth, .reset_hw_ctx_wrap = reset_hw_ctx_wrap, - .prog_pixclk_crtc_otg = dcn10_prog_pixclk_crtc_otg, + .enable_stream_timing = dcn10_enable_stream_timing, .set_drr = set_drr, .get_position = get_position, .set_static_screen_control = set_static_screen_control, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index be6cf7ee1468..29abf3ecb39c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -65,6 +65,7 @@ struct dchub_init_data; struct dc_static_screen_events; struct resource_pool; struct resource_context; +struct stream_resource; struct hw_sequencer_funcs { @@ -162,6 +163,11 @@ struct hw_sequencer_funcs { struct dc *dc, struct pipe_ctx *pipe, bool lock); + void (*blank_pixel_data)( + struct dc *dc, + struct stream_resource *stream_res, + struct dc_stream_state *stream, + bool blank); void (*set_bandwidth)( struct dc *dc, @@ -177,7 +183,7 @@ struct hw_sequencer_funcs { void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_events *events); - enum dc_status (*prog_pixclk_crtc_otg)( + enum dc_status (*enable_stream_timing)( struct pipe_ctx *pipe_ctx, struct dc_state *context, struct dc *dc); -- cgit v1.2.1 From 50834eb488a30026de040ab5d209ca9f980ae14b Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Wed, 11 Apr 2018 15:22:10 -0400 Subject: drm/amd/display: DP link validation bug for YUV422 remove limit YUV422 color depth to 24bits which is workaround for old ASIC Signed-off-by: Hersen Wu Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 47 +++++++++++------------- 1 file changed, 21 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 0a190c2b6898..7d609c71394b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1379,34 +1379,29 @@ static uint32_t bandwidth_in_kbps_from_timing( uint32_t bits_per_channel = 0; uint32_t kbps; - if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + switch (timing->display_color_depth) { + case 
COLOR_DEPTH_666: + bits_per_channel = 6; + break; + case COLOR_DEPTH_888: + bits_per_channel = 8; + break; + case COLOR_DEPTH_101010: + bits_per_channel = 10; + break; + case COLOR_DEPTH_121212: bits_per_channel = 12; - else{ - - switch (timing->display_color_depth) { - - case COLOR_DEPTH_666: - bits_per_channel = 6; - break; - case COLOR_DEPTH_888: - bits_per_channel = 8; - break; - case COLOR_DEPTH_101010: - bits_per_channel = 10; - break; - case COLOR_DEPTH_121212: - bits_per_channel = 12; - break; - case COLOR_DEPTH_141414: - bits_per_channel = 14; - break; - case COLOR_DEPTH_161616: - bits_per_channel = 16; - break; - default: - break; - } + break; + case COLOR_DEPTH_141414: + bits_per_channel = 14; + break; + case COLOR_DEPTH_161616: + bits_per_channel = 16; + break; + default: + break; } + ASSERT(bits_per_channel != 0); kbps = timing->pix_clk_khz; -- cgit v1.2.1 From 8a79593d77de17619e99c23495ac243759704b87 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Mon, 16 Apr 2018 13:30:02 -0400 Subject: drm/amd/display: dal 3.1.43 Signed-off-by: Tony Cheng Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7ac8a1bee5ac..92152980b0ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.42" +#define DC_VER "3.1.43" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- cgit v1.2.1 From 55a01d4023ce7249eed361731b373c78e62b73e0 Mon Sep 17 00:00:00 2001 From: Krunoslav Kovac Date: Fri, 13 Apr 2018 16:06:24 -0400 Subject: drm/amd/display: Add user_regamma to color module Signed-off-by: Krunoslav Kovac Reviewed-by: Anthony Koo Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/modules/color/color_gamma.c | 314 ++++++++++++++++++++- .../drm/amd/display/modules/color/color_gamma.h | 48 +++- 2 files changed, 348 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index e7e374f56864..ad0ff50305ce 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -185,14 +185,14 @@ struct dividers { static void build_coefficients(struct gamma_coefficients *coefficients, bool is_2_4) { - static const int32_t numerator01[] = { 31308, 180000}; - static const int32_t numerator02[] = { 12920, 4500}; - static const int32_t numerator03[] = { 55, 99}; - static const int32_t numerator04[] = { 55, 99}; - static const int32_t numerator05[] = { 2400, 2200}; + static const int32_t numerator01[] = { 31308, 180000}; + static const int32_t numerator02[] = { 12920, 4500}; + static const int32_t numerator03[] = { 55, 99}; + static const int32_t numerator04[] = { 55, 99}; + static const int32_t numerator05[] = { 2400, 2200}; - uint32_t i = 0; - uint32_t index = is_2_4 == true ? 0:1; + uint32_t i = 0; + uint32_t index = is_2_4 == true ? 
0:1; do { coefficients->a0[i] = dal_fixed31_32_from_fraction( @@ -691,7 +691,7 @@ static void build_degamma(struct pwl_float_data_ex *curve, } } -static bool scale_gamma(struct pwl_float_data *pwl_rgb, +static void scale_gamma(struct pwl_float_data *pwl_rgb, const struct dc_gamma *ramp, struct dividers dividers) { @@ -752,11 +752,9 @@ static bool scale_gamma(struct pwl_float_data *pwl_rgb, dividers.divider3); rgb->b = dal_fixed31_32_mul(rgb_last->b, dividers.divider3); - - return true; } -static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb, +static void scale_gamma_dx(struct pwl_float_data *pwl_rgb, const struct dc_gamma *ramp, struct dividers dividers) { @@ -818,8 +816,71 @@ static bool scale_gamma_dx(struct pwl_float_data *pwl_rgb, pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g); pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int( pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b); +} - return true; +/* todo: all these scale_gamma functions are inherently the same but + * take different structures as params or different format for ramp + * values. We could probably implement it in a more generic fashion + */ +static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb, + const struct regamma_ramp *ramp, + struct dividers dividers) +{ + unsigned short max_driver = 0xFFFF; + unsigned short max_os = 0xFF00; + unsigned short scaler = max_os; + uint32_t i; + struct pwl_float_data *rgb = pwl_rgb; + struct pwl_float_data *rgb_last = rgb + GAMMA_RGB_256_ENTRIES - 1; + + i = 0; + do { + if (ramp->gamma[i] > max_os || + ramp->gamma[i + 256] > max_os || + ramp->gamma[i + 512] > max_os) { + scaler = max_driver; + break; + } + i++; + } while (i != GAMMA_RGB_256_ENTRIES); + + i = 0; + do { + rgb->r = dal_fixed31_32_from_fraction( + ramp->gamma[i], scaler); + rgb->g = dal_fixed31_32_from_fraction( + ramp->gamma[i + 256], scaler); + rgb->b = dal_fixed31_32_from_fraction( + ramp->gamma[i + 512], scaler); + + ++rgb; + ++i; + } while (i != GAMMA_RGB_256_ENTRIES); + + rgb->r = dal_fixed31_32_mul(rgb_last->r, + dividers.divider1); + rgb->g = dal_fixed31_32_mul(rgb_last->g, + dividers.divider1); + rgb->b = dal_fixed31_32_mul(rgb_last->b, + dividers.divider1); + + ++rgb; + + rgb->r = dal_fixed31_32_mul(rgb_last->r, + dividers.divider2); + rgb->g = dal_fixed31_32_mul(rgb_last->g, + dividers.divider2); + rgb->b = dal_fixed31_32_mul(rgb_last->b, + dividers.divider2); + + ++rgb; + + rgb->r = dal_fixed31_32_mul(rgb_last->r, + dividers.divider3); + rgb->g = dal_fixed31_32_mul(rgb_last->g, + dividers.divider3); + rgb->b = dal_fixed31_32_mul(rgb_last->b, + dividers.divider3); } /* @@ -949,7 +1010,7 @@ static inline void copy_rgb_regamma_to_coordinates_x( uint32_t i = 0; const struct pwl_float_data_ex *rgb_regamma = rgb_ex; - while (i <= hw_points_num) { + while (i <= hw_points_num + 1) { coords->regamma_y_red = rgb_regamma->r; coords->regamma_y_green = rgb_regamma->g; coords->regamma_y_blue = rgb_regamma->b; @@ -1002,6 +1063,102 @@ static bool calculate_interpolated_hardware_curve( return true; } +/* The "old" interpolation uses a complicated scheme to build an array of + * coefficients while also using an array of 0-255 normalized to 0-1 + * Then there's another loop using both of the above + new scaled user ramp + * and we concatenate them. It also searches for points of interpolation and + * uses enums for positions. 
+ * + * This function uses a different approach: + * user ramp is always applied on X with 0/255, 1/255, 2/255, ..., 255/255 + * To find index for hwX , we notice the following: + * i/255 <= hwX < (i+1)/255 <=> i <= 255*hwX < i+1 + * See apply_lut_1d which is the same principle, but on 4K entry 1D LUT + * + * Once the index is known, combined Y is simply: + * user_ramp(index) + (hwX-index/255)*(user_ramp(index+1) - user_ramp(index) + * + * We should switch to this method in all cases, it's simpler and faster + * ToDo one day - for now this only applies to ADL regamma to avoid regression + * for regular use cases (sRGB and PQ) + */ +static void interpolate_user_regamma(uint32_t hw_points_num, + struct pwl_float_data *rgb_user, + bool apply_degamma, + struct dc_transfer_func_distributed_points *tf_pts) +{ + uint32_t i; + uint32_t color = 0; + int32_t index; + int32_t index_next; + struct fixed31_32 *tf_point; + struct fixed31_32 hw_x; + struct fixed31_32 norm_factor = + dal_fixed31_32_from_int_nonconst(255); + struct fixed31_32 norm_x; + struct fixed31_32 index_f; + struct fixed31_32 lut1; + struct fixed31_32 lut2; + struct fixed31_32 delta_lut; + struct fixed31_32 delta_index; + + i = 0; + /* fixed_pt library has problems handling too small values */ + while (i != 32) { + tf_pts->red[i] = dal_fixed31_32_zero; + tf_pts->green[i] = dal_fixed31_32_zero; + tf_pts->blue[i] = dal_fixed31_32_zero; + ++i; + } + while (i <= hw_points_num + 1) { + for (color = 0; color < 3; color++) { + if (color == 0) + tf_point = &tf_pts->red[i]; + else if (color == 1) + tf_point = &tf_pts->green[i]; + else + tf_point = &tf_pts->blue[i]; + + if (apply_degamma) { + if (color == 0) + hw_x = coordinates_x[i].regamma_y_red; + else if (color == 1) + hw_x = coordinates_x[i].regamma_y_green; + else + hw_x = coordinates_x[i].regamma_y_blue; + } else + hw_x = coordinates_x[i].x; + + norm_x = dal_fixed31_32_mul(norm_factor, hw_x); + index = dal_fixed31_32_floor(norm_x); + if (index < 0 || index > 255) + continue; + + index_f = dal_fixed31_32_from_int_nonconst(index); + index_next = (index == 255) ? 
index : index + 1; + + if (color == 0) { + lut1 = rgb_user[index].r; + lut2 = rgb_user[index_next].r; + } else if (color == 1) { + lut1 = rgb_user[index].g; + lut2 = rgb_user[index_next].g; + } else { + lut1 = rgb_user[index].b; + lut2 = rgb_user[index_next].b; + } + + // we have everything now, so interpolate + delta_lut = dal_fixed31_32_sub(lut2, lut1); + delta_index = dal_fixed31_32_sub(norm_x, index_f); + + *tf_point = dal_fixed31_32_add(lut1, + dal_fixed31_32_mul(delta_index, delta_lut)); + } + ++i; + } +} + static void build_new_custom_resulted_curve( uint32_t hw_points_num, struct dc_transfer_func_distributed_points *tf_pts) @@ -1025,6 +1182,29 @@ static void build_new_custom_resulted_curve( } } +static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma, + uint32_t hw_points_num) +{ + uint32_t i; + + struct gamma_coefficients coeff; + struct pwl_float_data_ex *rgb = rgb_regamma; + const struct hw_x_point *coord_x = coordinates_x; + + build_coefficients(&coeff, true); + + i = 0; + while (i != hw_points_num + 1) { + rgb->r = translate_from_linear_space_ex( + coord_x->x, &coeff, 0); + rgb->g = rgb->r; + rgb->b = rgb->r; + ++coord_x; + ++rgb; + ++i; + } +} + static bool map_regamma_hw_to_x_user( const struct dc_gamma *ramp, struct pixel_gamma_point *coeff128, @@ -1062,6 +1242,7 @@ static bool map_regamma_hw_to_x_user( } } + /* this should be named differently, all it does is clamp to 0-1 */ build_new_custom_resulted_curve(hw_points_num, tf_pts); return true; @@ -1168,6 +1349,113 @@ rgb_user_alloc_fail: return ret; } +bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, + const struct regamma_lut *regamma) +{ + struct gamma_coefficients coeff; + const struct hw_x_point *coord_x = coordinates_x; + uint32_t i = 0; + + do { + coeff.a0[i] = dal_fixed31_32_from_fraction( + regamma->coeff.A0[i], 10000000); + coeff.a1[i] = dal_fixed31_32_from_fraction( + regamma->coeff.A1[i], 1000); + coeff.a2[i] = dal_fixed31_32_from_fraction( + regamma->coeff.A2[i], 1000); + coeff.a3[i] = dal_fixed31_32_from_fraction( + regamma->coeff.A3[i], 1000); + coeff.user_gamma[i] = dal_fixed31_32_from_fraction( + regamma->coeff.gamma[i], 1000); + + ++i; + } while (i != 3); + + i = 0; + /* fixed_pt library has problems handling too small values */ + while (i != 32) { + output_tf->tf_pts.red[i] = dal_fixed31_32_zero; + output_tf->tf_pts.green[i] = dal_fixed31_32_zero; + output_tf->tf_pts.blue[i] = dal_fixed31_32_zero; + ++coord_x; + ++i; + } + while (i != MAX_HW_POINTS + 1) { + output_tf->tf_pts.red[i] = translate_from_linear_space_ex( + coord_x->x, &coeff, 0); + output_tf->tf_pts.green[i] = translate_from_linear_space_ex( + coord_x->x, &coeff, 1); + output_tf->tf_pts.blue[i] = translate_from_linear_space_ex( + coord_x->x, &coeff, 2); + ++coord_x; + ++i; + } + + // this function just clamps output to 0-1 + build_new_custom_resulted_curve(MAX_HW_POINTS, &output_tf->tf_pts); + output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + + return true; +} + +bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, + const struct regamma_lut *regamma) +{ + struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; + struct dividers dividers; + + struct pwl_float_data *rgb_user = NULL; + struct pwl_float_data_ex *rgb_regamma = NULL; + bool ret = false; + + if (regamma == NULL) + return false; + + output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; + + rgb_user = kzalloc(sizeof(*rgb_user) * (GAMMA_RGB_256_ENTRIES + _EXTRA_POINTS), + GFP_KERNEL); + if (!rgb_user) + goto 
rgb_user_alloc_fail; + + rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); + if (!rgb_regamma) + goto rgb_regamma_alloc_fail; + + dividers.divider1 = dal_fixed31_32_from_fraction(3, 2); + dividers.divider2 = dal_fixed31_32_from_int(2); + dividers.divider3 = dal_fixed31_32_from_fraction(5, 2); + + scale_user_regamma_ramp(rgb_user, ®amma->ramp, dividers); + + if (regamma->flags.bits.applyDegamma == 1) { + apply_degamma_for_user_regamma(rgb_regamma, MAX_HW_POINTS); + copy_rgb_regamma_to_coordinates_x(coordinates_x, + MAX_HW_POINTS, rgb_regamma); + } + + interpolate_user_regamma(MAX_HW_POINTS, rgb_user, + regamma->flags.bits.applyDegamma, tf_pts); + + // no custom HDR curves! + tf_pts->end_exponent = 0; + tf_pts->x_point_at_y1_red = 1; + tf_pts->x_point_at_y1_green = 1; + tf_pts->x_point_at_y1_blue = 1; + + // this function just clamps output to 0-1 + build_new_custom_resulted_curve(MAX_HW_POINTS, tf_pts); + + ret = true; + + kfree(rgb_regamma); +rgb_regamma_alloc_fail: + kfree(rgb_user); +rgb_user_alloc_fail: + return ret; +} + bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, const struct dc_gamma *ramp, bool mapUserRamp) { diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h index b7f9bc27d101..b64048991a95 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h @@ -32,6 +32,47 @@ struct dc_transfer_func_distributed_points; struct dc_rgb_fixed; enum dc_transfer_func_predefined; +/* For SetRegamma ADL interface support + * Must match escape type + */ +union regamma_flags { + unsigned int raw; + struct { + unsigned int gammaRampArray :1; // RegammaRamp is in use + unsigned int gammaFromEdid :1; //gamma from edid is in use + unsigned int gammaFromEdidEx :1; //gamma from edid is in use , but only for Display Id 1.2 + unsigned int gammaFromUser :1; //user custom gamma is used + unsigned int coeffFromUser :1; //coeff. A0-A3 from user is in use + unsigned int coeffFromEdid :1; //coeff. 
A0-A3 from edid is in use + unsigned int applyDegamma :1; //flag for additional degamma correction in driver + unsigned int gammaPredefinedSRGB :1; //flag for SRGB gamma + unsigned int gammaPredefinedPQ :1; //flag for PQ gamma + unsigned int gammaPredefinedPQ2084Interim :1; //flag for PQ gamma, lower max nits + unsigned int gammaPredefined36 :1; //flag for 3.6 gamma + unsigned int gammaPredefinedReset :1; //flag to return to previous gamma + } bits; +}; + +struct regamma_ramp { + unsigned short gamma[256*3]; // gamma ramp packed in same way as OS windows ,r , g & b +}; + +struct regamma_coeff { + int gamma[3]; + int A0[3]; + int A1[3]; + int A2[3]; + int A3[3]; +}; + +struct regamma_lut { + union regamma_flags flags; + union { + struct regamma_ramp ramp; + struct regamma_coeff coeff; + }; +}; + void setup_x_points_distribution(void); void precompute_pq(void); void precompute_de_pq(void); @@ -45,9 +86,14 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points); -bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, +bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, struct dc_transfer_func_distributed_points *points); +bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, + const struct regamma_lut *regamma); + +bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, + const struct regamma_lut *regamma); #endif /* COLOR_MOD_COLOR_GAMMA_H_ */ -- cgit v1.2.1 From c0aceb7d6303ca138b0def39f25d432057548f43 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Mon, 16 Apr 2018 15:14:15 -0400 Subject: drm/amd/display: add cursor TTU CRQ related Signed-off-by: Charlene Liu Reviewed-by: Dmytro Laktyushkin Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 7 +++++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 10 +++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 58062172cf3f..759fcd1e666a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -613,6 +613,13 @@ void hubp1_program_deadline( REG_SET(DCN_SURF1_TTU_CNTL1, 0, REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_c); + + REG_SET_3(DCN_CUR0_TTU_CNTL0, 0, + REFCYC_PER_REQ_DELIVERY, ttu_attr->refcyc_per_req_delivery_cur0, + QoS_LEVEL_FIXED, ttu_attr->qos_level_fixed_cur0, + QoS_RAMP_DISABLE, ttu_attr->qos_ramp_disable_cur0); + REG_SET(DCN_CUR0_TTU_CNTL1, 0, + REFCYC_PER_REQ_DELIVERY_PRE, ttu_attr->refcyc_per_req_delivery_pre_cur0); } static void hubp1_setup( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 920ae3a1b412..02045a8c30fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -93,6 +93,8 @@ SRI(DCN_SURF0_TTU_CNTL1, HUBPREQ, id),\ SRI(DCN_SURF1_TTU_CNTL0, HUBPREQ, id),\ SRI(DCN_SURF1_TTU_CNTL1, HUBPREQ, id),\ + SRI(DCN_CUR0_TTU_CNTL0, HUBPREQ, id),\ + SRI(DCN_CUR0_TTU_CNTL1, HUBPREQ, id),\ SRI(HUBP_CLK_CNTL, HUBP, id) /* Register address initialization macro for ASICs with VM */ @@ -203,6 +205,8 @@ uint32_t DCN_SURF0_TTU_CNTL1; \ uint32_t DCN_SURF1_TTU_CNTL0; \ uint32_t 
DCN_SURF1_TTU_CNTL1; \ + uint32_t DCN_CUR0_TTU_CNTL0; \ + uint32_t DCN_CUR0_TTU_CNTL1; \ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_MSB; \ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LSB; \ uint32_t DCN_VM_CONTEXT0_PAGE_TABLE_START_ADDR_MSB; \ @@ -368,7 +372,11 @@ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_2, DST_Y_PER_PTE_ROW_NOM_C, mask_sh),\ HUBP_SF(HUBPREQ0_NOM_PARAMETERS_3, REFCYC_PER_PTE_GROUP_NOM_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, mask_sh),\ - HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh) + HUBP_SF(HUBPREQ0_DCN_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, mask_sh),\ + HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, REFCYC_PER_REQ_DELIVERY, mask_sh),\ + HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_LEVEL_FIXED, mask_sh),\ + HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL0, QoS_RAMP_DISABLE, mask_sh),\ + HUBP_SF(HUBPREQ0_DCN_CUR0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh) #define HUBP_MASK_SH_LIST_DCN10(mask_sh)\ HUBP_MASK_SH_LIST_DCN(mask_sh),\ -- cgit v1.2.1 From 7c91bd434e5765dc5dbcf155253f2b8c740fbef9 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Fri, 13 Apr 2018 09:40:21 -0400 Subject: drm/amd/display: add some DTN logs for input and output tf Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 25 +++++++++++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 27 ++++++++++++++ .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 41 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h | 16 +++++++++ 4 files changed, 109 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index c008a71ebc4e..8c4d9e523331 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -98,6 +98,30 @@ enum gamut_remap_select { GAMUT_REMAP_COMB_COEFF }; +void dpp_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s) +{ + struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base); + + REG_GET(CM_IGAM_CONTROL, + CM_IGAM_LUT_MODE, &s->igam_lut_mode); + REG_GET(CM_IGAM_CONTROL, + CM_IGAM_INPUT_FORMAT, &s->igam_input_format); + REG_GET(CM_DGAM_CONTROL, + CM_DGAM_LUT_MODE, &s->dgam_lut_mode); + REG_GET(CM_RGAM_CONTROL, + CM_RGAM_LUT_MODE, &s->rgam_lut_mode); + REG_GET(CM_GAMUT_REMAP_CONTROL, + CM_GAMUT_REMAP_MODE, &s->gamut_remap_mode); + + s->gamut_remap_c11_c12 = REG_READ(CM_GAMUT_REMAP_C11_C12); + s->gamut_remap_c13_c14 = REG_READ(CM_GAMUT_REMAP_C13_C14); + s->gamut_remap_c21_c22 = REG_READ(CM_GAMUT_REMAP_C21_C22); + s->gamut_remap_c23_c24 = REG_READ(CM_GAMUT_REMAP_C23_C24); + s->gamut_remap_c31_c32 = REG_READ(CM_GAMUT_REMAP_C31_C32); + s->gamut_remap_c33_c34 = REG_READ(CM_GAMUT_REMAP_C33_C34); +} + /* Program gamut remap in bypass mode */ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) { @@ -450,6 +474,7 @@ void dpp1_dppclk_control( } static const struct dpp_funcs dcn10_dpp_funcs = { + .dpp_read_state = dpp_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index 3fccf9959305..5944a3ba0409 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -44,6 +44,10 @@ #define TF_REG_LIST_DCN(id) \ SRI(CM_GAMUT_REMAP_CONTROL, CM, 
id),\ SRI(CM_GAMUT_REMAP_C11_C12, CM, id),\ + SRI(CM_GAMUT_REMAP_C13_C14, CM, id),\ + SRI(CM_GAMUT_REMAP_C21_C22, CM, id),\ + SRI(CM_GAMUT_REMAP_C23_C24, CM, id),\ + SRI(CM_GAMUT_REMAP_C31_C32, CM, id),\ SRI(CM_GAMUT_REMAP_C33_C34, CM, id),\ SRI(DSCL_EXT_OVERSCAN_LEFT_RIGHT, DSCL, id), \ SRI(DSCL_EXT_OVERSCAN_TOP_BOTTOM, DSCL, id), \ @@ -177,6 +181,14 @@ TF_SF(CM0_CM_GAMUT_REMAP_CONTROL, CM_GAMUT_REMAP_MODE, mask_sh),\ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C11, mask_sh),\ TF_SF(CM0_CM_GAMUT_REMAP_C11_C12, CM_GAMUT_REMAP_C12, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C13, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C13_C14, CM_GAMUT_REMAP_C14, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C21, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C21_C22, CM_GAMUT_REMAP_C22, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C23, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C23_C24, CM_GAMUT_REMAP_C24, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C31, mask_sh),\ + TF_SF(CM0_CM_GAMUT_REMAP_C31_C32, CM_GAMUT_REMAP_C32, mask_sh),\ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C33, mask_sh),\ TF_SF(CM0_CM_GAMUT_REMAP_C33_C34, CM_GAMUT_REMAP_C34, mask_sh),\ TF_SF(DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT, EXT_OVERSCAN_LEFT, mask_sh),\ @@ -524,6 +536,14 @@ type CM_GAMUT_REMAP_MODE; \ type CM_GAMUT_REMAP_C11; \ type CM_GAMUT_REMAP_C12; \ + type CM_GAMUT_REMAP_C13; \ + type CM_GAMUT_REMAP_C14; \ + type CM_GAMUT_REMAP_C21; \ + type CM_GAMUT_REMAP_C22; \ + type CM_GAMUT_REMAP_C23; \ + type CM_GAMUT_REMAP_C24; \ + type CM_GAMUT_REMAP_C31; \ + type CM_GAMUT_REMAP_C32; \ type CM_GAMUT_REMAP_C33; \ type CM_GAMUT_REMAP_C34; \ type CM_COMA_C11; \ @@ -1095,6 +1115,10 @@ struct dcn_dpp_mask { uint32_t RECOUT_SIZE; \ uint32_t CM_GAMUT_REMAP_CONTROL; \ uint32_t CM_GAMUT_REMAP_C11_C12; \ + uint32_t CM_GAMUT_REMAP_C13_C14; \ + uint32_t CM_GAMUT_REMAP_C21_C22; \ + uint32_t CM_GAMUT_REMAP_C23_C24; \ + uint32_t CM_GAMUT_REMAP_C31_C32; \ uint32_t CM_GAMUT_REMAP_C33_C34; \ uint32_t CM_COMA_C11_C12; \ uint32_t CM_COMA_C33_C34; \ @@ -1407,6 +1431,9 @@ bool dpp_get_optimal_number_of_taps( struct scaler_data *scl_data, const struct scaling_taps *in_taps); +void dpp_read_state(struct dpp *dpp_base, + struct dcn_dpp_state *s); + void dpp_reset(struct dpp *dpp_base); void dpp1_cm_program_regamma_lut( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 8eea38b9e32b..9a642116f2ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -250,6 +250,47 @@ void dcn10_log_hw_state(struct dc *dc) } DTN_INFO("\n"); + DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode" + " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 " + "C31 C32 C33 C34\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct dpp *dpp = pool->dpps[i]; + struct dcn_dpp_state s; + + dpp->funcs->dpp_read_state(dpp, &s); + + DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s" + "%08xh %08xh %08xh %08xh %08xh %08xh %08xh", + dpp->inst, + s.igam_input_format, + (s.igam_lut_mode == 0) ? "BypassFixed" : + ((s.igam_lut_mode == 1) ? "BypassFloat" : + ((s.igam_lut_mode == 2) ? "RAM" : + ((s.igam_lut_mode == 3) ? "RAM" : + "Unknown"))), + (s.dgam_lut_mode == 0) ? "Bypass" : + ((s.dgam_lut_mode == 1) ? "sRGB" : + ((s.dgam_lut_mode == 2) ? "Ycc" : + ((s.dgam_lut_mode == 3) ? "RAM" : + ((s.dgam_lut_mode == 4) ? "RAM" : + "Unknown")))), + (s.rgam_lut_mode == 0) ? 
"Bypass" : + ((s.rgam_lut_mode == 1) ? "sRGB" : + ((s.rgam_lut_mode == 2) ? "Ycc" : + ((s.rgam_lut_mode == 3) ? "RAM" : + ((s.rgam_lut_mode == 4) ? "RAM" : + "Unknown")))), + s.gamut_remap_mode, + s.gamut_remap_c11_c12, + s.gamut_remap_c13_c14, + s.gamut_remap_c21_c22, + s.gamut_remap_c23_c24, + s.gamut_remap_c31_c32, + s.gamut_remap_c33_c34); + DTN_INFO("\n"); + } + DTN_INFO("\n"); + DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n"); for (i = 0; i < pool->pipe_count; i++) { struct mpcc_state s = {0}; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h index bb7af1b1c7b3..582458f028f8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h @@ -44,7 +44,23 @@ struct dpp_grph_csc_adjustment { enum graphics_gamut_adjust_type gamut_adjust_type; }; +struct dcn_dpp_state { + uint32_t igam_lut_mode; + uint32_t igam_input_format; + uint32_t dgam_lut_mode; + uint32_t rgam_lut_mode; + uint32_t gamut_remap_mode; + uint32_t gamut_remap_c11_c12; + uint32_t gamut_remap_c13_c14; + uint32_t gamut_remap_c21_c22; + uint32_t gamut_remap_c23_c24; + uint32_t gamut_remap_c31_c32; + uint32_t gamut_remap_c33_c34; +}; + struct dpp_funcs { + void (*dpp_read_state)(struct dpp *dpp, struct dcn_dpp_state *s); + void (*dpp_reset)(struct dpp *dpp); void (*dpp_set_scaler)(struct dpp *dpp, -- cgit v1.2.1 From 7b265fd96cbfa03630a2db90b3891b8397bf2208 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 17 Apr 2018 12:12:56 -0400 Subject: drm/amd/display: update dtn logging and goldens Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 9a642116f2ef..574d37cdfa20 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -260,7 +260,7 @@ void dcn10_log_hw_state(struct dc *dc) dpp->funcs->dpp_read_state(dpp, &s); DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s" - "%08xh %08xh %08xh %08xh %08xh %08xh %08xh", + "%8x %08xh %08xh %08xh %08xh %08xh %08xh", dpp->inst, s.igam_input_format, (s.igam_lut_mode == 0) ? 
"BypassFixed" : -- cgit v1.2.1 From 34cb6b3860a4aecafaae0df8fa84b6fc784f507c Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 11 Apr 2018 11:51:32 -0400 Subject: drm/amd/display: compact the rq/dlg/ttu log Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 24 ++- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 235 +++++++++------------ drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 22 +- 4 files changed, 128 insertions(+), 157 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 759fcd1e666a..159bebcfd521 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -763,10 +763,10 @@ void min_set_viewport( PRI_VIEWPORT_Y_START_C, viewport_c->y); } -void hubp1_read_state(struct hubp *hubp, - struct dcn_hubp_state *s) +void hubp1_read_state(struct hubp *hubp) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + struct dcn_hubp_state *s = &hubp1->state; struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr; struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr; struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index 02045a8c30fd..fe9b8c4a91ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -619,8 +619,29 @@ struct dcn_mi_mask { DCN_HUBP_REG_FIELD_LIST(uint32_t); }; +struct dcn_hubp_state { + struct _vcs_dpi_display_dlg_regs_st dlg_attr; + struct _vcs_dpi_display_ttu_regs_st ttu_attr; + struct _vcs_dpi_display_rq_regs_st rq_regs; + uint32_t pixel_format; + uint32_t inuse_addr_hi; + uint32_t viewport_width; + uint32_t viewport_height; + uint32_t rotation_angle; + uint32_t h_mirror_en; + uint32_t sw_mode; + uint32_t dcc_en; + uint32_t blank_en; + uint32_t underflow_status; + uint32_t ttu_disable; + uint32_t min_ttu_vblank; + uint32_t qos_level_low_wm; + uint32_t qos_level_high_wm; +}; + struct dcn10_hubp { struct hubp base; + struct dcn_hubp_state state; const struct dcn_mi_registers *hubp_regs; const struct dcn_mi_shift *hubp_shift; const struct dcn_mi_mask *hubp_mask; @@ -698,8 +719,7 @@ void dcn10_hubp_construct( const struct dcn_mi_shift *hubp_shift, const struct dcn_mi_mask *hubp_mask); -void hubp1_read_state(struct hubp *hubp, - struct dcn_hubp_state *s); +void hubp1_read_state(struct hubp *hubp); enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 574d37cdfa20..572fa601a0eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -112,143 +112,127 @@ void dcn10_log_hubbub_state(struct dc *dc) DTN_INFO("\n"); } -static void print_rq_dlg_ttu_regs(struct dc_context *dc_ctx, struct dcn_hubp_state *s) -{ - struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr; - struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr; - struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; - - DTN_INFO("========Requester========\n"); - DTN_INFO("drq_expansion_mode = 0x%0x\n", 
rq_regs->drq_expansion_mode); - DTN_INFO("prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode); - DTN_INFO("mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode); - DTN_INFO("crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode); - DTN_INFO("plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address); - DTN_INFO("====\n"); - DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_l.chunk_size); - DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_chunk_size); - DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.meta_chunk_size); - DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_meta_chunk_size); - DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.dpte_group_size); - DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.mpte_group_size); - DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_l.swath_height); - DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_l.pte_row_height_linear); - DTN_INFO("====\n"); - DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_c.chunk_size); - DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_chunk_size); - DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.meta_chunk_size); - DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_meta_chunk_size); - DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.dpte_group_size); - DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.mpte_group_size); - DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_c.swath_height); - DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_c.pte_row_height_linear); - - DTN_INFO("========DLG========\n"); - DTN_INFO("refcyc_h_blank_end = 0x%0x\n", dlg_regs->refcyc_h_blank_end); - DTN_INFO("dlg_vblank_end = 0x%0x\n", dlg_regs->dlg_vblank_end); - DTN_INFO("min_dst_y_next_start = 0x%0x\n", dlg_regs->min_dst_y_next_start); - DTN_INFO("refcyc_per_htotal = 0x%0x\n", dlg_regs->refcyc_per_htotal); - DTN_INFO("refcyc_x_after_scaler = 0x%0x\n", dlg_regs->refcyc_x_after_scaler); - DTN_INFO("dst_y_after_scaler = 0x%0x\n", dlg_regs->dst_y_after_scaler); - DTN_INFO("dst_y_prefetch = 0x%0x\n", dlg_regs->dst_y_prefetch); - DTN_INFO("dst_y_per_vm_vblank = 0x%0x\n", dlg_regs->dst_y_per_vm_vblank); - DTN_INFO("dst_y_per_row_vblank = 0x%0x\n", dlg_regs->dst_y_per_row_vblank); - DTN_INFO("dst_y_per_vm_flip = 0x%0x\n", dlg_regs->dst_y_per_vm_flip); - DTN_INFO("dst_y_per_row_flip = 0x%0x\n", dlg_regs->dst_y_per_row_flip); - DTN_INFO("ref_freq_to_pix_freq = 0x%0x\n", dlg_regs->ref_freq_to_pix_freq); - DTN_INFO("vratio_prefetch = 0x%0x\n", dlg_regs->vratio_prefetch); - DTN_INFO("vratio_prefetch_c = 0x%0x\n", dlg_regs->vratio_prefetch_c); - DTN_INFO("refcyc_per_pte_group_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_l); - DTN_INFO("refcyc_per_pte_group_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_c); - DTN_INFO("refcyc_per_meta_chunk_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_l); - DTN_INFO("refcyc_per_meta_chunk_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_c); - DTN_INFO("refcyc_per_pte_group_flip_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_l); - DTN_INFO("refcyc_per_pte_group_flip_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_c); - DTN_INFO("refcyc_per_meta_chunk_flip_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_l); - DTN_INFO("refcyc_per_meta_chunk_flip_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_c); - DTN_INFO("dst_y_per_pte_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_l); - DTN_INFO("dst_y_per_pte_row_nom_c = 
0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_c); - DTN_INFO("refcyc_per_pte_group_nom_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_l); - DTN_INFO("refcyc_per_pte_group_nom_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_c); - DTN_INFO("dst_y_per_meta_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_l); - DTN_INFO("dst_y_per_meta_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_c); - DTN_INFO("refcyc_per_meta_chunk_nom_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_l); - DTN_INFO("refcyc_per_meta_chunk_nom_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_c); - DTN_INFO("refcyc_per_line_delivery_pre_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_l); - DTN_INFO("refcyc_per_line_delivery_pre_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_c); - DTN_INFO("refcyc_per_line_delivery_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_l); - DTN_INFO("refcyc_per_line_delivery_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_c); - DTN_INFO("chunk_hdl_adjust_cur0 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur0); - DTN_INFO("dst_y_offset_cur1 = 0x%0x\n", dlg_regs->dst_y_offset_cur1); - DTN_INFO("chunk_hdl_adjust_cur1 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur1); - DTN_INFO("vready_after_vcount0 = 0x%0x\n", dlg_regs->vready_after_vcount0); - DTN_INFO("dst_y_delta_drq_limit = 0x%0x\n", dlg_regs->dst_y_delta_drq_limit); - DTN_INFO("xfc_reg_transfer_delay = 0x%0x\n", dlg_regs->xfc_reg_transfer_delay); - DTN_INFO("xfc_reg_precharge_delay = 0x%0x\n", dlg_regs->xfc_reg_precharge_delay); - DTN_INFO("xfc_reg_remote_surface_flip_latency = 0x%0x\n", dlg_regs->xfc_reg_remote_surface_flip_latency); - - DTN_INFO("========TTU========\n"); - DTN_INFO("qos_level_low_wm = 0x%0x\n", ttu_regs->qos_level_low_wm); - DTN_INFO("qos_level_high_wm = 0x%0x\n", ttu_regs->qos_level_high_wm); - DTN_INFO("min_ttu_vblank = 0x%0x\n", ttu_regs->min_ttu_vblank); - DTN_INFO("qos_level_flip = 0x%0x\n", ttu_regs->qos_level_flip); - DTN_INFO("refcyc_per_req_delivery_pre_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_l); - DTN_INFO("refcyc_per_req_delivery_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_l); - DTN_INFO("refcyc_per_req_delivery_pre_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_c); - DTN_INFO("refcyc_per_req_delivery_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_c); - DTN_INFO("refcyc_per_req_delivery_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur0); - DTN_INFO("refcyc_per_req_delivery_pre_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur0); - DTN_INFO("refcyc_per_req_delivery_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur1); - DTN_INFO("refcyc_per_req_delivery_pre_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur1); - DTN_INFO("qos_level_fixed_l = 0x%0x\n", ttu_regs->qos_level_fixed_l); - DTN_INFO("qos_ramp_disable_l = 0x%0x\n", ttu_regs->qos_ramp_disable_l); - DTN_INFO("qos_level_fixed_c = 0x%0x\n", ttu_regs->qos_level_fixed_c); - DTN_INFO("qos_ramp_disable_c = 0x%0x\n", ttu_regs->qos_ramp_disable_c); - DTN_INFO("qos_level_fixed_cur0 = 0x%0x\n", ttu_regs->qos_level_fixed_cur0); - DTN_INFO("qos_ramp_disable_cur0 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur0); - DTN_INFO("qos_level_fixed_cur1 = 0x%0x\n", ttu_regs->qos_level_fixed_cur1); - DTN_INFO("qos_ramp_disable_cur1 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur1); -} - -void dcn10_log_hw_state(struct dc *dc) +static void dcn10_log_hubp_states(struct dc *dc) { struct dc_context *dc_ctx = dc->ctx; struct resource_pool *pool = dc->res_pool; int i; - DTN_INFO_BEGIN(); - - dcn10_log_hubbub_state(dc); - 
DTN_INFO("HUBP: format addr_hi width height" " rot mir sw_mode dcc_en blank_en ttu_dis underflow" " min_ttu_vblank qos_low_wm qos_high_wm\n"); for (i = 0; i < pool->pipe_count; i++) { struct hubp *hubp = pool->hubps[i]; - struct dcn_hubp_state s; + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state); - hubp->funcs->hubp_read_state(hubp, &s); + hubp->funcs->hubp_read_state(hubp); DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh" " %6d %8d %7d %8xh", hubp->inst, - s.pixel_format, - s.inuse_addr_hi, - s.viewport_width, - s.viewport_height, - s.rotation_angle, - s.h_mirror_en, - s.sw_mode, - s.dcc_en, - s.blank_en, - s.ttu_disable, - s.underflow_status); - DTN_INFO_MICRO_SEC(s.min_ttu_vblank); - DTN_INFO_MICRO_SEC(s.qos_level_low_wm); - DTN_INFO_MICRO_SEC(s.qos_level_high_wm); + s->pixel_format, + s->inuse_addr_hi, + s->viewport_width, + s->viewport_height, + s->rotation_angle, + s->h_mirror_en, + s->sw_mode, + s->dcc_en, + s->blank_en, + s->ttu_disable, + s->underflow_status); + DTN_INFO_MICRO_SEC(s->min_ttu_vblank); + DTN_INFO_MICRO_SEC(s->qos_level_low_wm); + DTN_INFO_MICRO_SEC(s->qos_level_high_wm); DTN_INFO("\n"); } + + DTN_INFO("\n=========RQ========\n"); + DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s" + " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s" + " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; + + DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", + i, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode, + rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size, + rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size, + rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size, + rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height, + rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size, + rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size, + rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size, + rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear); + } + + DTN_INFO("========DLG========\n"); + DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s " + " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq" + " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll" + " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc " + " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l " + " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay" + " x_rp_dlay x_rr_sfl\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr; + + DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" + "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" + " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", + i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, + dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, 
dlg_regs->dst_y_after_scaler, + dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank, + dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq, + dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l, + dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l, + dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l, + dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l, + dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l, + dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l, + dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l, + dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l, + dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l, + dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l, + dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1, + dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit, + dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay, + dlg_regs->xfc_reg_remote_surface_flip_latency); + } + + DTN_INFO("========TTU========\n"); + DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c" + " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l" + " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr; + + DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", + i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank, + ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l, + ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0, + ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1, + ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l, + ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0, + ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1); + } DTN_INFO("\n"); +} + +void dcn10_log_hw_state(struct dc *dc) +{ + struct dc_context *dc_ctx = dc->ctx; + struct resource_pool *pool = dc->res_pool; + int i; + + DTN_INFO_BEGIN(); + + dcn10_log_hubbub_state(dc); + + dcn10_log_hubp_states(dc); DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode" " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 " @@ -340,19 +324,6 @@ void dcn10_log_hw_state(struct dc *dc) } DTN_INFO("\n"); - for (i = 0; i < pool->pipe_count; i++) { - struct hubp *hubp = pool->hubps[i]; - struct dcn_hubp_state s = {0}; - - if (!dc->current_state->res_ctx.pipe_ctx[i].stream) - continue; - - hubp->funcs->hubp_read_state(hubp, &s); - DTN_INFO("RQ-DLG-TTU registers for HUBP%d:\n", i); - print_rq_dlg_ttu_regs(dc_ctx, &s); - DTN_INFO("\n"); - } - DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n" "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d 
socclk_khz:%d\n\n", dc->current_state->bw.dcn.calc_clk.dcfclk_khz, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 3866147fb02a..331f8ff57ed7 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -56,26 +56,6 @@ struct hubp { bool power_gated; }; -struct dcn_hubp_state { - struct _vcs_dpi_display_dlg_regs_st dlg_attr; - struct _vcs_dpi_display_ttu_regs_st ttu_attr; - struct _vcs_dpi_display_rq_regs_st rq_regs; - uint32_t pixel_format; - uint32_t inuse_addr_hi; - uint32_t viewport_width; - uint32_t viewport_height; - uint32_t rotation_angle; - uint32_t h_mirror_en; - uint32_t sw_mode; - uint32_t dcc_en; - uint32_t blank_en; - uint32_t underflow_status; - uint32_t ttu_disable; - uint32_t min_ttu_vblank; - uint32_t qos_level_low_wm; - uint32_t qos_level_high_wm; -}; - struct hubp_funcs { void (*hubp_setup)( struct hubp *hubp, @@ -140,7 +120,7 @@ struct hubp_funcs { void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); - void (*hubp_read_state)(struct hubp *hubp, struct dcn_hubp_state *s); + void (*hubp_read_state)(struct hubp *hubp); }; -- cgit v1.2.1 From 8ec06a179613f7b7379daf78844afe1cc301e2c7 Mon Sep 17 00:00:00 2001 From: Julian Parkin Date: Fri, 13 Apr 2018 13:23:02 -0400 Subject: drm/amd/display: Add assert that chroma pitch is non zero Signed-off-by: Julian Parkin Reviewed-by: Aric Cyr Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 159bebcfd521..0cbc83edd37f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -146,6 +146,9 @@ void hubp1_program_size_and_rotation( * 444 or 420 luma */ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + ASSERT(plane_size->video.chroma_pitch != 0); + /* Chroma pitch zero can cause system hang! */ + pitch = plane_size->video.luma_pitch - 1; meta_pitch = dcc->video.meta_pitch_l - 1; pitch_c = plane_size->video.chroma_pitch - 1; -- cgit v1.2.1 From 7ab3fdde04218c4733e96712b651751c413d51c3 Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Tue, 17 Apr 2018 13:49:48 -0400 Subject: drm/amd/display: Update MST edid property every time Extended fix to: "Don't read EDID in atomic_check" Fix display property not observed in GUI display after hot plug. Call drm_mode_connector_update_edid_property every time in .get_modes hook, due to the fact that edid property is getting removed from usermode ioctl DRM_IOCTL_MODE_GETCONNECTOR each time in hot unplug. 
Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 305292a9ff80..8c1d084429dc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -253,11 +253,11 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink) amdgpu_dm_add_sink_to_freesync_module( connector, edid); - - drm_mode_connector_update_edid_property( - &aconnector->base, edid); } + drm_mode_connector_update_edid_property( + &aconnector->base, aconnector->edid); + ret = drm_add_edid_modes(connector, aconnector->edid); return ret; -- cgit v1.2.1 From 6b622181230e7c9286f594e4e51266b6f019b031 Mon Sep 17 00:00:00 2001 From: Julian Parkin Date: Tue, 17 Apr 2018 11:49:06 -0400 Subject: drm/amd/display: reprogram infoframe during apply_ctx_to_hw To ensure the infoframe gets updated during an SDR/HDR switch this change adds a new function to to check if the HDR static metadata has changed and adds it to is_timing_changed and pipe_need_reprogram checks Signed-off-by: Julian Parkin Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 447729cd29f0..4de1b443e438 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1310,6 +1310,19 @@ bool dc_add_all_planes_for_stream( } +static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream, + struct dc_stream_state *new_stream) +{ + if (cur_stream == NULL) + return true; + + if (memcmp(&cur_stream->hdr_static_metadata, + &new_stream->hdr_static_metadata, + sizeof(struct dc_info_packet)) != 0) + return true; + + return false; +} static bool is_timing_changed(struct dc_stream_state *cur_stream, struct dc_stream_state *new_stream) @@ -1345,6 +1358,9 @@ static bool are_stream_backends_same( if (is_timing_changed(stream_a, stream_b)) return false; + if (is_hdr_static_meta_changed(stream_a, stream_b)) + return false; + return true; } @@ -2548,6 +2564,8 @@ bool pipe_need_reprogram( if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream)) return true; + if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream)) + return true; return false; } -- cgit v1.2.1 From 70ee2def89e678940190b2c2f7d65fdef5647e07 Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Wed, 9 May 2018 14:15:16 -0500 Subject: drm/amd/display: Check dc_sink every time in MST hotplug Extended fix to: "Don't read EDID in atomic_check" Fix issue of missing dc_sink in .mode_valid in hot plug routine. Need to check dc_sink everytime in .get_modes hook after checking edid, since edid is not getting removed in hot unplug but dc_sink doesn't. 
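Taken together with the earlier "Update MST edid property every time" change, the EDID/sink portion of dm_dp_mst_get_modes() ends up roughly as in the condensed C sketch below. It is assembled from the hunks of these two patches rather than copied from the kernel tree: the no-EDID fallback path, the dc_sink->priv bookkeeping and the helper that derives aconnector from the connector are simplified or assumed here.

static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);

	/* 1. Fetch and cache the EDID only if we do not already have one. */
	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector,
					   &aconnector->mst_port->mst_mgr,
					   aconnector->port);
		if (!edid)
			return 0; /* the driver's no-EDID fallback is elided */

		aconnector->edid = edid;
	}

	/* 2. The dc_sink is torn down on hot unplug even though the cached
	 *    EDID is kept, so it has to be re-created on every replug.
	 */
	if (!aconnector->dc_sink) {
		struct dc_sink_init_data init_params = {
			.link = aconnector->dc_link,
			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

		aconnector->dc_sink = dc_link_add_remote_sink(
				aconnector->dc_link,
				(uint8_t *)aconnector->edid,
				(aconnector->edid->extensions + 1) * EDID_LENGTH,
				&init_params);

		if (aconnector->dc_sink)
			amdgpu_dm_add_sink_to_freesync_module(
					connector, aconnector->edid);
	}

	/* 3. The edid property is removed by the GETCONNECTOR ioctl on hot
	 *    unplug, so re-attach it unconditionally on every call.
	 */
	drm_mode_connector_update_edid_property(
			&aconnector->base, aconnector->edid);

	return drm_add_edid_modes(connector, aconnector->edid);
}

In short, only the EDID fetch is cached; the dc_sink and the connector's edid property are re-established on every .get_modes call, which is what keeps the connector usable across a hot unplug/replug cycle.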
Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Roman Li Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 8c1d084429dc..ace9ad578ca0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -226,10 +226,6 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!aconnector->edid) { struct edid *edid; - struct dc_sink *dc_sink; - struct dc_sink_init_data init_params = { - .link = aconnector->dc_link, - .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port); if (!edid) { @@ -240,11 +236,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) } aconnector->edid = edid; + } + if (!aconnector->dc_sink) { + struct dc_sink *dc_sink; + struct dc_sink_init_data init_params = { + .link = aconnector->dc_link, + .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST }; dc_sink = dc_link_add_remote_sink( aconnector->dc_link, - (uint8_t *)edid, - (edid->extensions + 1) * EDID_LENGTH, + (uint8_t *)aconnector->edid, + (aconnector->edid->extensions + 1) * EDID_LENGTH, &init_params); dc_sink->priv = aconnector; @@ -252,7 +254,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink) amdgpu_dm_add_sink_to_freesync_module( - connector, edid); + connector, aconnector->edid); } drm_mode_connector_update_edid_property( -- cgit v1.2.1 From 3bff0796e90960b0abfea21c7f2b50d2d6a19bd9 Mon Sep 17 00:00:00 2001 From: Martin Tsai Date: Tue, 17 Apr 2018 17:20:06 -0400 Subject: drm/amd/display: to synchronize the hubp and dpp programming in cursor control Signed-off-by: Martin Tsai Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 3b2ddbd8c054..3732a1de9d6c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -178,6 +178,7 @@ bool dc_stream_set_cursor_attributes( int i; struct dc *core_dc; struct resource_context *res_ctx; + struct pipe_ctx *pipe_to_program = NULL; if (NULL == stream) { dm_error("DC: dc_stream is NULL!\n"); @@ -205,9 +206,17 @@ bool dc_stream_set_cursor_attributes( if (pipe_ctx->top_pipe && pipe_ctx->plane_state != pipe_ctx->top_pipe->plane_state) continue; + if (!pipe_to_program) { + pipe_to_program = pipe_ctx; + core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); + } core_dc->hwss.set_cursor_attribute(pipe_ctx); } + + if (pipe_to_program) + core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); + return true; } @@ -218,6 +227,7 @@ bool dc_stream_set_cursor_position( int i; struct dc *core_dc; struct resource_context *res_ctx; + struct pipe_ctx *pipe_to_program = NULL; if (NULL == stream) { dm_error("DC: dc_stream is NULL!\n"); @@ -243,9 +253,17 @@ bool dc_stream_set_cursor_position( !pipe_ctx->plane_res.ipp) continue; + if (!pipe_to_program) { + pipe_to_program = pipe_ctx; + 
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true); + } + core_dc->hwss.set_cursor_position(pipe_ctx); } + if (pipe_to_program) + core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, false); + return true; } -- cgit v1.2.1 From 34ff937029edf708d7db0d450b5f505969b68950 Mon Sep 17 00:00:00 2001 From: Tony Cheng Date: Mon, 16 Apr 2018 13:30:41 -0400 Subject: drm/amd/display: dal 3.1.44 Signed-off-by: Tony Cheng Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 92152980b0ce..cd4f4341cb53 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -38,7 +38,7 @@ #include "inc/compressor.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.1.43" +#define DC_VER "3.1.44" #define MAX_SURFACES 3 #define MAX_STREAMS 6 -- cgit v1.2.1 From 403dc5e8aab7b3d18a625e516698ea49d2be432e Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 10 Apr 2018 16:06:34 -0400 Subject: drm/amd/display: Use int for calculating vline start We are not sure these calculations will never need negative numbers. Use signed integers and warn and cap at 0 if this ever happens. Signed-off-by: Harry Wentland Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 2c5dbece928e..c734b7fa5835 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -96,10 +96,10 @@ static void optc1_disable_stereo(struct timing_generator *optc) static uint32_t get_start_vline(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing) { struct dc_crtc_timing patched_crtc_timing; - uint32_t vesa_sync_start; - uint32_t asic_blank_end; - uint32_t interlace_factor; - uint32_t vertical_line_start; + int vesa_sync_start; + int asic_blank_end; + int interlace_factor; + int vertical_line_start; patched_crtc_timing = *dc_crtc_timing; optc1_apply_front_porch_workaround(optc, &patched_crtc_timing); -- cgit v1.2.1 From ada8ce1530a7d1bf2e37560afa57bad911d36e81 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 20 Apr 2018 10:53:50 -0400 Subject: drm/amd/display: Couple formatting fixes Things such as mis-indent, and space at beginning of line. 
Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6 +++--- drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 96a57be3ceb6..3be17e26120d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2692,6 +2692,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) const struct dc_link *link = aconnector->dc_link; struct amdgpu_device *adev = connector->dev->dev_private; struct amdgpu_display_manager *dm = &adev->dm; + #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) @@ -4649,7 +4650,7 @@ static int dm_update_crtcs_state(struct dc *dc, if (aconnector && enable) { // Make sure fake sink is created in plug-in scenario new_con_state = drm_atomic_get_connector_state(state, - &aconnector->base); + &aconnector->base); if (IS_ERR(new_con_state)) { ret = PTR_ERR_OR_ZERO(new_con_state); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 4de1b443e438..9eb731fb5251 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1798,9 +1798,9 @@ enum dc_status dc_validate_global_state( return DC_ERROR_UNEXPECTED; if (dc->res_pool->funcs->validate_global) { - result = dc->res_pool->funcs->validate_global(dc, new_ctx); - if (result != DC_OK) - return result; + result = dc->res_pool->funcs->validate_global(dc, new_ctx); + if (result != DC_OK) + return result; } for (i = 0; i < new_ctx->stream_count; i++) { diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 0a476636c5c7..00c0a1ef15eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -430,7 +430,7 @@ static struct stream_encoder *dce112_stream_encoder_create( if (!enc110) return NULL; - + dce110_stream_encoder_construct(enc110, ctx, ctx->dc_bios, eng_id, &stream_enc_regs[eng_id], &se_shift, &se_mask); -- cgit v1.2.1 From 868b83511ba92b24ce5c5d852cf16b1bc07e13a7 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 20 Apr 2018 11:05:07 -0400 Subject: drm/amd/display: Add VG12 ASIC IDs Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 3e8e535e08f2..1b987b6a347d 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -120,9 +120,14 @@ #define AI_GREENLAND_P_A0 1 #define AI_GREENLAND_P_A1 2 +#define AI_UNKNOWN 0xFF -#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_UNKNOWN) -#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_UNKNOWN) +#define AI_VEGA12_P_A0 20 +#define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0) +#define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0) + +#define 
ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) +#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) /* DCN1_0 */ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ -- cgit v1.2.1 From 3aabfcd70077743a5943acd86e70604945e384c1 Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Mon, 5 Mar 2018 14:59:57 -0500 Subject: drm/amd: Add BIOS smu_info v3_3 required struct def. Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atomfirmware.h | 170 ++++++++++++++++++++++++++++- 1 file changed, 168 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index 0f5ad54d3fd3..de177ce8ca80 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -501,6 +501,32 @@ enum atom_cooling_solution_id{ LIQUID_COOLING = 0x01 }; +struct atom_firmware_info_v3_2 { + struct atom_common_table_header table_header; + uint32_t firmware_revision; + uint32_t bootup_sclk_in10khz; + uint32_t bootup_mclk_in10khz; + uint32_t firmware_capability; // enum atombios_firmware_capability + uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */ + uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address + uint16_t bootup_vddc_mv; + uint16_t bootup_vddci_mv; + uint16_t bootup_mvddc_mv; + uint16_t bootup_vddgfx_mv; + uint8_t mem_module_id; + uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */ + uint8_t reserved1[2]; + uint32_t mc_baseaddr_high; + uint32_t mc_baseaddr_low; + uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def + uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id + uint8_t board_i2c_feature_slave_addr; + uint8_t reserved3; + uint16_t bootup_mvddq_mv; + uint16_t bootup_mvpp_mv; + uint32_t zfbstartaddrin16mb; + uint32_t reserved2[3]; +}; /* *************************************************************************** @@ -1169,7 +1195,29 @@ struct atom_gfx_info_v2_2 uint32_t rlc_gpu_timer_refclk; }; - +struct atom_gfx_info_v2_3 { + struct atom_common_table_header table_header; + uint8_t gfxip_min_ver; + uint8_t gfxip_max_ver; + uint8_t max_shader_engines; + uint8_t max_tile_pipes; + uint8_t max_cu_per_sh; + uint8_t max_sh_per_se; + uint8_t max_backends_per_se; + uint8_t max_texture_channel_caches; + uint32_t regaddr_cp_dma_src_addr; + uint32_t regaddr_cp_dma_src_addr_hi; + uint32_t regaddr_cp_dma_dst_addr; + uint32_t regaddr_cp_dma_dst_addr_hi; + uint32_t regaddr_cp_dma_command; + uint32_t regaddr_cp_status; + uint32_t regaddr_rlc_gpu_clock_32; + uint32_t rlc_gpu_timer_refclk; + uint8_t active_cu_per_sh; + uint8_t active_rb_per_se; + uint16_t gcgoldenoffset; + uint32_t rm21_sram_vmin_value; +}; /* *************************************************************************** @@ -1198,6 +1246,76 @@ struct atom_smu_info_v3_1 uint8_t fw_ctf_polarity; // GPIO polarity for CTF }; +struct atom_smu_info_v3_2 { + struct atom_common_table_header table_header; + uint8_t smuip_min_ver; + uint8_t smuip_max_ver; + uint8_t smu_rsd1; + uint8_t gpuclk_ss_mode; + uint16_t sclk_ss_percentage; + uint16_t sclk_ss_rate_10hz; + uint16_t gpuclk_ss_percentage; // in unit of 0.001% + uint16_t gpuclk_ss_rate_10hz; + uint32_t core_refclk_10khz; + uint8_t ac_dc_gpio_bit; // GPIO 
bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid + uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching + uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid + uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event + uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid + uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event + uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid + uint8_t fw_ctf_polarity; // GPIO polarity for CTF + uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid + uint8_t pcc_gpio_polarity; // GPIO polarity for CTF + uint16_t smugoldenoffset; + uint32_t gpupll_vco_freq_10khz; + uint32_t bootup_smnclk_10khz; + uint32_t bootup_socclk_10khz; + uint32_t bootup_mp0clk_10khz; + uint32_t bootup_mp1clk_10khz; + uint32_t bootup_lclk_10khz; + uint32_t bootup_dcefclk_10khz; + uint32_t ctf_threshold_override_value; + uint32_t reserved[5]; +}; + +struct atom_smu_info_v3_3 { + struct atom_common_table_header table_header; + uint8_t smuip_min_ver; + uint8_t smuip_max_ver; + uint8_t smu_rsd1; + uint8_t gpuclk_ss_mode; + uint16_t sclk_ss_percentage; + uint16_t sclk_ss_rate_10hz; + uint16_t gpuclk_ss_percentage; // in unit of 0.001% + uint16_t gpuclk_ss_rate_10hz; + uint32_t core_refclk_10khz; + uint8_t ac_dc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for AC/DC switching, =0xff means invalid + uint8_t ac_dc_polarity; // GPIO polarity for AC/DC switching + uint8_t vr0hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR0 HOT event, =0xff means invalid + uint8_t vr0hot_polarity; // GPIO polarity for VR0 HOT event + uint8_t vr1hot_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for VR1 HOT event , =0xff means invalid + uint8_t vr1hot_polarity; // GPIO polarity for VR1 HOT event + uint8_t fw_ctf_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for CTF, =0xff means invalid + uint8_t fw_ctf_polarity; // GPIO polarity for CTF + uint8_t pcc_gpio_bit; // GPIO bit shift in SMU_GPIOPAD_A configured for PCC, =0xff means invalid + uint8_t pcc_gpio_polarity; // GPIO polarity for CTF + uint16_t smugoldenoffset; + uint32_t gpupll_vco_freq_10khz; + uint32_t bootup_smnclk_10khz; + uint32_t bootup_socclk_10khz; + uint32_t bootup_mp0clk_10khz; + uint32_t bootup_mp1clk_10khz; + uint32_t bootup_lclk_10khz; + uint32_t bootup_dcefclk_10khz; + uint32_t ctf_threshold_override_value; + uint32_t syspll3_0_vco_freq_10khz; + uint32_t syspll3_1_vco_freq_10khz; + uint32_t bootup_fclk_10khz; + uint32_t bootup_waflclk_10khz; + uint32_t reserved[3]; +}; + /* *************************************************************************** Data Table smc_dpm_info structure @@ -1283,7 +1401,6 @@ struct atom_smc_dpm_info_v4_1 uint32_t boardreserved[10]; }; - /* *************************************************************************** Data Table asic_profiling_info structure @@ -1864,6 +1981,55 @@ enum atom_smu9_syspll0_clock_id SMU9_SYSPLL0_DISPCLK_ID = 11, // DISPCLK }; +enum atom_smu11_syspll_id { + SMU11_SYSPLL0_ID = 0, + SMU11_SYSPLL1_0_ID = 1, + SMU11_SYSPLL1_1_ID = 2, + SMU11_SYSPLL1_2_ID = 3, + SMU11_SYSPLL2_ID = 4, + SMU11_SYSPLL3_0_ID = 5, + SMU11_SYSPLL3_1_ID = 6, +}; + + +enum atom_smu11_syspll0_clock_id { + SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK + SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK + SMU11_SYSPLL0_DCLK_ID = 2, // DCLK + 
SMU11_SYSPLL0_VCLK_ID = 3, // VCLK + SMU11_SYSPLL0_ECLK_ID = 4, // ECLK + SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK +}; + + +enum atom_smu11_syspll1_0_clock_id { + SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a +}; + +enum atom_smu11_syspll1_1_clock_id { + SMU11_SYSPLL1_0_UCLKB_ID = 0, // UCLK_b +}; + +enum atom_smu11_syspll1_2_clock_id { + SMU11_SYSPLL1_0_FCLK_ID = 0, // FCLK +}; + +enum atom_smu11_syspll2_clock_id { + SMU11_SYSPLL2_GFXCLK_ID = 0, // GFXCLK +}; + +enum atom_smu11_syspll3_0_clock_id { + SMU11_SYSPLL3_0_WAFCLK_ID = 0, // WAFCLK + SMU11_SYSPLL3_0_DISPCLK_ID = 1, // DISPCLK + SMU11_SYSPLL3_0_DPREFCLK_ID = 2, // DPREFCLK +}; + +enum atom_smu11_syspll3_1_clock_id { + SMU11_SYSPLL3_1_MP1CLK_ID = 0, // MP1CLK + SMU11_SYSPLL3_1_SMNCLK_ID = 1, // SMNCLK + SMU11_SYSPLL3_1_LCLK_ID = 2, // LCLK +}; + struct atom_get_smu_clock_info_output_parameters_v3_1 { union { -- cgit v1.2.1 From d66057830c12855253a5c2d246ef26a66b43e2d5 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 20 Apr 2018 10:56:18 -0400 Subject: drm/amd/display: Add get_firmware_info_v3_2 for VG12 Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 86 +++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 985fe8c22875..10a5807a7e8b 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -70,6 +70,10 @@ static enum bp_result get_firmware_info_v3_1( struct bios_parser *bp, struct dc_firmware_info *info); +static enum bp_result get_firmware_info_v3_2( + struct bios_parser *bp, + struct dc_firmware_info *info); + static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp, struct atom_display_object_path_v2 *object); @@ -1321,9 +1325,11 @@ static enum bp_result bios_parser_get_firmware_info( case 3: switch (revision.minor) { case 1: - case 2: result = get_firmware_info_v3_1(bp, info); break; + case 2: + result = get_firmware_info_v3_2(bp, info); + break; default: break; } @@ -1383,6 +1389,84 @@ static enum bp_result get_firmware_info_v3_1( return BP_RESULT_OK; } +static enum bp_result get_firmware_info_v3_2( + struct bios_parser *bp, + struct dc_firmware_info *info) +{ + struct atom_firmware_info_v3_2 *firmware_info; + struct atom_display_controller_info_v4_1 *dce_info = NULL; + struct atom_common_table_header *header; + struct atom_data_revision revision; + struct atom_smu_info_v3_2 *smu_info_v3_2 = NULL; + struct atom_smu_info_v3_3 *smu_info_v3_3 = NULL; + + if (!info) + return BP_RESULT_BADINPUT; + + firmware_info = GET_IMAGE(struct atom_firmware_info_v3_2, + DATA_TABLES(firmwareinfo)); + + dce_info = GET_IMAGE(struct atom_display_controller_info_v4_1, + DATA_TABLES(dce_info)); + + if (!firmware_info || !dce_info) + return BP_RESULT_BADBIOSTABLE; + + memset(info, 0, sizeof(*info)); + + header = GET_IMAGE(struct atom_common_table_header, + DATA_TABLES(smu_info)); + get_atom_data_table_revision(header, &revision); + + if (revision.minor == 2) { + /* Vega12 */ + smu_info_v3_2 = GET_IMAGE(struct atom_smu_info_v3_2, + DATA_TABLES(smu_info)); + + if (!smu_info_v3_2) + return BP_RESULT_BADBIOSTABLE; + + info->default_engine_clk = smu_info_v3_2->bootup_dcefclk_10khz * 10; + } else if (revision.minor == 3) { + /* Vega20 */ + smu_info_v3_3 = GET_IMAGE(struct atom_smu_info_v3_3, + 
DATA_TABLES(smu_info)); + + if (!smu_info_v3_3) + return BP_RESULT_BADBIOSTABLE; + + info->default_engine_clk = smu_info_v3_3->bootup_dcefclk_10khz * 10; + } + + // We need to convert from 10KHz units into KHz units. + info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10; + + /* 27MHz for Vega10 & Vega12; 100MHz for Vega20 */ + info->pll_info.crystal_frequency = dce_info->dce_refclk_10khz * 10; + /* Hardcode frequency if BIOS gives no DCE Ref Clk */ + if (info->pll_info.crystal_frequency == 0) { + if (revision.minor == 2) + info->pll_info.crystal_frequency = 27000; + else if (revision.minor == 3) + info->pll_info.crystal_frequency = 100000; + } + /*dp_phy_ref_clk is not correct for atom_display_controller_info_v4_2, but we don't use it*/ + info->dp_phy_ref_clk = dce_info->dpphy_refclk_10khz * 10; + info->i2c_engine_ref_clk = dce_info->i2c_engine_refclk_10khz * 10; + + /* Get GPU PLL VCO Clock */ + if (bp->cmd_tbl.get_smu_clock_info != NULL) { + if (revision.minor == 2) + info->smu_gpu_pll_output_freq = + bp->cmd_tbl.get_smu_clock_info(bp, SMU9_SYSPLL0_ID) * 10; + else if (revision.minor == 3) + info->smu_gpu_pll_output_freq = + bp->cmd_tbl.get_smu_clock_info(bp, SMU11_SYSPLL3_0_ID) * 10; + } + + return BP_RESULT_OK; +} + static enum bp_result bios_parser_get_encoder_cap_info( struct dc_bios *dcb, struct graphics_object_id object_id, -- cgit v1.2.1 From 8a61bc085ffab3071c59efcbeff4044c034e7490 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 24 Apr 2018 10:49:20 -0400 Subject: drm/amd/display: Don't return ddc result and read_bytes in same return value The two ranges overlap. Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 20 ++++++++++++-------- drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 10 +++++++--- drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h | 5 +++-- 3 files changed, 22 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index ace9ad578ca0..4304d9e408b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -83,21 +83,22 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ? 
I2C_MOT_TRUE : I2C_MOT_FALSE; enum ddc_result res; - ssize_t read_bytes; + uint32_t read_bytes = msg->size; if (WARN_ON(msg->size > 16)) return -E2BIG; switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_NATIVE_READ: - read_bytes = dal_ddc_service_read_dpcd_data( + res = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, false, I2C_MOT_UNDEF, msg->address, msg->buffer, - msg->size); - return read_bytes; + msg->size, + &read_bytes); + break; case DP_AUX_NATIVE_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -108,14 +109,15 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, msg->size); break; case DP_AUX_I2C_READ: - read_bytes = dal_ddc_service_read_dpcd_data( + res = dal_ddc_service_read_dpcd_data( TO_DM_AUX(aux)->ddc_service, true, mot, msg->address, msg->buffer, - msg->size); - return read_bytes; + msg->size, + &read_bytes); + break; case DP_AUX_I2C_WRITE: res = dal_ddc_service_write_dpcd_data( TO_DM_AUX(aux)->ddc_service, @@ -137,7 +139,9 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, r == DDC_RESULT_SUCESSFULL); #endif - return msg->size; + if (res != DDC_RESULT_SUCESSFULL) + return -EIO; + return read_bytes; } static enum drm_connector_status diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 49c2face1e7a..ae48d603ebd6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -629,13 +629,14 @@ bool dal_ddc_service_query_ddc_data( return ret; } -ssize_t dal_ddc_service_read_dpcd_data( +enum ddc_result dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len) + uint32_t len, + uint32_t *read) { struct aux_payload read_payload = { .i2c_over_aux = i2c, @@ -652,6 +653,8 @@ ssize_t dal_ddc_service_read_dpcd_data( .mot = mot }; + *read = 0; + if (len > DEFAULT_AUX_MAX_DATA_SIZE) { BREAK_TO_DEBUGGER(); return DDC_RESULT_FAILED_INVALID_OPERATION; @@ -661,7 +664,8 @@ ssize_t dal_ddc_service_read_dpcd_data( ddc->ctx->i2caux, ddc->ddc_pin, &command)) { - return (ssize_t)command.payloads->length; + *read = command.payloads->length; + return DDC_RESULT_SUCESSFULL; } return DDC_RESULT_FAILED_OPERATION; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index 090b7a8dd67b..30b3a08b91be 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -102,13 +102,14 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); -ssize_t dal_ddc_service_read_dpcd_data( +enum ddc_result dal_ddc_service_read_dpcd_data( struct ddc_service *ddc, bool i2c, enum i2c_mot_mode mot, uint32_t address, uint8_t *data, - uint32_t len); + uint32_t len, + uint32_t *read); enum ddc_result dal_ddc_service_write_dpcd_data( struct ddc_service *ddc, -- cgit v1.2.1 From f3336254263c3b6a7734379084f4873a335f1577 Mon Sep 17 00:00:00 2001 From: Xiaojie Yuan Date: Thu, 26 Apr 2018 18:50:26 +0800 Subject: drm/amdgpu/uvd7: add emit_reg_write_reg_wait ring callback Fix the NULL pointer dereference while running amdgpu_test: [ 54.972246] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 [ 54.972265] IP: (null) [ 54.972273] PGD 0 P4D 0 [ 54.972280] Oops: 0010 [#1] SMP PTI [ 54.972288] Modules linked in: amdkfd amd_iommu_v2 amdgpu(OE) chash gpu_sched ttm drm_kms_helper drm i2c_algo_bit fb_sys_fops 
syscopyarea sysfillrect sysimgblt snd_hda_codec_realtek snd_hda_codec_generic snd_hda_codec_hdmi snd_hda_intel snd_hda_codec snd_hda_core snd_hwdep intel_rapl snd_pcm snd_seq_midi snd_seq_midi_event snd_rawmidi x86_pkg_temp_thermal intel_powerclamp coretemp kvm_intel snd_seq snd_seq_device kvm irqbypass snd_timer crct10dif_pclmul crc32_pclmul ghash_clmulni_intel pcbc snd soundcore joydev input_leds aesni_intel aes_x86_64 crypto_simd glue_helper cryptd idma64 virt_dma mei_me intel_lpss_pci serio_raw intel_cstate intel_rapl_perf shpchp intel_pch_thermal mei mac_hid intel_lpss acpi_pad parport_pc ppdev nfsd lp auth_rpcgss nfs_acl lockd grace sunrpc parport autofs4 hid_generic [ 54.972434] usbhid mxm_wmi e1000e psmouse ahci hid libahci wmi pinctrl_sunrisepoint video pinctrl_intel [ 54.972457] CPU: 6 PID: 1393 Comm: uvd Tainted: G OE 4.16.0-rc7-27fb84fda777 #1 [ 54.972473] Hardware name: MSI MS-7984/Z170 KRAIT GAMING (MS-7984), BIOS B.80 05/11/2016 [ 54.972489] RIP: 0010: (null) [ 54.972497] RSP: 0018:ffffaea002c8bcc0 EFLAGS: 00010202 [ 54.972508] RAX: 0000000000000000 RBX: ffff9d30d3c56f60 RCX: 00000000007c0002 [ 54.972522] RDX: 000000000001a6fb RSI: 000000000001a6e9 RDI: ffff9d30d3c56f60 [ 54.972536] RBP: ffffaea002c8bd10 R08: 0000000000000002 R09: ffffffffc06977d0 [ 54.972550] R10: 0000000000000040 R11: 0000000000000000 R12: 0000000000000002 [ 54.972564] R13: ffff9d30d3c5001c R14: ffff9d30d3c50000 R15: 0000000000000006 [ 54.972579] FS: 0000000000000000(0000) GS:ffff9d30eed80000(0000) knlGS:0000000000000000 [ 54.972594] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 54.972606] CR2: 0000000000000000 CR3: 00000002dbc0a001 CR4: 00000000003606e0 [ 54.972620] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 54.972634] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 54.972648] Call Trace: [ 54.972685] ? gmc_v9_0_emit_flush_gpu_tlb+0x111/0x140 [amdgpu] [ 54.972721] uvd_v7_0_ring_emit_vm_flush+0x31/0x70 [amdgpu] [ 54.972751] amdgpu_vm_flush+0x5dc/0x6c0 [amdgpu] [ 54.972787] ? pp_dpm_powergate_uvd+0x50/0x80 [amdgpu] [ 54.972816] amdgpu_ib_schedule+0x120/0x4e0 [amdgpu] [ 54.972850] amdgpu_job_run+0x17b/0x1c0 [amdgpu] [ 54.972861] drm_sched_main+0x2cc/0x490 [gpu_sched] [ 54.972873] ? wait_woken+0x80/0x80 [ 54.972882] kthread+0x121/0x140 [ 54.972891] ? drm_sched_job_finish+0xf0/0xf0 [gpu_sched] [ 54.972902] ? kthread_create_worker_on_cpu+0x70/0x70 [ 54.972914] ret_from_fork+0x35/0x40 [ 54.972922] Code: Bad RIP value. 
[ 54.972932] RIP: (null) RSP: ffffaea002c8bcc0 [ 54.972943] CR2: 0000000000000000 [ 54.972951] ---[ end trace 5feb349263bbf633 ]--- Signed-off-by: Xiaojie Yuan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 280c0826e183..2251db4048f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1671,6 +1671,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = { .end_use = amdgpu_uvd_ring_end_use, .emit_wreg = uvd_v7_0_ring_emit_wreg, .emit_reg_wait = uvd_v7_0_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { -- cgit v1.2.1 From f7dbe9186d85dd63df7868d408fea6859281c446 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Michel=20D=C3=A4nzer?= Date: Tue, 17 Apr 2018 12:25:22 +0200 Subject: drm/amd/display: Use kvzalloc for potentially large allocations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allocating up to 32 physically contiguous pages can easily fail (and has failed for me), and isn't necessary anyway. Reviewed-by: Harry Wentland Signed-off-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 14 ++--- .../drm/amd/display/modules/color/color_gamma.c | 72 ++++++++++++---------- 2 files changed, 45 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c index 959387705965..68a71adeb12e 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c @@ -72,8 +72,8 @@ struct dc_plane_state *dc_create_plane_state(struct dc *dc) { struct dc *core_dc = dc; - struct dc_plane_state *plane_state = kzalloc(sizeof(*plane_state), - GFP_KERNEL); + struct dc_plane_state *plane_state = kvzalloc(sizeof(*plane_state), + GFP_KERNEL); if (NULL == plane_state) return NULL; @@ -126,7 +126,7 @@ static void dc_plane_state_free(struct kref *kref) { struct dc_plane_state *plane_state = container_of(kref, struct dc_plane_state, refcount); destruct(plane_state); - kfree(plane_state); + kvfree(plane_state); } void dc_plane_state_release(struct dc_plane_state *plane_state) @@ -142,7 +142,7 @@ void dc_gamma_retain(struct dc_gamma *gamma) static void dc_gamma_free(struct kref *kref) { struct dc_gamma *gamma = container_of(kref, struct dc_gamma, refcount); - kfree(gamma); + kvfree(gamma); } void dc_gamma_release(struct dc_gamma **gamma) @@ -153,7 +153,7 @@ void dc_gamma_release(struct dc_gamma **gamma) struct dc_gamma *dc_create_gamma(void) { - struct dc_gamma *gamma = kzalloc(sizeof(*gamma), GFP_KERNEL); + struct dc_gamma *gamma = kvzalloc(sizeof(*gamma), GFP_KERNEL); if (gamma == NULL) goto alloc_fail; @@ -173,7 +173,7 @@ void dc_transfer_func_retain(struct dc_transfer_func *tf) static void dc_transfer_func_free(struct kref *kref) { struct dc_transfer_func *tf = container_of(kref, struct dc_transfer_func, refcount); - kfree(tf); + kvfree(tf); } void dc_transfer_func_release(struct dc_transfer_func *tf) @@ -183,7 +183,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf) struct dc_transfer_func *dc_create_transfer_func() { - struct dc_transfer_func *tf = kzalloc(sizeof(*tf), 
GFP_KERNEL); + struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL); if (tf == NULL) goto alloc_fail; diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index ad0ff50305ce..15e5b72e6e00 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -1274,19 +1274,19 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), - GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; - axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), - GFP_KERNEL); + axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + 3), + GFP_KERNEL); if (!axix_x) goto axix_x_alloc_fail; - coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); + coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; @@ -1338,13 +1338,13 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, ret = true; - kfree(coeff); + kvfree(coeff); coeff_alloc_fail: - kfree(axix_x); + kvfree(axix_x); axix_x_alloc_fail: - kfree(rgb_regamma); + kvfree(rgb_regamma); rgb_regamma_alloc_fail: - kfree(rgb_user); + kvfree(rgb_user); rgb_user_alloc_fail: return ret; } @@ -1480,19 +1480,19 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, input_tf->type = TF_TYPE_DISTRIBUTED_POINTS; - rgb_user = kzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + rgb_user = kvzalloc(sizeof(*rgb_user) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_user) goto rgb_user_alloc_fail; - curve = kzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), - GFP_KERNEL); + curve = kvzalloc(sizeof(*curve) * (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!curve) goto curve_alloc_fail; - axix_x = kzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), - GFP_KERNEL); + axix_x = kvzalloc(sizeof(*axix_x) * (ramp->num_entries + _EXTRA_POINTS), + GFP_KERNEL); if (!axix_x) goto axix_x_alloc_fail; - coeff = kzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); + coeff = kvzalloc(sizeof(*coeff) * (MAX_HW_POINTS + _EXTRA_POINTS), GFP_KERNEL); if (!coeff) goto coeff_alloc_fail; @@ -1534,13 +1534,13 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, ret = true; - kfree(coeff); + kvfree(coeff); coeff_alloc_fail: - kfree(axix_x); + kvfree(axix_x); axix_x_alloc_fail: - kfree(curve); + kvfree(curve); curve_alloc_fail: - kfree(rgb_user); + kvfree(rgb_user); rgb_user_alloc_fail: return ret; @@ -1569,8 +1569,9 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; points->end_exponent = 7; @@ -1590,11 +1591,12 @@ bool 
mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_regamma); + kvfree(rgb_regamma); } else if (trans == TRANSFER_FUNCTION_SRGB || trans == TRANSFER_FUNCTION_BT709) { - rgb_regamma = kzalloc(sizeof(*rgb_regamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_regamma = kvzalloc(sizeof(*rgb_regamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_regamma) goto rgb_regamma_alloc_fail; points->end_exponent = 0; @@ -1612,7 +1614,7 @@ bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_regamma); + kvfree(rgb_regamma); } rgb_regamma_alloc_fail: return ret; @@ -1636,8 +1638,9 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; } else if (trans == TRANSFER_FUNCTION_PQ) { - rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_degamma) goto rgb_degamma_alloc_fail; @@ -1652,11 +1655,12 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_degamma); + kvfree(rgb_degamma); } else if (trans == TRANSFER_FUNCTION_SRGB || trans == TRANSFER_FUNCTION_BT709) { - rgb_degamma = kzalloc(sizeof(*rgb_degamma) * (MAX_HW_POINTS + - _EXTRA_POINTS), GFP_KERNEL); + rgb_degamma = kvzalloc(sizeof(*rgb_degamma) * + (MAX_HW_POINTS + _EXTRA_POINTS), + GFP_KERNEL); if (!rgb_degamma) goto rgb_degamma_alloc_fail; @@ -1670,7 +1674,7 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, } ret = true; - kfree(rgb_degamma); + kvfree(rgb_degamma); } points->end_exponent = 0; points->x_point_at_y1_red = 1; -- cgit v1.2.1 From 87ac8fb08bc7c69a39842c73f3a9d06eb73f02cc Mon Sep 17 00:00:00 2001 From: Shirish S Date: Wed, 25 Apr 2018 14:42:28 +0530 Subject: drm/amd/display: disable FBC on underlay pipe FBC is not applicable for the underlay pipe, hence disallow enabling and disabling of the same. This also fixes the BUG hit of calling sleep in atomic context. 
Signed-off-by: Shirish S Reviewed-by: Roman Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 51c6c70a4a30..2288d0aa773b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2747,6 +2747,9 @@ static void dce110_program_front_end_for_pipe( struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; +#if defined(CONFIG_DRM_AMD_DC_FBC) + unsigned int underlay_idx = dc->res_pool->underlay_pipe_index; +#endif unsigned int i; DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); @@ -2788,7 +2791,9 @@ static void dce110_program_front_end_for_pipe( program_scaler(dc, pipe_ctx); #if defined(CONFIG_DRM_AMD_DC_FBC) - if (dc->fbc_compressor && old_pipe->stream) { + /* fbc not applicable on Underlay pipe */ + if (dc->fbc_compressor && old_pipe->stream && + pipe_ctx->pipe_idx != underlay_idx) { if (plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL) dc->fbc_compressor->funcs->disable_fbc(dc->fbc_compressor); else -- cgit v1.2.1 From 719a39a1e9b2dfbfb86f17a8da696b714a3b885d Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Mon, 30 Apr 2018 10:04:42 -0400 Subject: drm/amdgpu: Switch to interruptable wait to recover from ring hang. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: Use dma_fence_wait instead of dma_fence_wait_timeout(...,MAX_SCHEDULE_TIMEOUT) Avoid printing error message for ERESTARTSYS Originally-by: David Panariti Signed-off-by: Andrey Grodzovsky Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index eb80edfb1b0a..6741a62a7d15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -421,9 +421,11 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id) if (other) { signed long r; - r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT); + r = dma_fence_wait(other, true); if (r < 0) { - DRM_ERROR("Error (%ld) waiting for fence!\n", r); + if (r != -ERESTARTSYS) + DRM_ERROR("Error (%ld) waiting for fence!\n", r); + return r; } } -- cgit v1.2.1 From fc5a136ddad944d2f909d3ffcde924b7afa792f4 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 27 Apr 2018 13:46:08 +0800 Subject: drm/amd/pp: Skip fan attributes if fan not present With powerplay enabled, also need to skip fan attributes if no fan present. 
Reviewed-by: Huang Rui Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 21 ++++++++------------- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 3 +++ 2 files changed, 11 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index d9802d938e33..2c821262e262 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1364,19 +1364,14 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - /* handle non-powerplay limitations */ - if (!adev->powerplay.pp_handle) { - /* Skip fan attributes if fan is not present */ - if (adev->pm.no_fan && - (attr == &sensor_dev_attr_pwm1.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) - return 0; - /* requires powerplay */ - if (attr == &sensor_dev_attr_fan1_input.dev_attr.attr) - return 0; - } + + /* Skip fan attributes if fan is not present */ + if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || + attr == &sensor_dev_attr_fan1_input.dev_attr.attr)) + return 0; /* Skip limit attributes if DPM is not enabled */ if (!adev->pm.dpm_enabled && diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index eecb11824412..71b42331f185 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -229,6 +229,9 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr) if (ret) goto err; + ((struct amdgpu_device *)hwmgr->adev)->pm.no_fan = + hwmgr->thermal_controller.fanInfo.bNoFan; + ret = hwmgr->hwmgr_func->backend_init(hwmgr); if (ret) goto err1; -- cgit v1.2.1 From 51d45cbc9196b07f3fc66df5dafd3010c04913a3 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 27 Apr 2018 14:09:30 +0800 Subject: drm/amdgpu: Fix display corruption on CI with dpm enabled with dpm enabled, need to get active crtcs in dc/no-dc mode. caused by 'commit ebb649667a31 ("drm/amdgpu: Set pm_display_cfg in non-dc mode")' Reviewed-by: Huang Rui Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 2c821262e262..b455da487782 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -1878,26 +1878,26 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) amdgpu_fence_wait_empty(ring); } - if (!amdgpu_device_has_dc_support(adev)) { - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_get_active_displays(adev); - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs; - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); - /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. 
*/ - if (adev->pm.pm_display_cfg.vrefresh > 120) - adev->pm.pm_display_cfg.min_vblank_time = 0; - if (adev->powerplay.pp_funcs->display_configuration_change) - adev->powerplay.pp_funcs->display_configuration_change( - adev->powerplay.pp_handle, - &adev->pm.pm_display_cfg); - mutex_unlock(&adev->pm.mutex); - } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if (!amdgpu_device_has_dc_support(adev)) { + mutex_lock(&adev->pm.mutex); + amdgpu_dpm_get_active_displays(adev); + adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs; + adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); + adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); + /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ + if (adev->pm.pm_display_cfg.vrefresh > 120) + adev->pm.pm_display_cfg.min_vblank_time = 0; + if (adev->powerplay.pp_funcs->display_configuration_change) + adev->powerplay.pp_funcs->display_configuration_change( + adev->powerplay.pp_handle, + &adev->pm.pm_display_cfg); + mutex_unlock(&adev->pm.mutex); + } amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); } else { mutex_lock(&adev->pm.mutex); + amdgpu_dpm_get_active_displays(adev); /* update battery/ac status */ if (power_supply_is_system_supplied() > 0) adev->pm.dpm.ac_power = true; -- cgit v1.2.1 From dfe8a0187c8dde66b3bc52882826b1e53920ad56 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Tue, 1 May 2018 10:15:16 -0400 Subject: drm/amd/amdgpu: vcn10 Add callback for emit_reg_write_reg_wait The callback .emit_reg_write_reg_wait was missing for vcn decode which resulted in a kernel oops. Signed-off-by: Tom St Denis Reviewed-by: Andrey Grodzovsky Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index d9a15338db7e..0501746b6c2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1109,6 +1109,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .end_use = amdgpu_vcn_ring_end_use, .emit_wreg = vcn_v1_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = { -- cgit v1.2.1 From 7e4237dbe494f9721463fd1f2d3b9e52ec74930e Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 2 May 2018 13:01:36 -0400 Subject: drm/amd/amdgpu: Add some documentation to the debugfs entries Signed-off-by: Tom St Denis Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 193 +++++++++++++++++++++++++++- 1 file changed, 189 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index c98e59721444..f5fb93795a69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -28,8 +28,13 @@ #include #include "amdgpu.h" -/* - * Debugfs +/** + * amdgpu_debugfs_add_files - Add simple debugfs entries + * + * @adev: Device to attach debugfs entries to + * @files: Array of function callbacks that respond to reads + * @nfiles: Number of callbacks to register + * */ int amdgpu_debugfs_add_files(struct amdgpu_device *adev, const struct drm_info_list *files, @@ -64,7 +69,33 @@ 
int amdgpu_debugfs_add_files(struct amdgpu_device *adev, #if defined(CONFIG_DEBUG_FS) - +/** + * amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes + * + * @read: True if reading + * @f: open file handle + * @buf: User buffer to write/read to + * @size: Number of bytes to write/read + * @pos: Offset to seek to + * + * This debugfs entry has special meaning on the offset being sought. + * Various bits have different meanings: + * + * Bit 62: Indicates a GRBM bank switch is needed + * Bit 61: Indicates a SRBM bank switch is needed (implies bit 62 is + * zero) + * Bits 24..33: The SE or ME selector if needed + * Bits 34..43: The SH (or SA) or PIPE selector if needed + * Bits 44..53: The INSTANCE (or CU/WGP) or QUEUE selector if needed + * + * Bit 23: Indicates that the PM power gating lock should be held + * This is necessary to read registers that might be + * unreliable during a power gating transistion. + * + * The lower bits are the BYTE offset of the register to read. This + * allows reading multiple registers in a single call and having + * the returned size reflect that. + */ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -164,19 +195,37 @@ end: return result; } - +/** + * amdgpu_debugfs_regs_read - Callback for reading MMIO registers + */ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { return amdgpu_debugfs_process_reg_op(true, f, buf, size, pos); } +/** + * amdgpu_debugfs_regs_write - Callback for writing MMIO registers + */ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos); } + +/** + * amdgpu_debugfs_regs_pcie_read - Read from a PCIE register + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + * + * The lower bits are the BYTE offset of the register to read. This + * allows reading multiple registers in a single call and having + * the returned size reflect that. + */ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -204,6 +253,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf, return result; } +/** + * amdgpu_debugfs_regs_pcie_write - Write to a PCIE register + * + * @f: open file handle + * @buf: User buffer to write data from + * @size: Number of bytes to write + * @pos: Offset to seek to + * + * The lower bits are the BYTE offset of the register to write. This + * allows writing multiple registers in a single call and having + * the returned size reflect that. + */ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { @@ -232,6 +293,18 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user return result; } +/** + * amdgpu_debugfs_regs_didt_read - Read from a DIDT register + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + * + * The lower bits are the BYTE offset of the register to read. This + * allows reading multiple registers in a single call and having + * the returned size reflect that. 
+ */ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -259,6 +332,18 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf, return result; } +/** + * amdgpu_debugfs_regs_didt_write - Write to a DIDT register + * + * @f: open file handle + * @buf: User buffer to write data from + * @size: Number of bytes to write + * @pos: Offset to seek to + * + * The lower bits are the BYTE offset of the register to write. This + * allows writing multiple registers in a single call and having + * the returned size reflect that. + */ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { @@ -287,6 +372,18 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user return result; } +/** + * amdgpu_debugfs_regs_smc_read - Read from a SMC register + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + * + * The lower bits are the BYTE offset of the register to read. This + * allows reading multiple registers in a single call and having + * the returned size reflect that. + */ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -314,6 +411,18 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf, return result; } +/** + * amdgpu_debugfs_regs_smc_write - Write to a SMC register + * + * @f: open file handle + * @buf: User buffer to write data from + * @size: Number of bytes to write + * @pos: Offset to seek to + * + * The lower bits are the BYTE offset of the register to write. This + * allows writing multiple registers in a single call and having + * the returned size reflect that. + */ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { @@ -342,6 +451,20 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user * return result; } +/** + * amdgpu_debugfs_gca_config_read - Read from gfx config data + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + * + * This file is used to access configuration data in a somewhat + * stable fashion. The format is a series of DWORDs with the first + * indicating which revision it is. New content is appended to the + * end so that older software can still read the data. + */ + static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -418,6 +541,19 @@ static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf, return result; } +/** + * amdgpu_debugfs_sensor_read - Read from the powerplay sensors + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + * + * The offset is treated as the BYTE address of one of the sensors + * enumerated in amd/include/kgd_pp_interface.h under the + * 'amd_pp_sensors' enumeration. For instance to read the UVD VCLK + * you would use the offset 3 * 4 = 12. + */ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -457,6 +593,27 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf, return !r ? 
outsize : r; } +/** amdgpu_debugfs_wave_read - Read WAVE STATUS data + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + * + * The offset being sought changes which wave that the status data + * will be returned for. The bits are used as follows: + * + * Bits 0..6: Byte offset into data + * Bits 7..14: SE selector + * Bits 15..22: SH/SA selector + * Bits 23..30: CU/{WGP+SIMD} selector + * Bits 31..36: WAVE ID selector + * Bits 37..44: SIMD ID selector + * + * The returned data begins with one DWORD of version information + * Followed by WAVE STATUS registers relevant to the GFX IP version + * being used. See gfx_v8_0_read_wave_data() for an example output. + */ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -507,6 +664,28 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, return result; } +/** amdgpu_debugfs_gpr_read - Read wave gprs + * + * @f: open file handle + * @buf: User buffer to store read data in + * @size: Number of bytes to read + * @pos: Offset to seek to + * + * The offset being sought changes which wave that the status data + * will be returned for. The bits are used as follows: + * + * Bits 0..11: Byte offset into data + * Bits 12..19: SE selector + * Bits 20..27: SH/SA selector + * Bits 28..35: CU/{WGP+SIMD} selector + * Bits 36..43: WAVE ID selector + * Bits 37..44: SIMD ID selector + * Bits 52..59: Thread selector + * Bits 60..61: Bank selector (VGPR=0,SGPR=1) + * + * The return data comes from the SGPR or VGPR register bank for + * the selected operational unit. + */ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -637,6 +816,12 @@ static const char *debugfs_regs_names[] = { "amdgpu_gpr", }; +/** + * amdgpu_debugfs_regs_init - Initialize debugfs entries that provide + * register access. + * + * @adev: The device to attach the debugfs entries to + */ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) { struct drm_minor *minor = adev->ddev->primary; -- cgit v1.2.1 From 3f4299bee6eda852489ce4fd307dd709a09f5d8f Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Tue, 24 Apr 2018 12:14:39 +0800 Subject: drm/amdgpu: abstract bo_base init function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Chunming Zhou Reviewed-by: Christian König Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 73 ++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 35 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1c00f1a56e8b..71dcdefce255 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -94,6 +94,36 @@ struct amdgpu_prt_cb { struct dma_fence_cb cb; }; +static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, + struct amdgpu_vm *vm, + struct amdgpu_bo *bo) +{ + base->vm = vm; + base->bo = bo; + INIT_LIST_HEAD(&base->bo_list); + INIT_LIST_HEAD(&base->vm_status); + + if (!bo) + return; + list_add_tail(&base->bo_list, &bo->va); + + if (bo->tbo.resv != vm->root.base.bo->tbo.resv) + return; + + if (bo->preferred_domains & + amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) + return; + + /* + * we checked all the prerequisites, but it looks like this per vm bo + * is currently evicted. 
add the bo to the evicted list to make sure it + * is validated on next vm use to avoid fault. + * */ + spin_lock(&vm->status_lock); + list_move_tail(&base->vm_status, &vm->evicted); + spin_unlock(&vm->status_lock); +} + /** * amdgpu_vm_level_shift - return the addr shift for each level * @@ -446,11 +476,9 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, */ pt->parent = amdgpu_bo_ref(parent->base.bo); - entry->base.vm = vm; - entry->base.bo = pt; - list_add_tail(&entry->base.bo_list, &pt->va); + amdgpu_vm_bo_base_init(&entry->base, vm, pt); spin_lock(&vm->status_lock); - list_add(&entry->base.vm_status, &vm->relocated); + list_move(&entry->base.vm_status, &vm->relocated); spin_unlock(&vm->status_lock); } @@ -1841,36 +1869,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, if (bo_va == NULL) { return NULL; } - bo_va->base.vm = vm; - bo_va->base.bo = bo; - INIT_LIST_HEAD(&bo_va->base.bo_list); - INIT_LIST_HEAD(&bo_va->base.vm_status); + amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); bo_va->ref_count = 1; INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); - if (!bo) - return bo_va; - - list_add_tail(&bo_va->base.bo_list, &bo->va); - - if (bo->tbo.resv != vm->root.base.bo->tbo.resv) - return bo_va; - - if (bo->preferred_domains & - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type)) - return bo_va; - - /* - * We checked all the prerequisites, but it looks like this per VM BO - * is currently evicted. add the BO to the evicted list to make sure it - * is validated on next VM use to avoid fault. - * */ - spin_lock(&vm->status_lock); - list_move_tail(&bo_va->base.vm_status, &vm->evicted); - spin_unlock(&vm->status_lock); - return bo_va; } @@ -2370,6 +2374,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int vm_context, unsigned int pasid) { struct amdgpu_bo_param bp; + struct amdgpu_bo *root; const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE, AMDGPU_VM_PTE_COUNT(adev) * 8); unsigned ring_instance; @@ -2431,23 +2436,21 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.flags = flags; bp.type = ttm_bo_type_kernel; bp.resv = NULL; - r = amdgpu_bo_create(adev, &bp, &vm->root.base.bo); + r = amdgpu_bo_create(adev, &bp, &root); if (r) goto error_free_sched_entity; - r = amdgpu_bo_reserve(vm->root.base.bo, true); + r = amdgpu_bo_reserve(root, true); if (r) goto error_free_root; - r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, + r = amdgpu_vm_clear_bo(adev, vm, root, adev->vm_manager.root_level, vm->pte_support_ats); if (r) goto error_unreserve; - vm->root.base.vm = vm; - list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va); - list_add_tail(&vm->root.base.vm_status, &vm->evicted); + amdgpu_vm_bo_base_init(&vm->root.base, vm, root); amdgpu_bo_unreserve(vm->root.base.bo); if (pasid) { -- cgit v1.2.1 From 4bebcceededa794a26827d40ab52555c2ec37deb Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Tue, 24 Apr 2018 13:54:10 +0800 Subject: drm/amdgpu: invalidate parent bo when shadow bo was invalidated MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Shadow BO is located on GTT and its parent (PT and PD) BO could be located on VRAM. In some cases, the BO on GTT could be evicted but the parent was not. This may cause the shadow BO not to be put on the evicted list, so it cannot be invalidated correctly. 
v2: suggested by Christian Signed-off-by: Chunming Zhou Reported-by: Shaoyun Liu Reviewed-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 71dcdefce255..8e71d3984016 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2252,6 +2252,10 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, { struct amdgpu_vm_bo_base *bo_base; + /* shadow bo doesn't have bo base, its validation needs its parent */ + if (bo->parent && bo->parent->shadow == bo) + bo = bo->parent; + list_for_each_entry(bo_base, &bo->va, bo_list) { struct amdgpu_vm *vm = bo_base->vm; -- cgit v1.2.1 From 4aa8c41bfb772cd86a5726e374b13d2f31cd9f4c Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 7 May 2018 14:23:04 +0800 Subject: drm/amd/pp: Refine the output of pp_power_profile_mode on VI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to keep consist with Vega, the output format of the pp_power_profile_mode would be < “*” for current profile>:"detail settings" and remove the "CURRENT" mode line. for example: NUM MODE_NAME SCLK_UP_HYST SCLK_DOWN_HYST SCLK_ACTIVE_LEVEL MCLK_UP_HYST MCLK_DOWN_HYST MCLK_ACTIVE_LEVEL 0 3D_FULL_SCREEN: 0 100 30 0 100 10 1 POWER_SAVING: 10 0 30 - - - 2 VIDEO: - - - 10 16 31 3 VR: 0 11 50 0 100 10 4 COMPUTE: 0 5 30 - - - 5 CUSTOM *: 0 5 30 0 100 10 NUM MODE_NAME SCLK_UP_HYST SCLK_DOWN_HYST SCLK_ACTIVE_LEVEL MCLK_UP_HYST MCLK_DOWN_HYST MCLK_ACTIVE_LEVEL 0 3D_FULL_SCREEN: 0 100 30 0 100 10 1 POWER_SAVING *: 10 0 30 0 100 10 2 VIDEO: - - - 10 16 31 3 VR: 0 11 50 0 100 10 4 COMPUTE: 0 5 30 - - - 5 CUSTOM: - - - - - - Reviewed-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 52 +++++++++++------------- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h | 1 - 2 files changed, 23 insertions(+), 30 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 4c94e7a057e9..39e49ce6bb5f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -75,12 +75,13 @@ #define PCIE_BUS_CLK 10000 #define TCLK (PCIE_BUS_CLK / 10) -static const struct profile_mode_setting smu7_profiling[5] = +static const struct profile_mode_setting smu7_profiling[6] = {{1, 0, 100, 30, 1, 0, 100, 10}, {1, 10, 0, 30, 0, 0, 0, 0}, {0, 0, 0, 0, 1, 10, 16, 31}, {1, 0, 11, 50, 1, 0, 100, 10}, {1, 0, 5, 30, 0, 0, 0, 0}, + {0, 0, 0, 0, 0, 0, 0, 0}, }; #define PPSMC_MSG_SetVBITimeout_VEGAM ((uint16_t) 0x310) @@ -4882,6 +4883,17 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) len = sizeof(smu7_profiling) / sizeof(struct profile_mode_setting); for (i = 0; i < len; i++) { + if (i == hwmgr->power_profile_mode) { + size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n", + i, profile_name[i], "*", + data->current_profile_setting.sclk_up_hyst, + data->current_profile_setting.sclk_down_hyst, + data->current_profile_setting.sclk_activity, + data->current_profile_setting.mclk_up_hyst, + data->current_profile_setting.mclk_down_hyst, + data->current_profile_setting.mclk_activity); + continue; + } if 
(smu7_profiling[i].bupdate_sclk) size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ", i, profile_name[i], smu7_profiling[i].sclk_up_hyst, @@ -4901,24 +4913,6 @@ static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf) "-", "-", "-"); } - size += sprintf(buf + size, "%3d %16s: %8d %16d %16d %16d %16d %16d\n", - i, profile_name[i], - data->custom_profile_setting.sclk_up_hyst, - data->custom_profile_setting.sclk_down_hyst, - data->custom_profile_setting.sclk_activity, - data->custom_profile_setting.mclk_up_hyst, - data->custom_profile_setting.mclk_down_hyst, - data->custom_profile_setting.mclk_activity); - - size += sprintf(buf + size, "%3s %16s: %8d %16d %16d %16d %16d %16d\n", - "*", "CURRENT", - data->current_profile_setting.sclk_up_hyst, - data->current_profile_setting.sclk_down_hyst, - data->current_profile_setting.sclk_activity, - data->current_profile_setting.mclk_up_hyst, - data->current_profile_setting.mclk_down_hyst, - data->current_profile_setting.mclk_activity); - return size; } @@ -4957,16 +4951,16 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint if (size < 8) return -EINVAL; - data->custom_profile_setting.bupdate_sclk = input[0]; - data->custom_profile_setting.sclk_up_hyst = input[1]; - data->custom_profile_setting.sclk_down_hyst = input[2]; - data->custom_profile_setting.sclk_activity = input[3]; - data->custom_profile_setting.bupdate_mclk = input[4]; - data->custom_profile_setting.mclk_up_hyst = input[5]; - data->custom_profile_setting.mclk_down_hyst = input[6]; - data->custom_profile_setting.mclk_activity = input[7]; - if (!smum_update_dpm_settings(hwmgr, &data->custom_profile_setting)) { - memcpy(&data->current_profile_setting, &data->custom_profile_setting, sizeof(struct profile_mode_setting)); + tmp.bupdate_sclk = input[0]; + tmp.sclk_up_hyst = input[1]; + tmp.sclk_down_hyst = input[2]; + tmp.sclk_activity = input[3]; + tmp.bupdate_mclk = input[4]; + tmp.mclk_up_hyst = input[5]; + tmp.mclk_down_hyst = input[6]; + tmp.mclk_activity = input[7]; + if (!smum_update_dpm_settings(hwmgr, &tmp)) { + memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting)); hwmgr->power_profile_mode = mode; } break; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h index 51a776ed5906..c91e75db6a8e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.h @@ -327,7 +327,6 @@ struct smu7_hwmgr { uint16_t mem_latency_high; uint16_t mem_latency_low; uint32_t vr_config; - struct profile_mode_setting custom_profile_setting; struct profile_mode_setting current_profile_setting; }; -- cgit v1.2.1 From 3d3c4f1b4d37412b2e3f1dd5e95293e61efa83df Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Sat, 28 Apr 2018 23:21:55 +0100 Subject: drm/amd/powerplay: fix spelling mistake: "contruct" -> "construct" Trivial fix to spelling mistake in PP_ASSERT_WITH_CODE message text Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 2 +- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 39e49ce6bb5f..8eb3f5176646 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1270,7 +1270,7 @@ static 
int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr) tmp_result = smu7_construct_voltage_tables(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to contruct voltage tables!", + "Failed to construct voltage tables!", result = tmp_result); } smum_initialize_mc_reg_table(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 748612074d20..d156b7bb92ae 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -2860,7 +2860,7 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) tmp_result = vega10_construct_voltage_tables(hwmgr); PP_ASSERT_WITH_CODE(!tmp_result, - "Failed to contruct voltage tables!", + "Failed to construct voltage tables!", result = tmp_result); tmp_result = vega10_init_smc_table(hwmgr); -- cgit v1.2.1 From f4c2cc43218150da670f526aba1eeb3bcec9e3d2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 2 May 2018 15:43:16 +0100 Subject: drm/amd/display: clean up assignment of amdgpu_crtc The declaration of pointer amdgpu_crtc has a redundant assignment to amdgpu_crtc. Clean this up by removing it. Detected by CoverityScan, CID#1460299 ("Evaluation order violation") Reviewed-by: Harry Wentland Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3be17e26120d..3e0f3850dc9b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3748,7 +3748,7 @@ static void remove_stream(struct amdgpu_device *adev, static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, struct dc_cursor_position *position) { - struct amdgpu_crtc *amdgpu_crtc = amdgpu_crtc = to_amdgpu_crtc(crtc); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); int x, y; int xorigin = 0, yorigin = 0; -- cgit v1.2.1 From 267256b5d884b1494cf30636c66cd95eeb25f41f Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Wed, 9 May 2018 17:17:58 +0800 Subject: drm/amd/powerplay: add PME smu message for raven Used for working around an audio bug on some platforms. Signed-off-by: Junwei Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h index 5d07b6ea0a55..a2991fa2e6f8 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h @@ -82,7 +82,8 @@ #define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33 #define PPSMC_MSG_SetSoftMaxVcn 0x34 #define PPSMC_MSG_PowerGateMmHub 0x35 -#define PPSMC_Message_Count 0x36 +#define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36 +#define PPSMC_Message_Count 0x37 typedef uint16_t PPSMC_Result; -- cgit v1.2.1 From 4ccd2d931c4bbebbca5a5e233f0d28ed57482e90 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 8 May 2018 14:20:25 +0800 Subject: drm/amd/pp: Implement force_clock_level for RV under manual dpm mode, user can set gfx/mem clock through sysfs pp_dpm_sclk/mclk on Rv. 
Reviewed-by: Evan Quan Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 45 +++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index be6d6e202819..8b75f525fe49 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -766,6 +766,51 @@ static int smu10_get_dal_power_level(struct pp_hwmgr *hwmgr, static int smu10_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { + struct smu10_hwmgr *data = hwmgr->backend; + struct smu10_voltage_dependency_table *mclk_table = + data->clock_vol_info.vdd_dep_on_fclk; + uint32_t low, high; + + low = mask ? (ffs(mask) - 1) : 0; + high = mask ? (fls(mask) - 1) : 0; + + switch (type) { + case PP_SCLK: + if (low > 2 || high > 2) { + pr_info("Currently sclk only support 3 levels on RV\n"); + return -EINVAL; + } + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinGfxClk, + low == 2 ? data->gfx_max_freq_limit/100 : + low == 1 ? SMU10_UMD_PSTATE_GFXCLK : + data->gfx_min_freq_limit/100); + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMaxGfxClk, + high == 0 ? data->gfx_min_freq_limit/100 : + high == 1 ? SMU10_UMD_PSTATE_GFXCLK : + data->gfx_max_freq_limit/100); + break; + + case PP_MCLK: + if (low > mclk_table->count - 1 || high > mclk_table->count - 1) + return -EINVAL; + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetHardMinFclkByFreq, + mclk_table->entries[low].clk/100); + + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetSoftMaxFclkByFreq, + mclk_table->entries[high].clk/100); + break; + + case PP_PCIE: + default: + break; + } return 0; } -- cgit v1.2.1 From 9164e8b7b32edeea75bf713a61f8bd1701b9a61b Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 10 May 2018 19:51:09 +0800 Subject: drm/amd/pp: Fix performance drop on Fiji The performance drop if the default TDP more than 256 Watt Reviewed-by: Alex Deucher Reviewed-by: Junwei Zhang Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index a264e0c35f45..99b29ff45d91 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -1042,12 +1042,10 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); - n = (n & 0xff) << 8; - if (data->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) return smum_send_msg_to_smc_with_parameter(hwmgr, - PPSMC_MSG_PkgPwrSetLimit, n); + PPSMC_MSG_PkgPwrSetLimit, n<<8); return 0; } -- cgit v1.2.1 From a50cb94819f81bfafb5dc5a605baba9b40ba3243 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Fri, 11 May 2018 11:02:23 +0800 Subject: drm/amdgpu: set ttm bo priority before initialization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Junwei Zhang Reviewed-by: David Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 
'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index e62153a86001..6a9e46ae7f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -419,6 +419,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, bo->tbo.bdev = &adev->mman.bdev; amdgpu_ttm_placement_from_domain(bo, bp->domain); + if (bp->type == ttm_bo_type_kernel) + bo->tbo.priority = 1; r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type, &bo->placement, page_align, &ctx, acc_size, @@ -434,9 +436,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, else amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0); - if (bp->type == ttm_bo_type_kernel) - bo->tbo.priority = 1; - if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED && bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) { struct dma_fence *fence; -- cgit v1.2.1 From 996cab955384122848d8132554de43dce0d3c8a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 8 May 2018 12:26:52 +0200 Subject: drm/amdgpu: add HDP flush dummy for UVD 6/7 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The UVD firmware doesn't seem to like the HDP flush here. This worked for years without HDP flush, so just skip it. Signed-off-by: Christian König Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 16 ++++++++++++++-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 13 ++++++++++++- 2 files changed, 26 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 6d3359889c0b..8041b26a7a21 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -963,6 +963,16 @@ static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP); } +/** + * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing + * + * @ring: amdgpu_ring pointer + */ +static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + /* The firmware doesn't seem to like touching registers at this point. 
*/ +} + /** * uvd_v6_0_ring_test_ring - register write test * @@ -1528,12 +1538,13 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .set_wptr = uvd_v6_0_ring_set_wptr, .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_frame_size = - 6 + 6 + /* hdp flush / invalidate */ + 6 + /* hdp invalidate */ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ 14, /* uvd_v6_0_ring_emit_fence x1 no user fence */ .emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */ .emit_ib = uvd_v6_0_ring_emit_ib, .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, .test_ring = uvd_v6_0_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, @@ -1552,7 +1563,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { .get_wptr = uvd_v6_0_ring_get_wptr, .set_wptr = uvd_v6_0_ring_set_wptr, .emit_frame_size = - 6 + 6 + /* hdp flush / invalidate */ + 6 + /* hdp invalidate */ 10 + /* uvd_v6_0_ring_emit_pipeline_sync */ VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */ 14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */ @@ -1561,6 +1572,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { .emit_fence = uvd_v6_0_ring_emit_fence, .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, .test_ring = uvd_v6_0_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 2251db4048f5..b0de1e04093b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1135,6 +1135,16 @@ static void uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP); } +/** + * uvd_v7_0_ring_emit_hdp_flush - skip HDP flushing + * + * @ring: amdgpu_ring pointer + */ +static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + /* The firmware doesn't seem to like touching registers at this point. */ +} + /** * uvd_v7_0_ring_test_ring - register write test * @@ -1654,7 +1664,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = { .get_wptr = uvd_v7_0_ring_get_wptr, .set_wptr = uvd_v7_0_ring_set_wptr, .emit_frame_size = - 6 + 6 + /* hdp flush / invalidate */ + 6 + /* hdp invalidate */ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + 8 + /* uvd_v7_0_ring_emit_vm_flush */ @@ -1663,6 +1673,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = { .emit_ib = uvd_v7_0_ring_emit_ib, .emit_fence = uvd_v7_0_ring_emit_fence, .emit_vm_flush = uvd_v7_0_ring_emit_vm_flush, + .emit_hdp_flush = uvd_v7_0_ring_emit_hdp_flush, .test_ring = uvd_v7_0_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = uvd_v7_0_ring_insert_nop, -- cgit v1.2.1 From 323a9dbc452da5c155e5c17fe91c07093824fe27 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 10 May 2018 15:10:14 -0500 Subject: drm/amdgpu/gmc9: remove unused register defs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit These got moved to the new df module so no longer used in this file. 
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6c9f7f999532..6cccf0e0acd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -43,20 +43,6 @@ #include "gfxhub_v1_0.h" #include "mmhub_v1_0.h" -#define mmDF_CS_AON0_DramBaseAddress0 0x0044 -#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0 -//DF_CS_AON0_DramBaseAddress0 -#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 -#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 -#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4 -#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8 -#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc -#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L -#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L -#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L -#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L -#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L - /* add these here since we already include dce12 headers and these are for DCN */ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2 -- cgit v1.2.1 From c430bc977059a73758f666da545bc16c759fc165 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Fri, 11 May 2018 14:54:31 +0800 Subject: drm/amdgpu: fix null pointer for bo unmap trace function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit fix crash in trace. Signed-off-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 532263ab6e16..e96e26d3f3b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -275,7 +275,7 @@ TRACE_EVENT(amdgpu_vm_bo_unmap, ), TP_fast_assign( - __entry->bo = bo_va->base.bo; + __entry->bo = bo_va ? bo_va->base.bo : NULL; __entry->start = mapping->start; __entry->last = mapping->last; __entry->offset = mapping->offset; -- cgit v1.2.1 From a2a330ad66313084c9432b32862aa7e1255da9b4 Mon Sep 17 00:00:00 2001 From: Shirish S Date: Fri, 27 Apr 2018 15:47:21 +0530 Subject: drm/amd/display: remove need of modeset flag for overlay planes (V2) This patch is in continuation to the "843e3c7 drm/amd/display: defer modeset check in dm_update_planes_state" where we started to eliminate the dependency on DRM_MODE_ATOMIC_ALLOW_MODESET to be set by the user space, which as such is not mandatory. After deferring, this patch eliminates the dependency on the flag for overlay planes. This has to be done in stages as its a pretty complex and requires thorough testing before we free primary planes as well from dependency on modeset flag. V2: Simplified the plane type check. 
Signed-off-by: Shirish S Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 3e0f3850dc9b..f2f54a9df56f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4104,7 +4104,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, } spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - if (!pflip_needed) { + if (!pflip_needed || plane->type == DRM_PLANE_TYPE_OVERLAY) { WARN_ON(!dm_new_plane_state->dc_state); plane_states_constructed[planes_count] = dm_new_plane_state->dc_state; @@ -4827,7 +4827,8 @@ static int dm_update_planes_state(struct dc *dc, /* Remove any changed/removed planes */ if (!enable) { - if (pflip_needed) + if (pflip_needed && + plane->type != DRM_PLANE_TYPE_OVERLAY) continue; if (!old_plane_crtc) @@ -4874,7 +4875,8 @@ static int dm_update_planes_state(struct dc *dc, if (!dm_new_crtc_state->stream) continue; - if (pflip_needed) + if (pflip_needed && + plane->type != DRM_PLANE_TYPE_OVERLAY) continue; WARN_ON(dm_new_plane_state->dc_state); -- cgit v1.2.1 From 8eb77198131bab4417b711f899473f4ee6b8ad55 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Mon, 14 May 2018 10:13:57 -0400 Subject: drm/amd/powerplay: Add notify PWE function to SMU10 Functionality to message smc to enable pwe after gpu suspense. It is used in case when display resumes from S3 and wants to start audio driver by enabling pwe. Signed-off-by: Mikita Lipski Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 8 ++++++++ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 2 files changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 8b75f525fe49..2f69bfa478a7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1113,6 +1113,13 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, data->water_marks_exist = true; return result; } + +static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr) +{ + + return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister); +} + static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr) { return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); @@ -1153,6 +1160,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .power_state_set = smu10_set_power_state_tasks, .dynamic_state_management_disable = smu10_disable_dpm_tasks, .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu, + .smus_notify_pwe = smu10_smus_notify_pwe, .gfx_off_control = smu10_gfx_off_control, }; diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 3d9743f5bb45..3c321c7d9626 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -326,6 +326,7 @@ struct pp_hwmgr_func { long *input, uint32_t size); int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n); int (*set_mmhub_powergating_by_smu)(struct pp_hwmgr *hwmgr); + int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr); }; struct pp_table_func { -- cgit v1.2.1 From 
959a2091fae0fa498c79e095a4f6cbbb202a1194 Mon Sep 17 00:00:00 2001 From: Yong Zhao Date: Mon, 14 May 2018 12:15:27 -0400 Subject: drm/amdgpu: Add support to change mtype for 2nd part of gart BOs on GFX9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This change prepares for a workaround in amdkfd for a GFX9 HW bug. It requires the control stack memory of compute queues, which is allocated from the second page of MQD gart BOs, to have mtype NC, rather than the default UC. Signed-off-by: Yong Zhao Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 54 +++++++++++++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5 +-- 2 files changed, 47 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dfd22db13fb1..cc3b067e1ec6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -834,6 +834,45 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) sg_free_table(ttm->sg); } +int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, + struct ttm_buffer_object *tbo, + uint64_t flags) +{ + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo); + struct ttm_tt *ttm = tbo->ttm; + struct amdgpu_ttm_tt *gtt = (void *)ttm; + int r; + + if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) { + uint64_t page_idx = 1; + + r = amdgpu_gart_bind(adev, gtt->offset, page_idx, + ttm->pages, gtt->ttm.dma_address, flags); + if (r) + goto gart_bind_fail; + + /* Patch mtype of the second part BO */ + flags &= ~AMDGPU_PTE_MTYPE_MASK; + flags |= AMDGPU_PTE_MTYPE(AMDGPU_MTYPE_NC); + + r = amdgpu_gart_bind(adev, + gtt->offset + (page_idx << PAGE_SHIFT), + ttm->num_pages - page_idx, + &ttm->pages[page_idx], + &(gtt->ttm.dma_address[page_idx]), flags); + } else { + r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, + ttm->pages, gtt->ttm.dma_address, flags); + } + +gart_bind_fail: + if (r) + DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + ttm->num_pages, gtt->offset); + + return r; +} + static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { @@ -907,8 +946,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); gtt->offset = (u64)tmp.start << PAGE_SHIFT; - r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages, - bo->ttm->pages, gtt->ttm.dma_address, flags); + r = amdgpu_ttm_gart_bind(adev, bo, flags); if (unlikely(r)) { ttm_bo_mem_put(bo, &tmp); return r; @@ -925,19 +963,15 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); - struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm; uint64_t flags; int r; - if (!gtt) + if (!tbo->ttm) return 0; - flags = amdgpu_ttm_tt_pte_flags(adev, >t->ttm.ttm, &tbo->mem); - r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages, - gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags); - if (r) - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", - gtt->ttm.ttm.num_pages, gtt->offset); + flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); + r = amdgpu_ttm_gart_bind(adev, tbo, flags); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 30f080364c97..4cf678684a12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ 
-75,11 +75,12 @@ struct amdgpu_bo_list_entry; /* PDE Block Fragment Size for VEGA10 */ #define AMDGPU_PDE_BFS(a) ((uint64_t)a << 59) -/* VEGA10 only */ + +/* For GFX9 */ #define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57) #define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL) -/* For Raven */ +#define AMDGPU_MTYPE_NC 0 #define AMDGPU_MTYPE_CC 2 #define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \ -- cgit v1.2.1 From c7535379f660b721998ad6ab397809b0cbeb66d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 11 May 2018 23:13:39 +0800 Subject: drm/amdgpu: drop printing the BO offset in the gem debugfs (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is meaningless anyway. v2: remove unused variable (Alex) Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7d3dc229fa47..f79bbf81a088 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -785,7 +785,6 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) unsigned domain; const char *placement; unsigned pin_count; - uint64_t offset; domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); switch (domain) { @@ -803,10 +802,6 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) seq_printf(m, "\t0x%08x: %12ld byte %s", id, amdgpu_bo_size(bo), placement); - offset = READ_ONCE(bo->tbo.mem.start); - if (offset != AMDGPU_BO_INVALID_OFFSET) - seq_printf(m, " @ 0x%010Lx", offset); - pin_count = READ_ONCE(bo->pin_count); if (pin_count) seq_printf(m, " pin count %d", pin_count); -- cgit v1.2.1 From 6b155d6af03a053a7de2a72255563d7ef40c9644 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 11 May 2018 23:14:29 +0800 Subject: drm/amdgpu: print the BO flags in the gem debugfs entry MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Quite useful to know. 
Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index f79bbf81a088..2c8e27370284 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -774,6 +774,12 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, } #if defined(CONFIG_DEBUG_FS) + +#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag) \ + if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) { \ + seq_printf((m), " " #flag); \ + } + static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) { struct drm_gem_object *gobj = ptr; @@ -814,6 +820,15 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) else if (dma_buf) seq_printf(m, " exported as %p", dma_buf); + amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED); + amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS); + amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC); + amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED); + amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW); + amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS); + amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID); + amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC); + seq_printf(m, "\n"); return 0; -- cgit v1.2.1 From 8344c53f57057b42a5da87e9557c40fcda18fb7a Mon Sep 17 00:00:00 2001 From: Nayan Deshmukh Date: Thu, 29 Mar 2018 22:36:32 +0530 Subject: drm/scheduler: remove unused parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit this patch also effect the amdgpu and etnaviv drivers which use the function drm_sched_entity_init Signed-off-by: Nayan Deshmukh Suggested-by: Christian König Acked-by: Lucas Stach Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 6741a62a7d15..a8e531d604fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -91,7 +91,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, continue; r = drm_sched_entity_init(&ring->sched, &ctx->rings[i].entity, - rq, amdgpu_sched_jobs, &ctx->guilty); + rq, &ctx->guilty); if (r) goto failed; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index cc3b067e1ec6..5e9fd256faad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -111,7 +111,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) ring = adev->mman.buffer_funcs_ring; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; r = drm_sched_entity_init(&ring->sched, &adev->mman.entity, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r) { DRM_ERROR("Failed setting up TTM BO move run queue.\n"); goto error_entity; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index d8dd4028c2bb..de4d77af02ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -242,7 +242,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) ring = &adev->uvd.ring; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up UVD run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index e2186eda3271..a86322f5164f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -186,7 +186,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) ring = &adev->vce.ring[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->vce.entity, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCE run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 58e495330b38..e5d234cf804f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -105,7 +105,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) ring = &adev->vcn.ring_dec; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCN dec run queue.\n"); return r; @@ -114,7 +114,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) ring = &adev->vcn.ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up VCN enc run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 8e71d3984016..1a8f4e0dd023 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2404,7 +2404,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, ring = adev->vm_manager.vm_pte_rings[ring_instance]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; r = drm_sched_entity_init(&ring->sched, &vm->entity, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 8041b26a7a21..ca6ab56357b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -429,7 +429,7 @@ static int uvd_v6_0_sw_init(void *handle) ring = &adev->uvd.ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index b0de1e04093b..0ca63d588670 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -418,7 +418,7 @@ static int uvd_v7_0_sw_init(void *handle) ring = &adev->uvd.ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, - rq, amdgpu_sched_jobs, NULL); + rq, NULL); if (r) { 
DRM_ERROR("Failed setting up UVD ENC run queue.\n"); return r; -- cgit v1.2.1 From 548da31da9805645b1e8043da5081b9745545248 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 16 May 2018 16:43:34 +1000 Subject: drm/amdgpu: include pagemap.h for release_pages() Fixes: 5ae0283e831a ("drm/amdgpu: Add userptr support for KFD" Cc: Felix Kuehling Cc: Oded Gabbay Signed-off-by: Stephen Rothwell Signed-off-by: Dave Airlie --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 72ab2b1ffe75..ff8fd75f7ca5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -23,6 +23,7 @@ #define pr_fmt(fmt) "kfd2kgd: " fmt #include +#include #include #include #include "amdgpu_object.h" -- cgit v1.2.1 From c5fb5426dda897fbfeb3ddba81c9811f1178132c Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Sat, 10 Mar 2018 05:15:18 +0800 Subject: drm/amdgpu/gfx9: Update golden setting for gfx9_0. Update golden_settings_gc_9_0[]. Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index fc1911834ab5..b05b7ae4d035 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -72,29 +72,22 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin"); static const struct soc15_reg_golden golden_settings_gc_9_0[] = { - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPC_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPG_UTCL1_CNTL, 0x08000000, 0x08000080), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmIA_UTCL1_CNTL, 0x08000000, 0x08000080), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001), SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_0, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_1, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_GPM_UTCL1_CNTL_2, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_PREWALKER_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_UTCL1_CNTL, 0x08000000, 0x08000080), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x01000107), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68), 
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) }; static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = -- cgit v1.2.1 From 73aa1b9af5947f103913124f93ca19e6f3af1c1b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 9 May 2018 10:57:53 +0800 Subject: drm/amd/powerplay: new framework to honour DAL clock limits This is needed for vega12 and vega20 which do not support legacy powerstate. With this new framework, the DAL clocks limits can also be honored on these asics. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 9 +++++++++ drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 7 +++++++ drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 2 ++ drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2 ++ 4 files changed, 20 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index e411012b3dcb..f5571e9fde26 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -132,6 +132,15 @@ int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, return 0; } +int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL) + return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr); + return 0; +} + int phm_powerdown_uvd(struct pp_hwmgr *hwmgr) { PHM_FUNC_CHECK(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 308bff2b5d1d..2a2955c17d78 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -265,6 +265,13 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, if (skip) return 0; + if (!hwmgr->ps) + /* + * for vega12/vega20 which does not support power state manager + * DAL clock limits should also be honoured + */ + phm_apply_clock_adjust_rules(hwmgr); + phm_display_configuration_changed(hwmgr); if (hwmgr->ps) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 9bb87857a20f..e029555dfc2d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -410,6 +410,8 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, struct pp_power_state *adjusted_ps, const struct pp_power_state *current_ps); +extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr); + extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level); extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr); extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 3c321c7d9626..9b6c6af869a6 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -229,6 +229,8 @@ struct pp_hwmgr_func { struct pp_power_state *prequest_ps, const struct 
pp_power_state *pcurrent_ps); + int (*apply_clocks_adjust_rules)(struct pp_hwmgr *hwmgr); + int (*force_dpm_level)(struct pp_hwmgr *hw_mgr, enum amd_dpm_forced_level level); -- cgit v1.2.1 From 11a89b431e41dcfaa4e7b9806233f60de905287b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 9 May 2018 11:08:29 +0800 Subject: drm/amd/powerplay: add a framework for perfroming pre display configuration change settings Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 10 ++++++++++ drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 2 ++ drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h | 1 + drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + 4 files changed, 14 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index f5571e9fde26..a0bb921fac22 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -170,6 +170,16 @@ int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr) return 0; } +int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr) +{ + PHM_FUNC_CHECK(hwmgr); + + if (NULL != hwmgr->hwmgr_func->pre_display_config_changed) + hwmgr->hwmgr_func->pre_display_config_changed(hwmgr); + + return 0; + +} int phm_display_configuration_changed(struct pp_hwmgr *hwmgr) { diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 2a2955c17d78..0af13c154328 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -272,6 +272,8 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, */ phm_apply_clock_adjust_rules(hwmgr); + phm_pre_display_configuration_changed(hwmgr); + phm_display_configuration_changed(hwmgr); if (hwmgr->ps) diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index e029555dfc2d..a202247c9894 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -413,6 +413,7 @@ extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr); extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level); +extern int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr); extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr); extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); extern int phm_register_irq_handlers(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 9b6c6af869a6..b99fb8ac822c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -254,6 +254,7 @@ struct pp_hwmgr_func { const void *state); int (*enable_clock_power_gating)(struct pp_hwmgr *hwmgr); int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); + int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr); int (*display_config_changed)(struct pp_hwmgr *hwmgr); int (*disable_clock_power_gating)(struct pp_hwmgr *hwmgr); int (*update_clock_gatings)(struct pp_hwmgr *hwmgr, -- cgit v1.2.1 From cc3a98cc6efd0e60e0ed547f9f76f5d4e23fb758 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 11 May 2018 14:41:40 
+0800 Subject: drm/amdgpu: Drop the unused header files in soc15.c. Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 90065766fffb..f31df18fcb81 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -41,8 +41,6 @@ #include "sdma1/sdma1_4_0_offset.h" #include "hdp/hdp_4_0_offset.h" #include "hdp/hdp_4_0_sh_mask.h" -#include "mp/mp_9_0_offset.h" -#include "mp/mp_9_0_sh_mask.h" #include "smuio/smuio_9_0_offset.h" #include "smuio/smuio_9_0_sh_mask.h" -- cgit v1.2.1 From b6110c00ced26b66999eb00b90c35b767cd45da4 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 6 Feb 2018 12:29:23 +0800 Subject: drm/amdgpu: Fix hardcoded base offset of vram pages In gmc_v9_0_vram_gtt_location(),the vram_base_offset is hardcoded to 0 in dGPU. Fix it by reading mmMC_VM_FB_OFFSET or return zfb_phys_addr if ZFB is enabled. Signed-off-by: Feifei Xu Signed-off-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6cccf0e0acd7..734306902e4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -693,10 +693,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, amdgpu_device_vram_location(adev, &adev->gmc, base); amdgpu_device_gart_location(adev, mc); /* base offset of vram pages */ - if (adev->flags & AMD_IS_APU) - adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); - else - adev->vm_manager.vram_base_offset = 0; + adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev); } /** -- cgit v1.2.1 From 5eb26e7ae16b8da302a361824b9c4a53a6f3ee0f Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 30 Jan 2018 10:59:23 +0800 Subject: drm/amd: Add vega20_ip_offset.h headerfile for vega20. (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This headerfile contains vega20's ip base addresses. v2: squash in MP1_BASE fix Acked-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/vega20_ip_offset.h | 1050 ++++++++++++++++++++++++ 1 file changed, 1050 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/vega20_ip_offset.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h new file mode 100644 index 000000000000..97db93ceba4b --- /dev/null +++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h @@ -0,0 +1,1050 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _vega20_ip_offset_HEADER +#define _vega20_ip_offset_HEADER + +#define MAX_INSTANCE 6 +#define MAX_SEGMENT 6 + + +struct IP_BASE_INSTANCE +{ + unsigned int segment[MAX_SEGMENT]; +}; + +struct IP_BASE +{ + struct IP_BASE_INSTANCE instance[MAX_INSTANCE]; +}; + + +static const struct IP_BASE ATHUB_BASE ={ { { { 0x00000C20, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE CLK_BASE ={ { { { 0x00016C00, 0x00016E00, 0x00017000, 0x00017200, 0x0001B000, 0x0001B200 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DCE_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DF_BASE ={ { { { 0x00007000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE FUSE_BASE ={ { { { 0x00017400, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE GC_BASE ={ { { { 0x00002000, 0x0000A000, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE HDP_BASE ={ { { { 0x00000F20, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MMHUB_BASE ={ { { { 0x0001A000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MP0_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE MP1_BASE ={ { { { 0x00016000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 
0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE NBIO_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D20, 0x00010400, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA0_BASE ={ { { { 0x00001260, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SDMA1_BASE ={ { { { 0x00001860, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE SMUIO_BASE ={ { { { 0x00016800, 0x00016A00, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE THM_BASE ={ { { { 0x00016600, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE UMC_BASE ={ { { { 0x00014000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00, 0, 0, 0, 0 } }, + { { 0, 0x00009000, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE VCE_BASE ={ { { { 0x00008800, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE XDMA_BASE ={ { { { 0x00003400, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE RSMU_BASE ={ { { { 0x00012000, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0, 0 } } } }; + + +#define ATHUB_BASE__INST0_SEG0 0x00000C20 +#define ATHUB_BASE__INST0_SEG1 0 +#define ATHUB_BASE__INST0_SEG2 0 +#define ATHUB_BASE__INST0_SEG3 0 +#define ATHUB_BASE__INST0_SEG4 0 +#define ATHUB_BASE__INST0_SEG5 0 + +#define ATHUB_BASE__INST1_SEG0 0 +#define ATHUB_BASE__INST1_SEG1 0 +#define ATHUB_BASE__INST1_SEG2 0 +#define ATHUB_BASE__INST1_SEG3 0 +#define ATHUB_BASE__INST1_SEG4 0 +#define ATHUB_BASE__INST1_SEG5 0 + +#define ATHUB_BASE__INST2_SEG0 0 +#define ATHUB_BASE__INST2_SEG1 0 +#define ATHUB_BASE__INST2_SEG2 0 +#define ATHUB_BASE__INST2_SEG3 0 +#define ATHUB_BASE__INST2_SEG4 0 +#define ATHUB_BASE__INST2_SEG5 0 + +#define ATHUB_BASE__INST3_SEG0 0 +#define ATHUB_BASE__INST3_SEG1 0 +#define ATHUB_BASE__INST3_SEG2 0 +#define ATHUB_BASE__INST3_SEG3 0 +#define ATHUB_BASE__INST3_SEG4 0 +#define ATHUB_BASE__INST3_SEG5 0 + +#define ATHUB_BASE__INST4_SEG0 0 +#define ATHUB_BASE__INST4_SEG1 0 +#define ATHUB_BASE__INST4_SEG2 0 +#define 
ATHUB_BASE__INST4_SEG3 0 +#define ATHUB_BASE__INST4_SEG4 0 +#define ATHUB_BASE__INST4_SEG5 0 + +#define ATHUB_BASE__INST5_SEG0 0 +#define ATHUB_BASE__INST5_SEG1 0 +#define ATHUB_BASE__INST5_SEG2 0 +#define ATHUB_BASE__INST5_SEG3 0 +#define ATHUB_BASE__INST5_SEG4 0 +#define ATHUB_BASE__INST5_SEG5 0 + +#define CLK_BASE__INST0_SEG0 0x00016C00 +#define CLK_BASE__INST0_SEG1 0x00016E00 +#define CLK_BASE__INST0_SEG2 0x00017000 +#define CLK_BASE__INST0_SEG3 0x00017200 +#define CLK_BASE__INST0_SEG4 0x0001B000 +#define CLK_BASE__INST0_SEG5 0x0001B200 + +#define CLK_BASE__INST1_SEG0 0 +#define CLK_BASE__INST1_SEG1 0 +#define CLK_BASE__INST1_SEG2 0 +#define CLK_BASE__INST1_SEG3 0 +#define CLK_BASE__INST1_SEG4 0 +#define CLK_BASE__INST1_SEG5 0 + +#define CLK_BASE__INST2_SEG0 0 +#define CLK_BASE__INST2_SEG1 0 +#define CLK_BASE__INST2_SEG2 0 +#define CLK_BASE__INST2_SEG3 0 +#define CLK_BASE__INST2_SEG4 0 +#define CLK_BASE__INST2_SEG5 0 + +#define CLK_BASE__INST3_SEG0 0 +#define CLK_BASE__INST3_SEG1 0 +#define CLK_BASE__INST3_SEG2 0 +#define CLK_BASE__INST3_SEG3 0 +#define CLK_BASE__INST3_SEG4 0 +#define CLK_BASE__INST3_SEG5 0 + +#define CLK_BASE__INST4_SEG0 0 +#define CLK_BASE__INST4_SEG1 0 +#define CLK_BASE__INST4_SEG2 0 +#define CLK_BASE__INST4_SEG3 0 +#define CLK_BASE__INST4_SEG4 0 +#define CLK_BASE__INST4_SEG5 0 + +#define CLK_BASE__INST5_SEG0 0 +#define CLK_BASE__INST5_SEG1 0 +#define CLK_BASE__INST5_SEG2 0 +#define CLK_BASE__INST5_SEG3 0 +#define CLK_BASE__INST5_SEG4 0 +#define CLK_BASE__INST5_SEG5 0 + +#define DCE_BASE__INST0_SEG0 0x00000012 +#define DCE_BASE__INST0_SEG1 0x000000C0 +#define DCE_BASE__INST0_SEG2 0x000034C0 +#define DCE_BASE__INST0_SEG3 0 +#define DCE_BASE__INST0_SEG4 0 +#define DCE_BASE__INST0_SEG5 0 + +#define DCE_BASE__INST1_SEG0 0 +#define DCE_BASE__INST1_SEG1 0 +#define DCE_BASE__INST1_SEG2 0 +#define DCE_BASE__INST1_SEG3 0 +#define DCE_BASE__INST1_SEG4 0 +#define DCE_BASE__INST1_SEG5 0 + +#define DCE_BASE__INST2_SEG0 0 +#define DCE_BASE__INST2_SEG1 0 +#define DCE_BASE__INST2_SEG2 0 +#define DCE_BASE__INST2_SEG3 0 +#define DCE_BASE__INST2_SEG4 0 +#define DCE_BASE__INST2_SEG5 0 + +#define DCE_BASE__INST3_SEG0 0 +#define DCE_BASE__INST3_SEG1 0 +#define DCE_BASE__INST3_SEG2 0 +#define DCE_BASE__INST3_SEG3 0 +#define DCE_BASE__INST3_SEG4 0 +#define DCE_BASE__INST3_SEG5 0 + +#define DCE_BASE__INST4_SEG0 0 +#define DCE_BASE__INST4_SEG1 0 +#define DCE_BASE__INST4_SEG2 0 +#define DCE_BASE__INST4_SEG3 0 +#define DCE_BASE__INST4_SEG4 0 +#define DCE_BASE__INST4_SEG5 0 + +#define DCE_BASE__INST5_SEG0 0 +#define DCE_BASE__INST5_SEG1 0 +#define DCE_BASE__INST5_SEG2 0 +#define DCE_BASE__INST5_SEG3 0 +#define DCE_BASE__INST5_SEG4 0 +#define DCE_BASE__INST5_SEG5 0 + +#define DF_BASE__INST0_SEG0 0x00007000 +#define DF_BASE__INST0_SEG1 0 +#define DF_BASE__INST0_SEG2 0 +#define DF_BASE__INST0_SEG3 0 +#define DF_BASE__INST0_SEG4 0 +#define DF_BASE__INST0_SEG5 0 + +#define DF_BASE__INST1_SEG0 0 +#define DF_BASE__INST1_SEG1 0 +#define DF_BASE__INST1_SEG2 0 +#define DF_BASE__INST1_SEG3 0 +#define DF_BASE__INST1_SEG4 0 +#define DF_BASE__INST1_SEG5 0 + +#define DF_BASE__INST2_SEG0 0 +#define DF_BASE__INST2_SEG1 0 +#define DF_BASE__INST2_SEG2 0 +#define DF_BASE__INST2_SEG3 0 +#define DF_BASE__INST2_SEG4 0 +#define DF_BASE__INST2_SEG5 0 + +#define DF_BASE__INST3_SEG0 0 +#define DF_BASE__INST3_SEG1 0 +#define DF_BASE__INST3_SEG2 0 +#define DF_BASE__INST3_SEG3 0 +#define DF_BASE__INST3_SEG4 0 +#define DF_BASE__INST3_SEG5 0 + +#define DF_BASE__INST4_SEG0 0 +#define DF_BASE__INST4_SEG1 0 +#define 
DF_BASE__INST4_SEG2 0 +#define DF_BASE__INST4_SEG3 0 +#define DF_BASE__INST4_SEG4 0 +#define DF_BASE__INST4_SEG5 0 + +#define DF_BASE__INST5_SEG0 0 +#define DF_BASE__INST5_SEG1 0 +#define DF_BASE__INST5_SEG2 0 +#define DF_BASE__INST5_SEG3 0 +#define DF_BASE__INST5_SEG4 0 +#define DF_BASE__INST5_SEG5 0 + +#define FUSE_BASE__INST0_SEG0 0x00017400 +#define FUSE_BASE__INST0_SEG1 0 +#define FUSE_BASE__INST0_SEG2 0 +#define FUSE_BASE__INST0_SEG3 0 +#define FUSE_BASE__INST0_SEG4 0 +#define FUSE_BASE__INST0_SEG5 0 + +#define FUSE_BASE__INST1_SEG0 0 +#define FUSE_BASE__INST1_SEG1 0 +#define FUSE_BASE__INST1_SEG2 0 +#define FUSE_BASE__INST1_SEG3 0 +#define FUSE_BASE__INST1_SEG4 0 +#define FUSE_BASE__INST1_SEG5 0 + +#define FUSE_BASE__INST2_SEG0 0 +#define FUSE_BASE__INST2_SEG1 0 +#define FUSE_BASE__INST2_SEG2 0 +#define FUSE_BASE__INST2_SEG3 0 +#define FUSE_BASE__INST2_SEG4 0 +#define FUSE_BASE__INST2_SEG5 0 + +#define FUSE_BASE__INST3_SEG0 0 +#define FUSE_BASE__INST3_SEG1 0 +#define FUSE_BASE__INST3_SEG2 0 +#define FUSE_BASE__INST3_SEG3 0 +#define FUSE_BASE__INST3_SEG4 0 +#define FUSE_BASE__INST3_SEG5 0 + +#define FUSE_BASE__INST4_SEG0 0 +#define FUSE_BASE__INST4_SEG1 0 +#define FUSE_BASE__INST4_SEG2 0 +#define FUSE_BASE__INST4_SEG3 0 +#define FUSE_BASE__INST4_SEG4 0 +#define FUSE_BASE__INST4_SEG5 0 + +#define FUSE_BASE__INST5_SEG0 0 +#define FUSE_BASE__INST5_SEG1 0 +#define FUSE_BASE__INST5_SEG2 0 +#define FUSE_BASE__INST5_SEG3 0 +#define FUSE_BASE__INST5_SEG4 0 +#define FUSE_BASE__INST5_SEG5 0 + +#define GC_BASE__INST0_SEG0 0x00002000 +#define GC_BASE__INST0_SEG1 0x0000A000 +#define GC_BASE__INST0_SEG2 0 +#define GC_BASE__INST0_SEG3 0 +#define GC_BASE__INST0_SEG4 0 +#define GC_BASE__INST0_SEG5 0 + +#define GC_BASE__INST1_SEG0 0 +#define GC_BASE__INST1_SEG1 0 +#define GC_BASE__INST1_SEG2 0 +#define GC_BASE__INST1_SEG3 0 +#define GC_BASE__INST1_SEG4 0 +#define GC_BASE__INST1_SEG5 0 + +#define GC_BASE__INST2_SEG0 0 +#define GC_BASE__INST2_SEG1 0 +#define GC_BASE__INST2_SEG2 0 +#define GC_BASE__INST2_SEG3 0 +#define GC_BASE__INST2_SEG4 0 +#define GC_BASE__INST2_SEG5 0 + +#define GC_BASE__INST3_SEG0 0 +#define GC_BASE__INST3_SEG1 0 +#define GC_BASE__INST3_SEG2 0 +#define GC_BASE__INST3_SEG3 0 +#define GC_BASE__INST3_SEG4 0 +#define GC_BASE__INST3_SEG5 0 + +#define GC_BASE__INST4_SEG0 0 +#define GC_BASE__INST4_SEG1 0 +#define GC_BASE__INST4_SEG2 0 +#define GC_BASE__INST4_SEG3 0 +#define GC_BASE__INST4_SEG4 0 +#define GC_BASE__INST4_SEG5 0 + +#define GC_BASE__INST5_SEG0 0 +#define GC_BASE__INST5_SEG1 0 +#define GC_BASE__INST5_SEG2 0 +#define GC_BASE__INST5_SEG3 0 +#define GC_BASE__INST5_SEG4 0 +#define GC_BASE__INST5_SEG5 0 + +#define HDP_BASE__INST0_SEG0 0x00000F20 +#define HDP_BASE__INST0_SEG1 0 +#define HDP_BASE__INST0_SEG2 0 +#define HDP_BASE__INST0_SEG3 0 +#define HDP_BASE__INST0_SEG4 0 +#define HDP_BASE__INST0_SEG5 0 + +#define HDP_BASE__INST1_SEG0 0 +#define HDP_BASE__INST1_SEG1 0 +#define HDP_BASE__INST1_SEG2 0 +#define HDP_BASE__INST1_SEG3 0 +#define HDP_BASE__INST1_SEG4 0 +#define HDP_BASE__INST1_SEG5 0 + +#define HDP_BASE__INST2_SEG0 0 +#define HDP_BASE__INST2_SEG1 0 +#define HDP_BASE__INST2_SEG2 0 +#define HDP_BASE__INST2_SEG3 0 +#define HDP_BASE__INST2_SEG4 0 +#define HDP_BASE__INST2_SEG5 0 + +#define HDP_BASE__INST3_SEG0 0 +#define HDP_BASE__INST3_SEG1 0 +#define HDP_BASE__INST3_SEG2 0 +#define HDP_BASE__INST3_SEG3 0 +#define HDP_BASE__INST3_SEG4 0 +#define HDP_BASE__INST3_SEG5 0 + +#define HDP_BASE__INST4_SEG0 0 +#define HDP_BASE__INST4_SEG1 0 +#define HDP_BASE__INST4_SEG2 0 +#define 
HDP_BASE__INST4_SEG3 0 +#define HDP_BASE__INST4_SEG4 0 +#define HDP_BASE__INST4_SEG5 0 + +#define HDP_BASE__INST5_SEG0 0 +#define HDP_BASE__INST5_SEG1 0 +#define HDP_BASE__INST5_SEG2 0 +#define HDP_BASE__INST5_SEG3 0 +#define HDP_BASE__INST5_SEG4 0 +#define HDP_BASE__INST5_SEG5 0 + +#define MMHUB_BASE__INST0_SEG0 0x0001A000 +#define MMHUB_BASE__INST0_SEG1 0 +#define MMHUB_BASE__INST0_SEG2 0 +#define MMHUB_BASE__INST0_SEG3 0 +#define MMHUB_BASE__INST0_SEG4 0 +#define MMHUB_BASE__INST0_SEG5 0 + +#define MMHUB_BASE__INST1_SEG0 0 +#define MMHUB_BASE__INST1_SEG1 0 +#define MMHUB_BASE__INST1_SEG2 0 +#define MMHUB_BASE__INST1_SEG3 0 +#define MMHUB_BASE__INST1_SEG4 0 +#define MMHUB_BASE__INST1_SEG5 0 + +#define MMHUB_BASE__INST2_SEG0 0 +#define MMHUB_BASE__INST2_SEG1 0 +#define MMHUB_BASE__INST2_SEG2 0 +#define MMHUB_BASE__INST2_SEG3 0 +#define MMHUB_BASE__INST2_SEG4 0 +#define MMHUB_BASE__INST2_SEG5 0 + +#define MMHUB_BASE__INST3_SEG0 0 +#define MMHUB_BASE__INST3_SEG1 0 +#define MMHUB_BASE__INST3_SEG2 0 +#define MMHUB_BASE__INST3_SEG3 0 +#define MMHUB_BASE__INST3_SEG4 0 +#define MMHUB_BASE__INST3_SEG5 0 + +#define MMHUB_BASE__INST4_SEG0 0 +#define MMHUB_BASE__INST4_SEG1 0 +#define MMHUB_BASE__INST4_SEG2 0 +#define MMHUB_BASE__INST4_SEG3 0 +#define MMHUB_BASE__INST4_SEG4 0 +#define MMHUB_BASE__INST4_SEG5 0 + +#define MMHUB_BASE__INST5_SEG0 0 +#define MMHUB_BASE__INST5_SEG1 0 +#define MMHUB_BASE__INST5_SEG2 0 +#define MMHUB_BASE__INST5_SEG3 0 +#define MMHUB_BASE__INST5_SEG4 0 +#define MMHUB_BASE__INST5_SEG5 0 + +#define MP0_BASE__INST0_SEG0 0x00016000 +#define MP0_BASE__INST0_SEG1 0 +#define MP0_BASE__INST0_SEG2 0 +#define MP0_BASE__INST0_SEG3 0 +#define MP0_BASE__INST0_SEG4 0 +#define MP0_BASE__INST0_SEG5 0 + +#define MP0_BASE__INST1_SEG0 0 +#define MP0_BASE__INST1_SEG1 0 +#define MP0_BASE__INST1_SEG2 0 +#define MP0_BASE__INST1_SEG3 0 +#define MP0_BASE__INST1_SEG4 0 +#define MP0_BASE__INST1_SEG5 0 + +#define MP0_BASE__INST2_SEG0 0 +#define MP0_BASE__INST2_SEG1 0 +#define MP0_BASE__INST2_SEG2 0 +#define MP0_BASE__INST2_SEG3 0 +#define MP0_BASE__INST2_SEG4 0 +#define MP0_BASE__INST2_SEG5 0 + +#define MP0_BASE__INST3_SEG0 0 +#define MP0_BASE__INST3_SEG1 0 +#define MP0_BASE__INST3_SEG2 0 +#define MP0_BASE__INST3_SEG3 0 +#define MP0_BASE__INST3_SEG4 0 +#define MP0_BASE__INST3_SEG5 0 + +#define MP0_BASE__INST4_SEG0 0 +#define MP0_BASE__INST4_SEG1 0 +#define MP0_BASE__INST4_SEG2 0 +#define MP0_BASE__INST4_SEG3 0 +#define MP0_BASE__INST4_SEG4 0 +#define MP0_BASE__INST4_SEG5 0 + +#define MP0_BASE__INST5_SEG0 0 +#define MP0_BASE__INST5_SEG1 0 +#define MP0_BASE__INST5_SEG2 0 +#define MP0_BASE__INST5_SEG3 0 +#define MP0_BASE__INST5_SEG4 0 +#define MP0_BASE__INST5_SEG5 0 + +#define MP1_BASE__INST0_SEG0 0x00016000 +#define MP1_BASE__INST0_SEG1 0 +#define MP1_BASE__INST0_SEG2 0 +#define MP1_BASE__INST0_SEG3 0 +#define MP1_BASE__INST0_SEG4 0 +#define MP1_BASE__INST0_SEG5 0 + +#define MP1_BASE__INST1_SEG0 0 +#define MP1_BASE__INST1_SEG1 0 +#define MP1_BASE__INST1_SEG2 0 +#define MP1_BASE__INST1_SEG3 0 +#define MP1_BASE__INST1_SEG4 0 +#define MP1_BASE__INST1_SEG5 0 + +#define MP1_BASE__INST2_SEG0 0 +#define MP1_BASE__INST2_SEG1 0 +#define MP1_BASE__INST2_SEG2 0 +#define MP1_BASE__INST2_SEG3 0 +#define MP1_BASE__INST2_SEG4 0 +#define MP1_BASE__INST2_SEG5 0 + +#define MP1_BASE__INST3_SEG0 0 +#define MP1_BASE__INST3_SEG1 0 +#define MP1_BASE__INST3_SEG2 0 +#define MP1_BASE__INST3_SEG3 0 +#define MP1_BASE__INST3_SEG4 0 +#define MP1_BASE__INST3_SEG5 0 + +#define MP1_BASE__INST4_SEG0 0 +#define MP1_BASE__INST4_SEG1 0 
+#define MP1_BASE__INST4_SEG2 0 +#define MP1_BASE__INST4_SEG3 0 +#define MP1_BASE__INST4_SEG4 0 +#define MP1_BASE__INST4_SEG5 0 + +#define MP1_BASE__INST5_SEG0 0 +#define MP1_BASE__INST5_SEG1 0 +#define MP1_BASE__INST5_SEG2 0 +#define MP1_BASE__INST5_SEG3 0 +#define MP1_BASE__INST5_SEG4 0 +#define MP1_BASE__INST5_SEG5 0 + +#define NBIO_BASE__INST0_SEG0 0x00000000 +#define NBIO_BASE__INST0_SEG1 0x00000014 +#define NBIO_BASE__INST0_SEG2 0x00000D20 +#define NBIO_BASE__INST0_SEG3 0x00010400 +#define NBIO_BASE__INST0_SEG4 0 +#define NBIO_BASE__INST0_SEG5 0 + +#define NBIO_BASE__INST1_SEG0 0 +#define NBIO_BASE__INST1_SEG1 0 +#define NBIO_BASE__INST1_SEG2 0 +#define NBIO_BASE__INST1_SEG3 0 +#define NBIO_BASE__INST1_SEG4 0 +#define NBIO_BASE__INST1_SEG5 0 + +#define NBIO_BASE__INST2_SEG0 0 +#define NBIO_BASE__INST2_SEG1 0 +#define NBIO_BASE__INST2_SEG2 0 +#define NBIO_BASE__INST2_SEG3 0 +#define NBIO_BASE__INST2_SEG4 0 +#define NBIO_BASE__INST2_SEG5 0 + +#define NBIO_BASE__INST3_SEG0 0 +#define NBIO_BASE__INST3_SEG1 0 +#define NBIO_BASE__INST3_SEG2 0 +#define NBIO_BASE__INST3_SEG3 0 +#define NBIO_BASE__INST3_SEG4 0 +#define NBIO_BASE__INST3_SEG5 0 + +#define NBIO_BASE__INST4_SEG0 0 +#define NBIO_BASE__INST4_SEG1 0 +#define NBIO_BASE__INST4_SEG2 0 +#define NBIO_BASE__INST4_SEG3 0 +#define NBIO_BASE__INST4_SEG4 0 +#define NBIO_BASE__INST4_SEG5 0 + +#define NBIO_BASE__INST5_SEG0 0 +#define NBIO_BASE__INST5_SEG1 0 +#define NBIO_BASE__INST5_SEG2 0 +#define NBIO_BASE__INST5_SEG3 0 +#define NBIO_BASE__INST5_SEG4 0 +#define NBIO_BASE__INST5_SEG5 0 + +#define OSSSYS_BASE__INST0_SEG0 0x000010A0 +#define OSSSYS_BASE__INST0_SEG1 0 +#define OSSSYS_BASE__INST0_SEG2 0 +#define OSSSYS_BASE__INST0_SEG3 0 +#define OSSSYS_BASE__INST0_SEG4 0 +#define OSSSYS_BASE__INST0_SEG5 0 + +#define OSSSYS_BASE__INST1_SEG0 0 +#define OSSSYS_BASE__INST1_SEG1 0 +#define OSSSYS_BASE__INST1_SEG2 0 +#define OSSSYS_BASE__INST1_SEG3 0 +#define OSSSYS_BASE__INST1_SEG4 0 +#define OSSSYS_BASE__INST1_SEG5 0 + +#define OSSSYS_BASE__INST2_SEG0 0 +#define OSSSYS_BASE__INST2_SEG1 0 +#define OSSSYS_BASE__INST2_SEG2 0 +#define OSSSYS_BASE__INST2_SEG3 0 +#define OSSSYS_BASE__INST2_SEG4 0 +#define OSSSYS_BASE__INST2_SEG5 0 + +#define OSSSYS_BASE__INST3_SEG0 0 +#define OSSSYS_BASE__INST3_SEG1 0 +#define OSSSYS_BASE__INST3_SEG2 0 +#define OSSSYS_BASE__INST3_SEG3 0 +#define OSSSYS_BASE__INST3_SEG4 0 +#define OSSSYS_BASE__INST3_SEG5 0 + +#define OSSSYS_BASE__INST4_SEG0 0 +#define OSSSYS_BASE__INST4_SEG1 0 +#define OSSSYS_BASE__INST4_SEG2 0 +#define OSSSYS_BASE__INST4_SEG3 0 +#define OSSSYS_BASE__INST4_SEG4 0 +#define OSSSYS_BASE__INST4_SEG5 0 + +#define OSSSYS_BASE__INST5_SEG0 0 +#define OSSSYS_BASE__INST5_SEG1 0 +#define OSSSYS_BASE__INST5_SEG2 0 +#define OSSSYS_BASE__INST5_SEG3 0 +#define OSSSYS_BASE__INST5_SEG4 0 +#define OSSSYS_BASE__INST5_SEG5 0 + +#define SDMA0_BASE__INST0_SEG0 0x00001260 +#define SDMA0_BASE__INST0_SEG1 0 +#define SDMA0_BASE__INST0_SEG2 0 +#define SDMA0_BASE__INST0_SEG3 0 +#define SDMA0_BASE__INST0_SEG4 0 +#define SDMA0_BASE__INST0_SEG5 0 + +#define SDMA0_BASE__INST1_SEG0 0 +#define SDMA0_BASE__INST1_SEG1 0 +#define SDMA0_BASE__INST1_SEG2 0 +#define SDMA0_BASE__INST1_SEG3 0 +#define SDMA0_BASE__INST1_SEG4 0 +#define SDMA0_BASE__INST1_SEG5 0 + +#define SDMA0_BASE__INST2_SEG0 0 +#define SDMA0_BASE__INST2_SEG1 0 +#define SDMA0_BASE__INST2_SEG2 0 +#define SDMA0_BASE__INST2_SEG3 0 +#define SDMA0_BASE__INST2_SEG4 0 +#define SDMA0_BASE__INST2_SEG5 0 + +#define SDMA0_BASE__INST3_SEG0 0 +#define SDMA0_BASE__INST3_SEG1 0 +#define 
SDMA0_BASE__INST3_SEG2 0 +#define SDMA0_BASE__INST3_SEG3 0 +#define SDMA0_BASE__INST3_SEG4 0 +#define SDMA0_BASE__INST3_SEG5 0 + +#define SDMA0_BASE__INST4_SEG0 0 +#define SDMA0_BASE__INST4_SEG1 0 +#define SDMA0_BASE__INST4_SEG2 0 +#define SDMA0_BASE__INST4_SEG3 0 +#define SDMA0_BASE__INST4_SEG4 0 +#define SDMA0_BASE__INST4_SEG5 0 + +#define SDMA0_BASE__INST5_SEG0 0 +#define SDMA0_BASE__INST5_SEG1 0 +#define SDMA0_BASE__INST5_SEG2 0 +#define SDMA0_BASE__INST5_SEG3 0 +#define SDMA0_BASE__INST5_SEG4 0 +#define SDMA0_BASE__INST5_SEG5 0 + +#define SDMA1_BASE__INST0_SEG0 0x00001860 +#define SDMA1_BASE__INST0_SEG1 0 +#define SDMA1_BASE__INST0_SEG2 0 +#define SDMA1_BASE__INST0_SEG3 0 +#define SDMA1_BASE__INST0_SEG4 0 +#define SDMA1_BASE__INST0_SEG5 0 + +#define SDMA1_BASE__INST1_SEG0 0 +#define SDMA1_BASE__INST1_SEG1 0 +#define SDMA1_BASE__INST1_SEG2 0 +#define SDMA1_BASE__INST1_SEG3 0 +#define SDMA1_BASE__INST1_SEG4 0 +#define SDMA1_BASE__INST1_SEG5 0 + +#define SDMA1_BASE__INST2_SEG0 0 +#define SDMA1_BASE__INST2_SEG1 0 +#define SDMA1_BASE__INST2_SEG2 0 +#define SDMA1_BASE__INST2_SEG3 0 +#define SDMA1_BASE__INST2_SEG4 0 +#define SDMA1_BASE__INST2_SEG5 0 + +#define SDMA1_BASE__INST3_SEG0 0 +#define SDMA1_BASE__INST3_SEG1 0 +#define SDMA1_BASE__INST3_SEG2 0 +#define SDMA1_BASE__INST3_SEG3 0 +#define SDMA1_BASE__INST3_SEG4 0 +#define SDMA1_BASE__INST3_SEG5 0 + +#define SDMA1_BASE__INST4_SEG0 0 +#define SDMA1_BASE__INST4_SEG1 0 +#define SDMA1_BASE__INST4_SEG2 0 +#define SDMA1_BASE__INST4_SEG3 0 +#define SDMA1_BASE__INST4_SEG4 0 +#define SDMA1_BASE__INST4_SEG5 0 + +#define SDMA1_BASE__INST5_SEG0 0 +#define SDMA1_BASE__INST5_SEG1 0 +#define SDMA1_BASE__INST5_SEG2 0 +#define SDMA1_BASE__INST5_SEG3 0 +#define SDMA1_BASE__INST5_SEG4 0 +#define SDMA1_BASE__INST5_SEG5 0 + +#define SMUIO_BASE__INST0_SEG0 0x00016800 +#define SMUIO_BASE__INST0_SEG1 0x00016A00 +#define SMUIO_BASE__INST0_SEG2 0 +#define SMUIO_BASE__INST0_SEG3 0 +#define SMUIO_BASE__INST0_SEG4 0 +#define SMUIO_BASE__INST0_SEG5 0 + +#define SMUIO_BASE__INST1_SEG0 0 +#define SMUIO_BASE__INST1_SEG1 0 +#define SMUIO_BASE__INST1_SEG2 0 +#define SMUIO_BASE__INST1_SEG3 0 +#define SMUIO_BASE__INST1_SEG4 0 +#define SMUIO_BASE__INST1_SEG5 0 + +#define SMUIO_BASE__INST2_SEG0 0 +#define SMUIO_BASE__INST2_SEG1 0 +#define SMUIO_BASE__INST2_SEG2 0 +#define SMUIO_BASE__INST2_SEG3 0 +#define SMUIO_BASE__INST2_SEG4 0 +#define SMUIO_BASE__INST2_SEG5 0 + +#define SMUIO_BASE__INST3_SEG0 0 +#define SMUIO_BASE__INST3_SEG1 0 +#define SMUIO_BASE__INST3_SEG2 0 +#define SMUIO_BASE__INST3_SEG3 0 +#define SMUIO_BASE__INST3_SEG4 0 +#define SMUIO_BASE__INST3_SEG5 0 + +#define SMUIO_BASE__INST4_SEG0 0 +#define SMUIO_BASE__INST4_SEG1 0 +#define SMUIO_BASE__INST4_SEG2 0 +#define SMUIO_BASE__INST4_SEG3 0 +#define SMUIO_BASE__INST4_SEG4 0 +#define SMUIO_BASE__INST4_SEG5 0 + +#define SMUIO_BASE__INST5_SEG0 0 +#define SMUIO_BASE__INST5_SEG1 0 +#define SMUIO_BASE__INST5_SEG2 0 +#define SMUIO_BASE__INST5_SEG3 0 +#define SMUIO_BASE__INST5_SEG4 0 +#define SMUIO_BASE__INST5_SEG5 0 + +#define THM_BASE__INST0_SEG0 0x00016600 +#define THM_BASE__INST0_SEG1 0 +#define THM_BASE__INST0_SEG2 0 +#define THM_BASE__INST0_SEG3 0 +#define THM_BASE__INST0_SEG4 0 +#define THM_BASE__INST0_SEG5 0 + +#define THM_BASE__INST1_SEG0 0 +#define THM_BASE__INST1_SEG1 0 +#define THM_BASE__INST1_SEG2 0 +#define THM_BASE__INST1_SEG3 0 +#define THM_BASE__INST1_SEG4 0 +#define THM_BASE__INST1_SEG5 0 + +#define THM_BASE__INST2_SEG0 0 +#define THM_BASE__INST2_SEG1 0 +#define THM_BASE__INST2_SEG2 0 +#define 
THM_BASE__INST2_SEG3 0 +#define THM_BASE__INST2_SEG4 0 +#define THM_BASE__INST2_SEG5 0 + +#define THM_BASE__INST3_SEG0 0 +#define THM_BASE__INST3_SEG1 0 +#define THM_BASE__INST3_SEG2 0 +#define THM_BASE__INST3_SEG3 0 +#define THM_BASE__INST3_SEG4 0 +#define THM_BASE__INST3_SEG5 0 + +#define THM_BASE__INST4_SEG0 0 +#define THM_BASE__INST4_SEG1 0 +#define THM_BASE__INST4_SEG2 0 +#define THM_BASE__INST4_SEG3 0 +#define THM_BASE__INST4_SEG4 0 +#define THM_BASE__INST4_SEG5 0 + +#define THM_BASE__INST5_SEG0 0 +#define THM_BASE__INST5_SEG1 0 +#define THM_BASE__INST5_SEG2 0 +#define THM_BASE__INST5_SEG3 0 +#define THM_BASE__INST5_SEG4 0 +#define THM_BASE__INST5_SEG5 0 + +#define UMC_BASE__INST0_SEG0 0x00014000 +#define UMC_BASE__INST0_SEG1 0 +#define UMC_BASE__INST0_SEG2 0 +#define UMC_BASE__INST0_SEG3 0 +#define UMC_BASE__INST0_SEG4 0 +#define UMC_BASE__INST0_SEG5 0 + +#define UMC_BASE__INST1_SEG0 0 +#define UMC_BASE__INST1_SEG1 0 +#define UMC_BASE__INST1_SEG2 0 +#define UMC_BASE__INST1_SEG3 0 +#define UMC_BASE__INST1_SEG4 0 +#define UMC_BASE__INST1_SEG5 0 + +#define UMC_BASE__INST2_SEG0 0 +#define UMC_BASE__INST2_SEG1 0 +#define UMC_BASE__INST2_SEG2 0 +#define UMC_BASE__INST2_SEG3 0 +#define UMC_BASE__INST2_SEG4 0 +#define UMC_BASE__INST2_SEG5 0 + +#define UMC_BASE__INST3_SEG0 0 +#define UMC_BASE__INST3_SEG1 0 +#define UMC_BASE__INST3_SEG2 0 +#define UMC_BASE__INST3_SEG3 0 +#define UMC_BASE__INST3_SEG4 0 +#define UMC_BASE__INST3_SEG5 0 + +#define UMC_BASE__INST4_SEG0 0 +#define UMC_BASE__INST4_SEG1 0 +#define UMC_BASE__INST4_SEG2 0 +#define UMC_BASE__INST4_SEG3 0 +#define UMC_BASE__INST4_SEG4 0 +#define UMC_BASE__INST4_SEG5 0 + +#define UMC_BASE__INST5_SEG0 0 +#define UMC_BASE__INST5_SEG1 0 +#define UMC_BASE__INST5_SEG2 0 +#define UMC_BASE__INST5_SEG3 0 +#define UMC_BASE__INST5_SEG4 0 +#define UMC_BASE__INST5_SEG5 0 + +#define UVD_BASE__INST0_SEG0 0x00007800 +#define UVD_BASE__INST0_SEG1 0x00007E00 +#define UVD_BASE__INST0_SEG2 0 +#define UVD_BASE__INST0_SEG3 0 +#define UVD_BASE__INST0_SEG4 0 +#define UVD_BASE__INST0_SEG5 0 + +#define UVD_BASE__INST1_SEG0 0 +#define UVD_BASE__INST1_SEG1 0x00009000 +#define UVD_BASE__INST1_SEG2 0 +#define UVD_BASE__INST1_SEG3 0 +#define UVD_BASE__INST1_SEG4 0 +#define UVD_BASE__INST1_SEG5 0 + +#define UVD_BASE__INST2_SEG0 0 +#define UVD_BASE__INST2_SEG1 0 +#define UVD_BASE__INST2_SEG2 0 +#define UVD_BASE__INST2_SEG3 0 +#define UVD_BASE__INST2_SEG4 0 +#define UVD_BASE__INST2_SEG5 0 + +#define UVD_BASE__INST3_SEG0 0 +#define UVD_BASE__INST3_SEG1 0 +#define UVD_BASE__INST3_SEG2 0 +#define UVD_BASE__INST3_SEG3 0 +#define UVD_BASE__INST3_SEG4 0 +#define UVD_BASE__INST3_SEG5 0 + +#define UVD_BASE__INST4_SEG0 0 +#define UVD_BASE__INST4_SEG1 0 +#define UVD_BASE__INST4_SEG2 0 +#define UVD_BASE__INST4_SEG3 0 +#define UVD_BASE__INST4_SEG4 0 +#define UVD_BASE__INST4_SEG5 0 + +#define UVD_BASE__INST5_SEG0 0 +#define UVD_BASE__INST5_SEG1 0 +#define UVD_BASE__INST5_SEG2 0 +#define UVD_BASE__INST5_SEG3 0 +#define UVD_BASE__INST5_SEG4 0 +#define UVD_BASE__INST5_SEG5 0 + +#define VCE_BASE__INST0_SEG0 0x00008800 +#define VCE_BASE__INST0_SEG1 0 +#define VCE_BASE__INST0_SEG2 0 +#define VCE_BASE__INST0_SEG3 0 +#define VCE_BASE__INST0_SEG4 0 +#define VCE_BASE__INST0_SEG5 0 + +#define VCE_BASE__INST1_SEG0 0 +#define VCE_BASE__INST1_SEG1 0 +#define VCE_BASE__INST1_SEG2 0 +#define VCE_BASE__INST1_SEG3 0 +#define VCE_BASE__INST1_SEG4 0 +#define VCE_BASE__INST1_SEG5 0 + +#define VCE_BASE__INST2_SEG0 0 +#define VCE_BASE__INST2_SEG1 0 +#define VCE_BASE__INST2_SEG2 0 +#define 
VCE_BASE__INST2_SEG3 0 +#define VCE_BASE__INST2_SEG4 0 +#define VCE_BASE__INST2_SEG5 0 + +#define VCE_BASE__INST3_SEG0 0 +#define VCE_BASE__INST3_SEG1 0 +#define VCE_BASE__INST3_SEG2 0 +#define VCE_BASE__INST3_SEG3 0 +#define VCE_BASE__INST3_SEG4 0 +#define VCE_BASE__INST3_SEG5 0 + +#define VCE_BASE__INST4_SEG0 0 +#define VCE_BASE__INST4_SEG1 0 +#define VCE_BASE__INST4_SEG2 0 +#define VCE_BASE__INST4_SEG3 0 +#define VCE_BASE__INST4_SEG4 0 +#define VCE_BASE__INST4_SEG5 0 + +#define VCE_BASE__INST5_SEG0 0 +#define VCE_BASE__INST5_SEG1 0 +#define VCE_BASE__INST5_SEG2 0 +#define VCE_BASE__INST5_SEG3 0 +#define VCE_BASE__INST5_SEG4 0 +#define VCE_BASE__INST5_SEG5 0 + +#define XDMA_BASE__INST0_SEG0 0x00003400 +#define XDMA_BASE__INST0_SEG1 0 +#define XDMA_BASE__INST0_SEG2 0 +#define XDMA_BASE__INST0_SEG3 0 +#define XDMA_BASE__INST0_SEG4 0 +#define XDMA_BASE__INST0_SEG5 0 + +#define XDMA_BASE__INST1_SEG0 0 +#define XDMA_BASE__INST1_SEG1 0 +#define XDMA_BASE__INST1_SEG2 0 +#define XDMA_BASE__INST1_SEG3 0 +#define XDMA_BASE__INST1_SEG4 0 +#define XDMA_BASE__INST1_SEG5 0 + +#define XDMA_BASE__INST2_SEG0 0 +#define XDMA_BASE__INST2_SEG1 0 +#define XDMA_BASE__INST2_SEG2 0 +#define XDMA_BASE__INST2_SEG3 0 +#define XDMA_BASE__INST2_SEG4 0 +#define XDMA_BASE__INST2_SEG5 0 + +#define XDMA_BASE__INST3_SEG0 0 +#define XDMA_BASE__INST3_SEG1 0 +#define XDMA_BASE__INST3_SEG2 0 +#define XDMA_BASE__INST3_SEG3 0 +#define XDMA_BASE__INST3_SEG4 0 +#define XDMA_BASE__INST3_SEG5 0 + +#define XDMA_BASE__INST4_SEG0 0 +#define XDMA_BASE__INST4_SEG1 0 +#define XDMA_BASE__INST4_SEG2 0 +#define XDMA_BASE__INST4_SEG3 0 +#define XDMA_BASE__INST4_SEG4 0 +#define XDMA_BASE__INST4_SEG5 0 + +#define XDMA_BASE__INST5_SEG0 0 +#define XDMA_BASE__INST5_SEG1 0 +#define XDMA_BASE__INST5_SEG2 0 +#define XDMA_BASE__INST5_SEG3 0 +#define XDMA_BASE__INST5_SEG4 0 +#define XDMA_BASE__INST5_SEG5 0 + +#define RSMU_BASE__INST0_SEG0 0x00012000 +#define RSMU_BASE__INST0_SEG1 0 +#define RSMU_BASE__INST0_SEG2 0 +#define RSMU_BASE__INST0_SEG3 0 +#define RSMU_BASE__INST0_SEG4 0 +#define RSMU_BASE__INST0_SEG5 0 + +#define RSMU_BASE__INST1_SEG0 0 +#define RSMU_BASE__INST1_SEG1 0 +#define RSMU_BASE__INST1_SEG2 0 +#define RSMU_BASE__INST1_SEG3 0 +#define RSMU_BASE__INST1_SEG4 0 +#define RSMU_BASE__INST1_SEG5 0 + +#define RSMU_BASE__INST2_SEG0 0 +#define RSMU_BASE__INST2_SEG1 0 +#define RSMU_BASE__INST2_SEG2 0 +#define RSMU_BASE__INST2_SEG3 0 +#define RSMU_BASE__INST2_SEG4 0 +#define RSMU_BASE__INST2_SEG5 0 + +#define RSMU_BASE__INST3_SEG0 0 +#define RSMU_BASE__INST3_SEG1 0 +#define RSMU_BASE__INST3_SEG2 0 +#define RSMU_BASE__INST3_SEG3 0 +#define RSMU_BASE__INST3_SEG4 0 +#define RSMU_BASE__INST3_SEG5 0 + +#define RSMU_BASE__INST4_SEG0 0 +#define RSMU_BASE__INST4_SEG1 0 +#define RSMU_BASE__INST4_SEG2 0 +#define RSMU_BASE__INST4_SEG3 0 +#define RSMU_BASE__INST4_SEG4 0 +#define RSMU_BASE__INST4_SEG5 0 + +#define RSMU_BASE__INST5_SEG0 0 +#define RSMU_BASE__INST5_SEG1 0 +#define RSMU_BASE__INST5_SEG2 0 +#define RSMU_BASE__INST5_SEG3 0 +#define RSMU_BASE__INST5_SEG4 0 +#define RSMU_BASE__INST5_SEG5 0 + +#endif + -- cgit v1.2.1 From b2f87c9182deaf495ec4fefde89584910ec137d8 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 17 Apr 2018 16:25:58 -0400 Subject: drm/amd/include/vg20: adjust VCE_BASE to reuse vce 4.0 header files Vega20 uses vce 4.1 engine, all the registers have the same absolute offset with vce 4.0. By adjusting vega20 VCE_BASE, vce 4.1 can reuse vce 4.0 header files. 
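A minimal sketch, for orientation, of the base-plus-offset arithmetic this relies on: the generated *_ip_offset.h headers resolve a register to "IP instance segment base + per-register relative offset", so moving a segment base by a constant shifts every register defined against it by the same amount. The struct and helper names below are illustrative stand-ins, not the driver's real SOC15 macros.

#include <stdint.h>

#define MAX_SEGMENT 6

/* Illustrative stand-in for one IP instance's segment table, as laid out
 * by the generated *_ip_offset.h headers (names here are assumptions).
 */
struct ip_base_instance {
	uint32_t segment[MAX_SEGMENT];
};

/* Absolute register offset = segment base of the IP instance plus the
 * register's relative offset taken from the vce_4_0 offset header.
 */
static uint32_t reg_abs_offset(const struct ip_base_instance *base,
			       unsigned int seg, uint32_t reg_rel)
{
	return base->segment[seg] + reg_rel;
}

With VCE_BASE segment 0 moved from 0x00008800 to 0x00007E00, every register defined against that base resolves 0x0A00 lower than on Vega10, which (per the commit message above) is where the vce 4.1 registers actually sit, so the vce 4.0 headers keep working unchanged.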
Signed-off-by: James Zhu Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/vega20_ip_offset.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/vega20_ip_offset.h b/drivers/gpu/drm/amd/include/vega20_ip_offset.h index 97db93ceba4b..2a2a9cc8bedb 100644 --- a/drivers/gpu/drm/amd/include/vega20_ip_offset.h +++ b/drivers/gpu/drm/amd/include/vega20_ip_offset.h @@ -144,7 +144,8 @@ static const struct IP_BASE UVD_BASE ={ { { { 0x00007800, 0x00007E00, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } } } }; -static const struct IP_BASE VCE_BASE ={ { { { 0x00008800, 0, 0, 0, 0, 0 } }, +/* Adjust VCE_BASE to make vce_4_1 use vce_4_0 offset header files*/ +static const struct IP_BASE VCE_BASE ={ { { { 0x00007E00/* 0x00008800 */, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0 } }, -- cgit v1.2.1 From 956fcddc0b2a7430b6ee4783827f57cb7c823c7d Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 12:27:54 +0800 Subject: drm/amdgpu: Add vega20 to asic_type enum. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add vega20 to amd_asic_type enum and amdgpu_asic_name[]. Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9fb20a53d5b2..f84fc560c797 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -86,6 +86,7 @@ static const char *amdgpu_asic_name[] = { "VEGAM", "VEGA10", "VEGA12", + "VEGA20", "RAVEN", "LAST", }; -- cgit v1.2.1 From 27c0bc7163ae8484d3a15324122774b240fadd21 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 17 May 2018 10:01:19 -0500 Subject: drm/amdgpu: Add gpu_info firmware for vega20. (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit vega20_gpu_info firmware stores gpu configuration for vega20. v2: drop gpu info firmware for vega20 Squash of: drm/amdgpu: Add gpu_info firmware for vega20. drm/amdgpu: drop gpu_info firmware for vega20 Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f84fc560c797..3a8d4bcd95f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1388,6 +1388,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_KABINI: case CHIP_MULLINS: #endif + case CHIP_VEGA20: default: return 0; case CHIP_VEGA10: -- cgit v1.2.1 From e4bd8170407dc54bc3f4b0e140816e51f13f3e71 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 12:33:33 +0800 Subject: drm/amdgpu: set asic family for vega20. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 3a8d4bcd95f5..2d46ad7bd8fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1523,6 +1523,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) #endif case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: if (adev->asic_type == CHIP_RAVEN) adev->family = AMDGPU_FAMILY_RV; -- cgit v1.2.1 From a167ae2509132e97c94d66cf1ce15ba2fa620248 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 12:46:21 +0800 Subject: drm/amdgpu: Add smu firmware support for vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 3 +++ drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 1 + 2 files changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 5b3d3bf5b599..e950730f1933 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -400,6 +400,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, case CHIP_VEGA12: strcpy(fw_name, "amdgpu/vega12_smc.bin"); break; + case CHIP_VEGA20: + strcpy(fw_name, "amdgpu/vega20_smc.bin"); + break; default: DRM_ERROR("SMC firmware not supported\n"); return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index ee236dfbf1d6..c9837935f0f5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/vegam_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_smc.bin"); MODULE_FIRMWARE("amdgpu/vega10_acg_smc.bin"); MODULE_FIRMWARE("amdgpu/vega12_smc.bin"); +MODULE_FIRMWARE("amdgpu/vega20_smc.bin"); int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr) { -- cgit v1.2.1 From d3bfb6647cc66664f1e09706690444d2d09a56a8 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 13:32:46 +0800 Subject: drm/amdgpu/powerplay: Add initial vega20 support v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initial powerplay support the same as vega10 for now. 
Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 71b42331f185..e63bc47dc715 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -151,6 +151,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) hwmgr->feature_mask &= ~PP_GFXOFF_MASK; switch (hwmgr->chip_id) { case CHIP_VEGA10: + case CHIP_VEGA20: hwmgr->smumgr_funcs = &vega10_smu_funcs; vega10_hwmgr_init(hwmgr); break; -- cgit v1.2.1 From 8fd2d849da98924e1e021314de289d4a3a31d07f Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 13:36:54 +0800 Subject: drm/amdgpu/psp: Add initial psp support for vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The same as vega10 for now. Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 1 + drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 3 +++ 2 files changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c7d43e064fc7..9f1a5bd39ae8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -52,6 +52,7 @@ static int psp_sw_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: psp_v3_1_set_psp_funcs(psp); break; case CHIP_RAVEN: diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 196e75def1f2..0c768e388ace 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -41,6 +41,9 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin"); MODULE_FIRMWARE("amdgpu/vega10_asd.bin"); MODULE_FIRMWARE("amdgpu/vega12_sos.bin"); MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); +MODULE_FIRMWARE("amdgpu/vega20_sos.bin"); +MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); + #define smnMP1_FIRMWARE_FLAGS 0x3010028 -- cgit v1.2.1 From 4b1f540ae1a9eba826538cb37f6791729e2bcec8 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 13:38:24 +0800 Subject: drm/amdgpu: Add vega20 ucode loading method MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The same as vega10. 
Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 75592bd04d6a..b419d6e33b3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -303,6 +303,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_VEGA10: case CHIP_RAVEN: case CHIP_VEGA12: + case CHIP_VEGA20: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else -- cgit v1.2.1 From cac18c82e0c5b39b69648942576dbd1d6f9d056e Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 11 May 2018 13:44:09 -0500 Subject: drm/amdgpu: Specify vega20 uvd firmware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index de4d77af02ae..fd1e9cd65066 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -70,6 +70,7 @@ #define FIRMWARE_VEGA10 "amdgpu/vega10_uvd.bin" #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin" +#define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin" #define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00) #define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00) @@ -114,6 +115,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM); MODULE_FIRMWARE(FIRMWARE_VEGA10); MODULE_FIRMWARE(FIRMWARE_VEGA12); +MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); @@ -177,6 +179,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) case CHIP_VEGAM: fw_name = FIRMWARE_VEGAM; break; + case CHIP_VEGA20: + fw_name = FIRMWARE_VEGA20; + break; default: return -EINVAL; } -- cgit v1.2.1 From 341b4ce2330b0af3fa09db545dc2d552a99dbdec Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 13:46:49 +0800 Subject: drm/amdgpu: Specify vega20 vce firmware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index a86322f5164f..23d960ec1cf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -57,6 +57,7 @@ #define FIRMWARE_VEGA10 "amdgpu/vega10_vce.bin" #define FIRMWARE_VEGA12 "amdgpu/vega12_vce.bin" +#define FIRMWARE_VEGA20 "amdgpu/vega20_vce.bin" #ifdef CONFIG_DRM_AMDGPU_CIK MODULE_FIRMWARE(FIRMWARE_BONAIRE); @@ -76,6 +77,7 @@ MODULE_FIRMWARE(FIRMWARE_VEGAM); MODULE_FIRMWARE(FIRMWARE_VEGA10); MODULE_FIRMWARE(FIRMWARE_VEGA12); +MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_vce_idle_work_handler(struct work_struct *work); @@ -143,6 +145,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) case CHIP_VEGA12: fw_name = FIRMWARE_VEGA12; break; + case CHIP_VEGA20: + fw_name = 
FIRMWARE_VEGA20; + break; default: return -EINVAL; -- cgit v1.2.1 From a2c319b63ea377bce4f278d4ca1cb4d6da31e4fb Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 13:48:23 +0800 Subject: drm/amdgpu/virtual_dce: Add vega20 support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index de7be3de0f41..dbf2ccd0c744 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -473,6 +473,7 @@ static int dce_virtual_hw_init(void *handle) break; case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: break; default: DRM_ERROR("Virtual display unsupported ASIC type: 0x%X\n", adev->asic_type); -- cgit v1.2.1 From d96b428c3cea9ed12d03635a02fbf8644e315bc0 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 13:56:43 +0800 Subject: drm/amdgpu/gmc9: Add vega20 support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 734306902e4e..b60ed288d314 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -752,6 +752,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: /* all engines support GPUVM */ case CHIP_VEGA12: /* all engines support GPUVM */ + case CHIP_VEGA20: default: adev->gmc.gart_size = 512ULL << 20; break; @@ -857,6 +858,7 @@ static int gmc_v9_0_sw_init(void *handle) break; case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: /* * To fulfill 4-level page support, * vm size is 256TB (48bit), maximum size of Vega10, @@ -974,6 +976,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: + case CHIP_VEGA20: soc15_program_register_sequence(adev, golden_settings_mmhub_1_0_0, ARRAY_SIZE(golden_settings_mmhub_1_0_0)); -- cgit v1.2.1 From c2d7fd2baeba4c65a3cf7f61d6d54c205e4608f8 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 13:58:09 +0800 Subject: drm/amdgpu/mmhub: Add clockgating support for vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 43f925773b57..3d53c4413f13 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -734,6 +734,7 @@ int mmhub_v1_0_set_clockgating(struct amdgpu_device *adev, switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: mmhub_v1_0_update_medium_grain_clock_gating(adev, state == 
AMD_CG_STATE_GATE ? true : false); -- cgit v1.2.1 From 54a29ef758f6cc6b66b5f27dbfd90c9683920fab Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 14:00:02 +0800 Subject: drm/amdgpu/sdma4: Specify vega20 firmware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 03a36cbe7557..79b3a45b5715 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -42,6 +42,8 @@ MODULE_FIRMWARE("amdgpu/vega10_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin"); MODULE_FIRMWARE("amdgpu/vega12_sdma.bin"); MODULE_FIRMWARE("amdgpu/vega12_sdma1.bin"); +MODULE_FIRMWARE("amdgpu/vega20_sdma.bin"); +MODULE_FIRMWARE("amdgpu/vega20_sdma1.bin"); MODULE_FIRMWARE("amdgpu/raven_sdma.bin"); #define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L @@ -182,6 +184,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev) case CHIP_VEGA12: chip_name = "vega12"; break; + case CHIP_VEGA20: + chip_name = "vega20"; + break; case CHIP_RAVEN: chip_name = "raven"; break; -- cgit v1.2.1 From 84f50e9c80a74f9f8cac819c7a4b7ca220945b6d Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 23 Jan 2018 11:13:02 +0800 Subject: drm/amdgpu/sdma4: Add vega20 golden settings (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: squash in updates (Alex) v3: squash in more updates (Alex) Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 79b3a45b5715..dc12c365a886 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -109,6 +109,28 @@ static const struct soc15_reg_golden golden_settings_sdma_4_1[] = SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0) }; +static const struct soc15_reg_golden golden_settings_sdma_4_2[] = +{ + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff0, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), + 
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000), + SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0) +}; + static const struct soc15_reg_golden golden_settings_sdma_rv1[] = { SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00000002), @@ -141,6 +163,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_sdma_vg12, ARRAY_SIZE(golden_settings_sdma_vg12)); break; + case CHIP_VEGA20: + soc15_program_register_sequence(adev, + golden_settings_sdma_4_2, + ARRAY_SIZE(golden_settings_sdma_4_2)); + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_sdma_4_1, -- cgit v1.2.1 From 7eb32a7012ee592d6567e133a0d9c8c26e2590bf Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 23 Jan 2018 11:16:16 +0800 Subject: drm/amdgpu/sdma4: Add clockgating support for vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index dc12c365a886..ca53b3fba422 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1548,6 +1548,7 @@ static int sdma_v4_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: sdma_v4_0_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? 
true : false); -- cgit v1.2.1 From 940328fe35ab6e9f0eb1118f3cf91a22f97da298 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 14:22:48 +0800 Subject: drm/amdgpu/gfx9: Add support for vega20 firmware MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b05b7ae4d035..6976317dc6b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -63,6 +63,13 @@ MODULE_FIRMWARE("amdgpu/vega12_mec.bin"); MODULE_FIRMWARE("amdgpu/vega12_mec2.bin"); MODULE_FIRMWARE("amdgpu/vega12_rlc.bin"); +MODULE_FIRMWARE("amdgpu/vega20_ce.bin"); +MODULE_FIRMWARE("amdgpu/vega20_pfp.bin"); +MODULE_FIRMWARE("amdgpu/vega20_me.bin"); +MODULE_FIRMWARE("amdgpu/vega20_mec.bin"); +MODULE_FIRMWARE("amdgpu/vega20_mec2.bin"); +MODULE_FIRMWARE("amdgpu/vega20_rlc.bin"); + MODULE_FIRMWARE("amdgpu/raven_ce.bin"); MODULE_FIRMWARE("amdgpu/raven_pfp.bin"); MODULE_FIRMWARE("amdgpu/raven_me.bin"); @@ -461,6 +468,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev) case CHIP_VEGA12: chip_name = "vega12"; break; + case CHIP_VEGA20: + chip_name = "vega20"; + break; case CHIP_RAVEN: chip_name = "raven"; break; -- cgit v1.2.1 From bb5368aac5b83c1fbb39ccd0d4a89af4465c84e2 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 23 Jan 2018 14:47:26 +0800 Subject: drm/amdgpu/gfx9: Add vega20 golden settings (v3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: squash in updates (Alex) v3: squash in more updates (Alex) Acked-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 6976317dc6b4..37492791a8f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -108,6 +108,20 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x00001800, 0x00000800) }; +static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = +{ + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG_READ, 0xf3e777ff, 0x22014042), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0x00003e00, 0x00000400), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE_1, 0xff840000, 0x04040000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmRMI_UTCL1_CNTL2, 0x00030000, 0x00030000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_CONFIG_CNTL_1, 0xffff010f, 0x01000107), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0x000b0000, 0x000b0000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01000000, 0x01000000) +}; + static const struct soc15_reg_golden golden_settings_gc_9_1[] = { SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL, 0xfffdf3cf, 0x00014104), @@ -241,6 +255,14 @@ static void 
gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) golden_settings_gc_9_2_1_vg12, ARRAY_SIZE(golden_settings_gc_9_2_1_vg12)); break; + case CHIP_VEGA20: + soc15_program_register_sequence(adev, + golden_settings_gc_9_0, + ARRAY_SIZE(golden_settings_gc_9_0)); + soc15_program_register_sequence(adev, + golden_settings_gc_9_0_vg20, + ARRAY_SIZE(golden_settings_gc_9_0_vg20)); + break; case CHIP_RAVEN: soc15_program_register_sequence(adev, golden_settings_gc_9_1, -- cgit v1.2.1 From d3adedb4559c01d18a934250e41a4660b4d89ac3 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 14:40:11 +0800 Subject: drm/amdgpu/gfx9: Add gfx config for vega20. (v4) v2: clean up (Alex) v3: additional cleanups (Alex) v4: drop leftover TODO (Alex) Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 37492791a8f8..8335d98a3f3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1137,6 +1137,16 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN; DRM_INFO("fix gfx.config for vega12\n"); break; + case CHIP_VEGA20: + adev->gfx.config.max_hw_contexts = 8; + adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; + adev->gfx.config.sc_prim_fifo_size_backend = 0x100; + adev->gfx.config.sc_hiz_tile_fifo_size = 0x30; + adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0; + gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); + gb_addr_config &= ~0xf3e777ff; + gb_addr_config |= 0x22014042; + break; case CHIP_RAVEN: adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; -- cgit v1.2.1 From 61324ddc5b7a43c3b989fbbb2ac5d99009a04d4b Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 15:51:26 +0800 Subject: drm/amdgpu/gfx9: Add support for vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 8335d98a3f3b..92ed268a1b7f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1429,6 +1429,7 @@ static int gfx_v9_0_sw_init(void *handle) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: adev->gfx.mec.num_mec = 2; break; @@ -4715,6 +4716,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs; break; -- cgit v1.2.1 From 28b576b27a7acb29ce5b64da69d3855f6302350d Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 23 Jan 2018 15:03:36 +0800 Subject: drm/amdgpu/gfx9: Add clockgatting support for vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 1 file 
changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 92ed268a1b7f..13253e09f4bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3724,6 +3724,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: case CHIP_RAVEN: gfx_v9_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); -- cgit v1.2.1 From 935be7a0ce4e181a23fc840861088e79dcb3dc08 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 26 Jan 2018 15:06:22 +0800 Subject: drm/amdgpu/soc15:Add vega20 soc15_common_early_init support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Set external_rev_id and disable cg,pg for now. Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f31df18fcb81..f45bea84a73e 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -658,6 +658,11 @@ static int soc15_common_early_init(void *handle) adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x14; break; + case CHIP_VEGA20: + adev->cg_flags = 0; + adev->pg_flags = 0; + adev->external_rev_id = adev->rev_id + 0x28; + break; case CHIP_RAVEN: adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | -- cgit v1.2.1 From f980d127dba80214b4d793942492d3a4e6c46be0 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 26 Jan 2018 15:10:55 +0800 Subject: drm/amdgpu/soc15: Set common clockgating for vega20. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Same as vega10 for now. Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index f45bea84a73e..1fd75f5aa22b 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -875,6 +875,7 @@ static int soc15_common_set_clockgating_state(void *handle, switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: adev->nbio_funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); adev->nbio_funcs->update_medium_grain_light_sleep(adev, -- cgit v1.2.1 From 8ee273e516a096ee00b3be7cc15c8924aa3b1ef1 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 23 Mar 2018 14:42:28 -0500 Subject: drm/amdgpu/soc15: dynamic initialize ip offset for vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Vega20 need a seperate vega20_reg_init.c due to ip base offset difference. 
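A rough sketch of how the per-IP base tables filled in by this patch are consumed, assuming the usual SOC15 pattern of reg_offset[hwip][instance] arrays feeding the register accessors; the sizes and the helper below are illustrative, not the driver's actual SOC15_REG_OFFSET macro.

#include <stdint.h>

#define EX_MAX_HWIP     20  /* illustrative sizes, not the driver's real limits */
#define EX_MAX_INSTANCE  6

/* Minimal stand-in for the piece of the device structure that matters here:
 * a per-ASIC reg_base_init() points each IP/instance slot at that ASIC's
 * segment table, which is why Vega20 gets its own vega20_reg_base_init()
 * when its IP base offsets differ from Vega10's.
 */
struct example_device {
	const uint32_t *reg_offset[EX_MAX_HWIP][EX_MAX_INSTANCE];
};

/* Resolve a register: pick the instance's segment for base_idx, then add
 * the register's relative offset from the generated headers.
 */
static uint32_t example_reg_offset(const struct example_device *adev,
				   unsigned int hwip, unsigned int inst,
				   unsigned int base_idx, uint32_t reg)
{
	return adev->reg_offset[hwip][inst][base_idx] + reg;
}

Once the Vega20 tables are installed, the same register accessors used on Vega10 resolve to Vega20 addresses without touching the per-IP code.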
Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++ drivers/gpu/drm/amd/amdgpu/soc15.h | 1 + drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 53 ++++++++++++++++++++++++++++ 4 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 68e9f584c570..012ea37b81be 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -62,7 +62,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o amdgpu-y += \ - vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o + vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \ + vega20_reg_init.o # add DF block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 1fd75f5aa22b..c3133d16de77 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -487,6 +487,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_RAVEN: vega10_reg_base_init(adev); break; + case CHIP_VEGA20: + vega20_reg_base_init(adev); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index f70da8a29f86..1f714b7af520 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -55,5 +55,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev, const u32 array_size); int vega10_reg_base_init(struct amdgpu_device *adev); +int vega20_reg_base_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c new file mode 100644 index 000000000000..52778de93ab0 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -0,0 +1,53 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "soc15.h" + +#include "soc15_common.h" +#include "soc15_hw_ip.h" +#include "vega20_ip_offset.h" + +int vega20_reg_base_init(struct amdgpu_device *adev) +{ + /* HW has more IP blocks, only initialized the blocke beend by our driver */ + uint32_t i; + for (i = 0 ; i < MAX_INSTANCE ; ++i) { + adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i])); + adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i])); + adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i])); + adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i])); + adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i])); + adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i])); + adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i])); + adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i])); + adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i])); + adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCE_BASE.instance[i])); + adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i])); + adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i])); + adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i])); + adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); + } + return 0; +} + + -- cgit v1.2.1 From 7c7af6c10d5dc733c2f181f653cb0a5b64e372a5 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 18:35:42 +0800 Subject: drm/amdgpu/soc15: Add ip blocks for vega20 (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Same as vega10 now. v2: squash in typo fix Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c3133d16de77..10337fb3fc1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -508,6 +508,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); -- cgit v1.2.1 From a95d89e2d8e268d90d0f97c9c57d61006eec78c3 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 23 Mar 2018 14:44:28 -0500 Subject: drm/amdgpu: Add nbio support for vega20 (v2) Some register offset in nbio v7.4 are different with v7.0. v2: Use nbio7.0 for now. 
TODO: add a new nbio 7.4 module (Alex) Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c | 18 +++++++++++++++++- drivers/gpu/drm/amd/amdgpu/soc15.c | 2 ++ 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index df34dc79d444..365517c0121e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -34,10 +34,19 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 +/* vega20 */ +#define mmRCC_DEV0_EPF0_STRAP0_VG20 0x0011 +#define mmRCC_DEV0_EPF0_STRAP0_VG20_BASE_IDX 2 + static u32 nbio_v7_0_get_rev_id(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + if (adev->asic_type == CHIP_VEGA20) + tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_VG20); + else + tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); + tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -75,10 +84,14 @@ static void nbio_v7_0_sdma_doorbell_range(struct amdgpu_device *adev, int instan SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA1_DOORBELL_RANGE); u32 doorbell_range = RREG32(reg); + u32 range = 2; + + if (adev->asic_type == CHIP_VEGA20) + range = 8; if (use_doorbell) { doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index); - doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 2); + doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, range); } else doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0); @@ -133,6 +146,9 @@ static void nbio_v7_0_update_medium_grain_clock_gating(struct amdgpu_device *ade { uint32_t def, data; + if (adev->asic_type == CHIP_VEGA20) + return; + /* NBIF_MGCG_CTRL_LCLK */ def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 10337fb3fc1f..4e065c68b86c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -496,6 +496,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) adev->nbio_funcs = &nbio_v7_0_funcs; + else if (adev->asic_type == CHIP_VEGA20) + adev->nbio_funcs = &nbio_v7_0_funcs; else adev->nbio_funcs = &nbio_v6_1_funcs; -- cgit v1.2.1 From 1fe6bf2f33fe6728cfb206e2ce476cb2d1dae406 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 19:50:01 +0800 Subject: drm/amd/display/dm: Add vega20 support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f2f54a9df56f..6f5cb26b243c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1115,6 +1115,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || + adev->asic_type == CHIP_VEGA20 || adev->asic_type == 
CHIP_RAVEN) client_id = SOC15_IH_CLIENTID_DCE; @@ -1518,6 +1519,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) #endif case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: if (dce110_register_irq_handlers(dm->adev)) { DRM_ERROR("DM: Failed to initialize IRQ\n"); goto fail; @@ -1718,6 +1720,7 @@ static int dm_early_init(void *handle) break; case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; @@ -1966,6 +1969,7 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev, if (adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA12 || + adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_RAVEN) { /* Fill GFX9 params */ plane_state->tiling_info.gfx9.num_pipes = -- cgit v1.2.1 From c6034aa2c4fc54bbe429cc6414f83a25bb4913f7 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Sat, 3 Feb 2018 12:19:46 +0800 Subject: drm/amdgpu: Add vega20 to dc support check (v2) v2: fix whitespace Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2d46ad7bd8fc..0e3f69d31b80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2158,6 +2158,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type) case CHIP_FIJI: case CHIP_VEGA10: case CHIP_VEGA12: + case CHIP_VEGA20: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case CHIP_RAVEN: #endif -- cgit v1.2.1 From d82420b56a17d5b39579bc46f8dad757be684f94 Mon Sep 17 00:00:00 2001 From: Roman Li Date: Wed, 14 Feb 2018 17:20:54 -0500 Subject: drm/amd: Add dce-12.1 gpio aux registers (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updating dce12 register headers by adding dc registers required for potential DP LTTPR support. 
v2: fix mode change Acked-by: Christian König Signed-off-by: Roman Li Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/include/asic_reg/dce/dce_12_0_offset.h | 12 ++ .../amd/include/asic_reg/dce/dce_12_0_sh_mask.h | 152 +++++++++++++++++++++ 2 files changed, 164 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h index f730d0629020..b6f74bf4af02 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h @@ -2095,6 +2095,18 @@ #define mmDC_GPIO_AUX_CTRL_2_BASE_IDX 2 #define mmDC_GPIO_RXEN 0x212f #define mmDC_GPIO_RXEN_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_3 0x2130 +#define mmDC_GPIO_AUX_CTRL_3_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_4 0x2131 +#define mmDC_GPIO_AUX_CTRL_4_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_5 0x2132 +#define mmDC_GPIO_AUX_CTRL_5_BASE_IDX 2 +#define mmAUXI2C_PAD_ALL_PWR_OK 0x2133 +#define mmAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2 +#define mmDC_GPIO_PULLUPEN 0x2134 +#define mmDC_GPIO_PULLUPEN_BASE_IDX 2 +#define mmDC_GPIO_AUX_CTRL_6 0x2135 +#define mmDC_GPIO_AUX_CTRL_6_BASE_IDX 2 #define mmBPHYC_DAC_MACRO_CNTL 0x2136 #define mmBPHYC_DAC_MACRO_CNTL_BASE_IDX 2 #define mmDAC_MACRO_CNTL_RESERVED0 0x2136 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h index 6d3162c42957..bcd190a3fcdd 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_sh_mask.h @@ -10971,6 +10971,158 @@ #define DC_GPIO_RXEN__DC_GPIO_BLON_RXEN_MASK 0x00100000L #define DC_GPIO_RXEN__DC_GPIO_DIGON_RXEN_MASK 0x00200000L #define DC_GPIO_RXEN__DC_GPIO_ENA_BL_RXEN_MASK 0x00400000L +//DC_GPIO_AUX_CTRL_3 +#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1 +#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM__SHIFT 0x2 +#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM__SHIFT 0x3 +#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM__SHIFT 0x4 +#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM__SHIFT 0x5 +#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8 +#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9 +#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP__SHIFT 0xa +#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP__SHIFT 0xb +#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP__SHIFT 0xc +#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP__SHIFT 0xd +#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10 +#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12 +#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE__SHIFT 0x14 +#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE__SHIFT 0x16 +#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE__SHIFT 0x18 +#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE__SHIFT 0x1a +#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L +#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L +#define DC_GPIO_AUX_CTRL_3__AUX3_NEN_RTERM_MASK 0x00000004L +#define DC_GPIO_AUX_CTRL_3__AUX4_NEN_RTERM_MASK 0x00000008L +#define DC_GPIO_AUX_CTRL_3__AUX5_NEN_RTERM_MASK 0x00000010L +#define DC_GPIO_AUX_CTRL_3__AUX6_NEN_RTERM_MASK 0x00000020L +#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L +#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L +#define DC_GPIO_AUX_CTRL_3__AUX3_DP_DN_SWAP_MASK 0x00000400L +#define DC_GPIO_AUX_CTRL_3__AUX4_DP_DN_SWAP_MASK 0x00000800L +#define DC_GPIO_AUX_CTRL_3__AUX5_DP_DN_SWAP_MASK 0x00001000L 
+#define DC_GPIO_AUX_CTRL_3__AUX6_DP_DN_SWAP_MASK 0x00002000L +#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L +#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L +#define DC_GPIO_AUX_CTRL_3__AUX3_HYS_TUNE_MASK 0x00300000L +#define DC_GPIO_AUX_CTRL_3__AUX4_HYS_TUNE_MASK 0x00C00000L +#define DC_GPIO_AUX_CTRL_3__AUX5_HYS_TUNE_MASK 0x03000000L +#define DC_GPIO_AUX_CTRL_3__AUX6_HYS_TUNE_MASK 0x0C000000L +//DC_GPIO_AUX_CTRL_4 +#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4 +#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL__SHIFT 0x8 +#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL__SHIFT 0xc +#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL__SHIFT 0x10 +#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL__SHIFT 0x14 +#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL +#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L +#define DC_GPIO_AUX_CTRL_4__AUX3_AUX_CTRL_MASK 0x00000F00L +#define DC_GPIO_AUX_CTRL_4__AUX4_AUX_CTRL_MASK 0x0000F000L +#define DC_GPIO_AUX_CTRL_4__AUX5_AUX_CTRL_MASK 0x000F0000L +#define DC_GPIO_AUX_CTRL_4__AUX6_AUX_CTRL_MASK 0x00F00000L +//DC_GPIO_AUX_CTRL_5 +#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2 +#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE__SHIFT 0x4 +#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE__SHIFT 0x6 +#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE__SHIFT 0x8 +#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE__SHIFT 0xa +#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc +#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd +#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE__SHIFT 0xe +#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE__SHIFT 0xf +#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE__SHIFT 0x10 +#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE__SHIFT 0x11 +#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12 +#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13 +#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN__SHIFT 0x14 +#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN__SHIFT 0x15 +#define DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN__SHIFT 0x16 +#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN__SHIFT 0x17 +#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18 +#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19 +#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL__SHIFT 0x1a +#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL__SHIFT 0x1b +#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL__SHIFT 0x1c +#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL__SHIFT 0x1d +#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L +#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL +#define DC_GPIO_AUX_CTRL_5__AUX3_VOD_TUNE_MASK 0x00000030L +#define DC_GPIO_AUX_CTRL_5__AUX4_VOD_TUNE_MASK 0x000000C0L +#define DC_GPIO_AUX_CTRL_5__AUX5_VOD_TUNE_MASK 0x00000300L +#define DC_GPIO_AUX_CTRL_5__AUX6_VOD_TUNE_MASK 0x00000C00L +#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L +#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L +#define DC_GPIO_AUX_CTRL_5__DDC_PAD3_I2CMODE_MASK 0x00004000L +#define DC_GPIO_AUX_CTRL_5__DDC_PAD4_I2CMODE_MASK 0x00008000L +#define DC_GPIO_AUX_CTRL_5__DDC_PAD5_I2CMODE_MASK 0x00010000L +#define DC_GPIO_AUX_CTRL_5__DDC_PAD6_I2CMODE_MASK 0x00020000L +#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L +#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L +#define DC_GPIO_AUX_CTRL_5__DDC3_I2C_VPH_1V2_EN_MASK 0x00100000L +#define DC_GPIO_AUX_CTRL_5__DDC4_I2C_VPH_1V2_EN_MASK 0x00200000L +#define 
DC_GPIO_AUX_CTRL_5__DDC5_I2C_VPH_1V2_EN_MASK 0x00400000L +#define DC_GPIO_AUX_CTRL_5__DDC6_I2C_VPH_1V2_EN_MASK 0x00800000L +#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L +#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L +#define DC_GPIO_AUX_CTRL_5__DDC3_PAD_I2C_CTRL_MASK 0x04000000L +#define DC_GPIO_AUX_CTRL_5__DDC4_PAD_I2C_CTRL_MASK 0x08000000L +#define DC_GPIO_AUX_CTRL_5__DDC5_PAD_I2C_CTRL_MASK 0x10000000L +#define DC_GPIO_AUX_CTRL_5__DDC6_PAD_I2C_CTRL_MASK 0x20000000L +//AUXI2C_PAD_ALL_PWR_OK +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0 +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1 +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK__SHIFT 0x2 +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK__SHIFT 0x3 +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK__SHIFT 0x4 +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK__SHIFT 0x5 +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY3_ALL_PWR_OK_MASK 0x00000004L +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY4_ALL_PWR_OK_MASK 0x00000008L +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY5_ALL_PWR_OK_MASK 0x00000010L +#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY6_ALL_PWR_OK_MASK 0x00000020L +//DC_GPIO_PULLUPEN +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN__SHIFT 0x0 +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN__SHIFT 0x1 +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN__SHIFT 0x2 +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN__SHIFT 0x3 +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN__SHIFT 0x4 +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN__SHIFT 0x5 +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN__SHIFT 0x6 +#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN__SHIFT 0x8 +#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN__SHIFT 0x9 +#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN__SHIFT 0xe +#define DC_GPIO_PULLUPEN__DC_GPIO_BLON_PU_EN__SHIFT 0x14 +#define DC_GPIO_PULLUPEN__DC_GPIO_DIGON_PU_EN__SHIFT 0x15 +#define DC_GPIO_PULLUPEN__DC_GPIO_ENA_BL_PU_EN__SHIFT 0x16 +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICA_PU_EN_MASK 0x00000001L +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICB_PU_EN_MASK 0x00000002L +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICC_PU_EN_MASK 0x00000004L +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICD_PU_EN_MASK 0x00000008L +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICE_PU_EN_MASK 0x00000010L +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICF_PU_EN_MASK 0x00000020L +#define DC_GPIO_PULLUPEN__DC_GPIO_GENERICG_PU_EN_MASK 0x00000040L +#define DC_GPIO_PULLUPEN__DC_GPIO_HSYNCA_PU_EN_MASK 0x00000100L +#define DC_GPIO_PULLUPEN__DC_GPIO_VSYNCA_PU_EN_MASK 0x00000200L +#define DC_GPIO_PULLUPEN__DC_GPIO_HPD1_PU_EN_MASK 0x00004000L +#define DC_GPIO_PULLUPEN__DC_GPIO_BLON_PU_EN_MASK 0x00100000L +#define DC_GPIO_PULLUPEN__DC_GPIO_DIGON_PU_EN_MASK 0x00200000L +#define DC_GPIO_PULLUPEN__DC_GPIO_ENA_BL_PU_EN_MASK 0x00400000L +//DC_GPIO_AUX_CTRL_6 +#define DC_GPIO_AUX_CTRL_6__AUX1_PAD_RXSEL__SHIFT 0x0 +#define DC_GPIO_AUX_CTRL_6__AUX2_PAD_RXSEL__SHIFT 0x2 +#define DC_GPIO_AUX_CTRL_6__AUX3_PAD_RXSEL__SHIFT 0x4 +#define DC_GPIO_AUX_CTRL_6__AUX4_PAD_RXSEL__SHIFT 0x6 +#define DC_GPIO_AUX_CTRL_6__AUX5_PAD_RXSEL__SHIFT 0x8 +#define DC_GPIO_AUX_CTRL_6__AUX6_PAD_RXSEL__SHIFT 0xa +#define DC_GPIO_AUX_CTRL_6__AUX1_PAD_RXSEL_MASK 0x00000003L +#define DC_GPIO_AUX_CTRL_6__AUX2_PAD_RXSEL_MASK 0x0000000CL +#define DC_GPIO_AUX_CTRL_6__AUX3_PAD_RXSEL_MASK 0x00000030L 
+#define DC_GPIO_AUX_CTRL_6__AUX4_PAD_RXSEL_MASK 0x000000C0L +#define DC_GPIO_AUX_CTRL_6__AUX5_PAD_RXSEL_MASK 0x00000300L +#define DC_GPIO_AUX_CTRL_6__AUX6_PAD_RXSEL_MASK 0x00000C00L //BPHYC_DAC_MACRO_CNTL #define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_LEVEL__SHIFT 0x0 #define BPHYC_DAC_MACRO_CNTL__BPHYC_DAC_WHITE_FINE_CONTROL__SHIFT 0x8 -- cgit v1.2.1 From 138bc36051f817ce5bee33b0e7a4873bb04f1eb4 Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Fri, 11 May 2018 13:46:19 -0500 Subject: drm/amd/display: Add Vega20 config. support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reviewed-by: Christian König Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index e6ca72c0d347..6dcec9c9126b 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -40,4 +40,13 @@ config DRM_AMD_DC_VEGAM help Choose this option if you want to have VEGAM support for display engine + +config DRM_AMD_DC_VG20 + bool "Vega20 support" + depends on DRM_AMD_DC + help + Choose this option if you want to have + Vega20 support for display engine + + endmenu -- cgit v1.2.1 From 14a13a0ef0665924a5e87947309b6c9abfb41903 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Fri, 20 Apr 2018 21:03:10 +0800 Subject: drm/amd/display: Remove COMBO_DISPLAY_PLL0 from Vega20 Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Hersen Wu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Feifei Xu --- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 11 ++++++++++- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6 ++++++ 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index 78e6beb6cf26..aa4cf3095235 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -35,7 +35,7 @@ #endif #include "core_types.h" #include "dc_types.h" - +#include "dal_asic_id.h" #define TO_DCE_CLOCKS(clocks)\ container_of(clocks, struct dce_disp_clk, base) @@ -413,9 +413,18 @@ static int dce112_set_clock( /*VBIOS will determine DPREFCLK frequency, so we don't set it*/ dce_clk_params.target_clock_frequency = 0; dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; +#ifndef CONFIG_DRM_AMD_DC_VG20 dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = (dce_clk_params.pll_id == CLOCK_SOURCE_COMBO_DISPLAY_PLL0); +#else + if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev)) + dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = + (dce_clk_params.pll_id == + CLOCK_SOURCE_COMBO_DISPLAY_PLL0); + else + dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; +#endif bp->funcs->set_dce_clock(bp, &dce_clk_params); diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 1b987b6a347d..77d2856be9f6 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -117,6 +117,12 @@ ((rev >= STONEY_A0) && (rev < CZ_UNKNOWN)) /* DCE12 */ +#define AI_UNKNOWN 0xFF + +#ifdef CONFIG_DRM_AMD_DC_VG20 +#define AI_VEGA20_P_A0 40 +#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev 
>= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN)) +#endif #define AI_GREENLAND_P_A0 1 #define AI_GREENLAND_P_A1 2 -- cgit v1.2.1 From 1edb2c8a32160c00273485efea8d18080e31cc09 Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Fri, 11 May 2018 13:51:43 -0500 Subject: drm/amd/display: Add BIOS smu_info v3_3 support for Vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Acked-by: Christian König Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Hersen Wu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Feifei Xu --- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 10a5807a7e8b..4561673a0fe6 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1330,6 +1330,11 @@ static enum bp_result bios_parser_get_firmware_info( case 2: result = get_firmware_info_v3_2(bp, info); break; + case 3: +#ifdef CONFIG_DRM_AMD_DC_VG20 + result = get_firmware_info_v3_2(bp, info); +#endif + break; default: break; } -- cgit v1.2.1 From 8ad63122f9f22dde172b98fe9c75818831e57f4b Mon Sep 17 00:00:00 2001 From: "Jerry (Fangzhi) Zuo" Date: Mon, 5 Mar 2018 16:12:23 -0500 Subject: drm/amd/display: Add harvest IP support for Vega20 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Retrieve fuses to determine the availability of pipes, and eliminate pipes that cannot be used. Acked-by: Christian König Signed-off-by: Jerry (Fangzhi) Zuo Reviewed-by: Hersen Wu Reviewed-by: Tony Cheng Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Feifei Xu --- .../drm/amd/display/dc/dce120/dce120_resource.c | 208 +++++++++++++++++++++ 1 file changed, 208 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index fda01574d1ba..545f35f0821f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -814,6 +814,213 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); } +#ifdef CONFIG_DRM_AMD_DC_VG20 +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); + /* VG20 support max 6 pipes */ + value = value & 0x3f; + return value; +} + +static bool construct( + uint8_t num_virtual_links, + struct dc *dc, + struct dce110_resource_pool *pool) +{ + unsigned int i; + int j; + struct dc_context *ctx = dc->ctx; + struct irq_service_init_data irq_init_data; + bool harvest_enabled = ASICREV_IS_VEGA20_P(ctx->asic_id.hw_internal_rev); + uint32_t pipe_fuses; + + ctx->dc_bios->regs = &bios_regs; + + pool->base.res_cap = &res_cap; + pool->base.funcs = &dce120_res_pool_funcs; + + /* TODO: Fill more data from GreenlandAsicCapability.cpp */ + pool->base.pipe_count = res_cap.num_timing_generator; + pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; + pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; + + dc->caps.max_downscale_ratio = 200; + dc->caps.i2c_speed_in_khz = 100; + dc->caps.max_cursor_size = 128; + dc->caps.dual_link_dvi = true; + + dc->debug = debug_defaults; + + /************************************************* + 
* Create resources * + *************************************************/ + + pool->base.clock_sources[DCE120_CLK_SRC_PLL0] = + dce120_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL0, + &clk_src_regs[0], false); + pool->base.clock_sources[DCE120_CLK_SRC_PLL1] = + dce120_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL1, + &clk_src_regs[1], false); + pool->base.clock_sources[DCE120_CLK_SRC_PLL2] = + dce120_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL2, + &clk_src_regs[2], false); + pool->base.clock_sources[DCE120_CLK_SRC_PLL3] = + dce120_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCE120_CLK_SRC_PLL4] = + dce120_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); + pool->base.clock_sources[DCE120_CLK_SRC_PLL5] = + dce120_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL5, + &clk_src_regs[5], false); + pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL; + + pool->base.dp_clock_source = + dce120_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_ID_DP_DTO, + &clk_src_regs[0], true); + + for (i = 0; i < pool->base.clk_src_count; i++) { + if (pool->base.clock_sources[i] == NULL) { + dm_error("DC: failed to create clock sources!\n"); + BREAK_TO_DEBUGGER(); + goto clk_src_create_fail; + } + } + + pool->base.display_clock = dce120_disp_clk_create(ctx); + if (pool->base.display_clock == NULL) { + dm_error("DC: failed to create display clock!\n"); + BREAK_TO_DEBUGGER(); + goto disp_clk_create_fail; + } + + pool->base.dmcu = dce_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto res_create_fail; + } + + irq_init_data.ctx = dc->ctx; + pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data); + if (!pool->base.irqs) + goto irqs_create_fail; + + /* retrieve valid pipe fuses */ + if (harvest_enabled) + pipe_fuses = read_pipe_fuses(ctx); + + /* index to valid pipe resource */ + j = 0; + for (i = 0; i < pool->base.pipe_count; i++) { + if (harvest_enabled) { + if ((pipe_fuses & (1 << i)) != 0) { + dm_error("DC: skip invalid pipe %d!\n", i); + continue; + } + } + + pool->base.timing_generators[j] = + dce120_timing_generator_create( + ctx, + i, + &dce120_tg_offsets[i]); + if (pool->base.timing_generators[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto controller_create_fail; + } + + pool->base.mis[j] = dce120_mem_input_create(ctx, i); + + if (pool->base.mis[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create memory input!\n"); + goto controller_create_fail; + } + + pool->base.ipps[j] = dce120_ipp_create(ctx, i); + if (pool->base.ipps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create input pixel processor!\n"); + goto controller_create_fail; + } + + pool->base.transforms[j] = dce120_transform_create(ctx, i); + if (pool->base.transforms[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create transform!\n"); + goto res_create_fail; + } + + pool->base.opps[j] = dce120_opp_create( + ctx, + i); + if (pool->base.opps[j] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create 
output pixel processor!\n"); + } + + /* check next valid pipe */ + j++; + } + + /* valid pipe num */ + pool->base.pipe_count = j; + pool->base.timing_generator_count = j; + + if (!resource_construct(num_virtual_links, dc, &pool->base, + &res_create_funcs)) + goto res_create_fail; + + /* Create hardware sequencer */ + if (!dce120_hw_sequencer_create(dc)) + goto controller_create_fail; + + dc->caps.max_planes = pool->base.pipe_count; + + bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); + + bw_calcs_data_update_from_pplib(dc); + + return true; + +irqs_create_fail: +controller_create_fail: +disp_clk_create_fail: +clk_src_create_fail: +res_create_fail: + + destruct(pool); + + return false; +} +#else static bool construct( uint8_t num_virtual_links, struct dc *dc, @@ -988,6 +1195,7 @@ res_create_fail: return false; } +#endif struct resource_pool *dce120_create_resource_pool( uint8_t num_virtual_links, -- cgit v1.2.1 From 6f68711dd63522aab34c3e9513fa42a7586a95e5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 15 Mar 2018 21:32:27 -0500 Subject: drm/amdgpu/atomfirmware: add new gfx_info data table v2.4 (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds additional gfx configuration data. v2: fix typo Acked-by: Christian König Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atomfirmware.h | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index de177ce8ca80..fd5e80c92ed0 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1219,6 +1219,40 @@ struct atom_gfx_info_v2_3 { uint32_t rm21_sram_vmin_value; }; +struct atom_gfx_info_v2_4 { + struct atom_common_table_header table_header; + uint8_t gfxip_min_ver; + uint8_t gfxip_max_ver; + uint8_t gc_num_se; + uint8_t max_tile_pipes; + uint8_t gc_num_cu_per_sh; + uint8_t gc_num_sh_per_se; + uint8_t gc_num_rb_per_se; + uint8_t gc_num_tccs; + uint32_t regaddr_cp_dma_src_addr; + uint32_t regaddr_cp_dma_src_addr_hi; + uint32_t regaddr_cp_dma_dst_addr; + uint32_t regaddr_cp_dma_dst_addr_hi; + uint32_t regaddr_cp_dma_command; + uint32_t regaddr_cp_status; + uint32_t regaddr_rlc_gpu_clock_32; + uint32_t rlc_gpu_timer_refclk; + uint8_t active_cu_per_sh; + uint8_t active_rb_per_se; + uint16_t gcgoldenoffset; + uint32_t rm21_sram_vmin_value; + uint16_t gc_num_gprs; + uint16_t gc_gsprim_buff_depth; + uint16_t gc_parameter_cache_depth; + uint16_t gc_wave_size; + uint16_t gc_max_waves_per_simd; + uint16_t gc_lds_size; + uint8_t gc_num_max_gs_thds; + uint8_t gc_gs_table_depth; + uint8_t gc_double_offchip_lds_buffer; + uint8_t gc_max_scratch_slots_per_cu; +}; + /* *************************************************************************** Data Table smu_info structure -- cgit v1.2.1 From 59b0b509f1ae0c7ca54607f2770a1aec6e55d8dc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 20 Mar 2018 12:24:03 -0500 Subject: drm/amdgpu/atomfirmware: add parser for gfx_info table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for the gfx_info table on boards that use atomfirmware. 
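For reference, a minimal usage sketch of the new parser (illustrative only; the real call site is added later in this series by the "Use vbios table for gpu info on vega20" patch, and the wrapper name below is hypothetical):

static int example_get_gfx_config(struct amdgpu_device *adev)
{
        int err;

        /* Parses the atom_gfx_info_v2_4 data table from the vbios and fills
         * adev->gfx.config and adev->gfx.cu_info; returns -EINVAL when the
         * table cannot be found or its revision is not understood.
         */
        err = amdgpu_atomfirmware_get_gfx_info(adev);
        if (err)
                return err;

        return 0;
}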
Acked-by: Christian König Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 46 ++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h | 1 + 2 files changed, 47 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index a0f48cb9b8f0..7014d5875d5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -322,3 +322,49 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev) return ret; } + +union gfx_info { + struct atom_gfx_info_v2_4 v24; +}; + +int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index; + uint8_t frev, crev; + uint16_t data_offset; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + gfx_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + union gfx_info *gfx_info = (union gfx_info *) + (mode_info->atom_context->bios + data_offset); + switch (crev) { + case 4: + adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se; + adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh; + adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se; + adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se; + adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs; + adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs); + adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds; + adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth; + adev->gfx.config.gs_prim_buffer_depth = + le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth); + adev->gfx.config.double_offchip_lds_buf = + gfx_info->v24.gc_double_offchip_lds_buffer; + adev->gfx.cu_info.wave_front_size = gfx_info->v24.gc_wave_size; + adev->gfx.cu_info.max_waves_per_simd = + le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd); + adev->gfx.cu_info.max_scratch_slots_per_cu = + gfx_info->v24.gc_max_scratch_slots_per_cu; + adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size); + return 0; + default: + return -EINVAL; + } + + } + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index 7689c961c4ef..20f158fd3b76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -30,5 +30,6 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); #endif -- cgit v1.2.1 From 3251c0438a1efcc51c357f7014b33b9e02b129cd Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 20 Apr 2018 12:31:04 +0800 Subject: drm/amdgpu: Use vbios table for gpu info on vega20 Use the vbios table rather than gpu info firmware. 
Squash of the following patches: drm/amdgpu/vg20: fallback to vbios table if gpu info fw is not available (v2) drm/amdgpu: drop gpu_info firmware for vega20 Reviewed-by: Amber Lin Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 13253e09f4bd..d7530fdfaad5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -27,6 +27,7 @@ #include "amdgpu_gfx.h" #include "soc15.h" #include "soc15d.h" +#include "amdgpu_atomfirmware.h" #include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_sh_mask.h" @@ -1113,9 +1114,10 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q }; -static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) +static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) { u32 gb_addr_config; + int err; adev->gfx.funcs = &gfx_v9_0_gfx_funcs; @@ -1146,6 +1148,10 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG); gb_addr_config &= ~0xf3e777ff; gb_addr_config |= 0x22014042; + /* check vbios table if gpu info is not available */ + err = amdgpu_atomfirmware_get_gfx_info(adev); + if (err) + return err; break; case CHIP_RAVEN: adev->gfx.config.max_hw_contexts = 8; @@ -1196,6 +1202,8 @@ static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) adev->gfx.config.gb_addr_config, GB_ADDR_CONFIG, PIPE_INTERLEAVE_SIZE)); + + return 0; } static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev, @@ -1557,7 +1565,9 @@ static int gfx_v9_0_sw_init(void *handle) adev->gfx.ce_ram_size = 0x8000; - gfx_v9_0_gpu_early_init(adev); + r = gfx_v9_0_gpu_early_init(adev); + if (r) + return r; r = gfx_v9_0_ngg_init(adev); if (r) -- cgit v1.2.1 From 24e6bc784363ee4056d81c8990a0127891678b43 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 24 Apr 2018 11:11:16 +0800 Subject: drm/amdgpu: Set vega20 load_type to AMDGPU_FW_LOAD_DIRECT. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Please revert this patch when psp load fw is enabled. Acked-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index b419d6e33b3a..f55f72a37ca8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -303,11 +303,12 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_VEGA10: case CHIP_RAVEN: case CHIP_VEGA12: - case CHIP_VEGA20: if (!load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; + case CHIP_VEGA20: + return AMDGPU_FW_LOAD_DIRECT; default: DRM_ERROR("Unknown firmware load type\n"); } -- cgit v1.2.1 From fcdfa432a5b0569e8c5399effa950c71940b5889 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Fri, 18 May 2018 22:18:16 +0300 Subject: drm/amdgpu: conditionally compile amdgpu's amdkfd files In case CONFIG_HSA_AMD is not chosen, there is no need to compile amdkfd files that reside inside the amdgpu driver.
In addition, because amdkfd depends on x86_64 architecture and amdgpu is not, compiling amdkfd files under i386 architecture can cause compiler errors and warnings. This patch modifies amdgpu's makefile to build amdkfd files only if CONFIG_HSA_AMD is chosen. The only file to be compiled unconditionally is amdgpu_amdkfd.c There are stub functions that are compiled only if amdkfd is not compiled. In that case, calls from amdgpu driver proper will go to those functions instead of the real functions. v2: instead of using function pointers, use stub functions v3: initialize kgd2kfd to NULL in case amdkfd is not compiled Reviewed-by: Felix Kuehling Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/Makefile | 13 +++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 47 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12 ++++---- 3 files changed, 63 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index a51c5a960750..bfd332c95b61 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -56,8 +56,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ # add asic specific block amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ - ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ - amdgpu_amdkfd_gfx_v7.o + ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce_v6_0.o si_dpm.o si_smc.o @@ -132,13 +131,21 @@ amdgpu-y += \ vcn_v1_0.o # add amdkfd interfaces +amdgpu-y += amdgpu_amdkfd.o + +ifneq ($(CONFIG_HSA_AMD),) amdgpu-y += \ - amdgpu_amdkfd.o \ amdgpu_amdkfd_fence.o \ amdgpu_amdkfd_gpuvm.o \ amdgpu_amdkfd_gfx_v8.o \ amdgpu_amdkfd_gfx_v9.o +ifneq ($(CONFIG_DRM_AMDGPU_CIK),) +amdgpu-y += amdgpu_amdkfd_gfx_v7.o +endif + +endif + # add cgs amdgpu-y += amdgpu_cgs.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bd36ee9f7e6d..95fcbd8a4bf3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -50,15 +50,21 @@ int amdgpu_amdkfd_init(void) kgd2kfd = NULL; } + #elif defined(CONFIG_HSA_AMD) + ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd); if (ret) kgd2kfd = NULL; #else + kgd2kfd = NULL; ret = -ENOENT; #endif + +#if defined(CONFIG_HSA_AMD_MODULE) || defined(CONFIG_HSA_AMD) amdgpu_amdkfd_gpuvm_init_mem_limits(); +#endif return ret; } @@ -464,3 +470,44 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) return false; } + +#if !defined(CONFIG_HSA_AMD_MODULE) && !defined(CONFIG_HSA_AMD) +bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm) +{ + return false; +} + +void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo) +{ +} + +void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, + struct amdgpu_vm *vm) +{ +} + +struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f) +{ + return NULL; +} + +int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) +{ + return 0; +} + +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) +{ + return NULL; +} + +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) +{ + return NULL; +} + +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) +{ + return NULL; +} +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 12367a9951e8..a8418a3f4e9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -156,14 +156,14 @@ uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); /* GPUVM API */ int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, void **vm, - void **process_info, - struct dma_fence **ef); + void **process_info, + struct dma_fence **ef); int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, - struct file *filp, - void **vm, void **process_info, - struct dma_fence **ef); + struct file *filp, + void **vm, void **process_info, + struct dma_fence **ef); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, - struct amdgpu_vm *vm); + struct amdgpu_vm *vm); void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm); uint32_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( -- cgit v1.2.1 From 3fdbab5f5689a656fa719df752ca7608bcf66c99 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 26 Mar 2018 11:43:04 +0800 Subject: drm/amd/powerplay: update vega20 cg flags (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: remove duplicate flag. Reviewed-by: Christian König Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4e065c68b86c..63135cf79e00 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -665,7 +665,23 @@ static int soc15_common_early_init(void *handle) adev->external_rev_id = adev->rev_id + 0x14; break; case CHIP_VEGA20: - adev->cg_flags = 0; + adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_3D_CGCG | + AMD_CG_SUPPORT_GFX_3D_CGLS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_MGCG | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_ROM_MGCG | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x28; break; -- cgit v1.2.1 From 602ed6c69b128b77050e178aca9e945d969f3aa8 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Tue, 24 Apr 2018 11:20:16 +0800 Subject: drm/amdgpu: Disable ip modules that are not ready yet MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Please enable above ips on soc15.c when they're available. 
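Concretely, the IP blocks held back for vega20 here are PSP (together with the powerplay SMU block that is registered behind it), UVD and VCE; the change below simply wraps their registration in soc15_set_ip_blocks() with an asic_type check, roughly:

        if (adev->asic_type != CHIP_VEGA20) {
                amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
                if (!amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
        }
        /* ... and likewise for &uvd_v7_0_ip_block and &vce_v4_0_ip_block */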
Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 63135cf79e00..295bc9cd46f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -514,9 +514,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); - amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); - if (!amdgpu_sriov_vf(adev)) - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + if (adev->asic_type != CHIP_VEGA20) { + amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); + } if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); #if defined(CONFIG_DRM_AMD_DC) @@ -527,8 +529,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) #endif amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); - amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); + if (adev->asic_type != CHIP_VEGA20) { + amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); + } break; case CHIP_RAVEN: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); -- cgit v1.2.1 From 2bb795f5ba9cd676536858a978b9df06f473af88 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 15 May 2018 14:25:46 -0500 Subject: drm/amdgpu/vg20: Restructure uvd to support multiple uvds Vega20 has dual-UVD. We need to restructure amdgpu_device::uvd to support multiple UVD instances. There are no logical changes here.
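As an illustration of where the restructuring is heading (not part of this patch; a sketch assuming the inst[]/num_uvd_inst fields introduced below), per-instance code is expected to loop over the instance array instead of assuming a single UVD:

        /* Hypothetical sketch: walk every UVD instance once num_uvd_inst > 1.
         * This patch itself only routes the existing code through inst[0].
         */
        int i;

        for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
                struct amdgpu_uvd_inst *inst = &adev->uvd.inst[i];

                amdgpu_fence_driver_force_completion(&inst->ring);
        }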
Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 4 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 102 +++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 19 ++-- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 27 +++--- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 25 ++--- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 77 +++++++-------- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 135 +++++++++++++------------- 9 files changed, 205 insertions(+), 194 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index d09fcab2398f..1070f4042cbb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint64_t index; - if (ring != &adev->uvd.ring) { + if (ring != &adev->uvd.inst->ring) { ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); } else { /* put fence directly behind firmware */ index = ALIGN(adev->uvd.fw->size, 8); - ring->fence_drv.cpu_addr = adev->uvd.cpu_addr + index; - ring->fence_drv.gpu_addr = adev->uvd.gpu_addr + index; + ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index; + ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); amdgpu_irq_get(adev, irq_src, irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index eb4785e51573..5620ed291107 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -348,7 +348,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_UVD: type = AMD_IP_BLOCK_TYPE_UVD; - ring_mask = adev->uvd.ring.ready ? 1 : 0; + ring_mask = adev->uvd.inst->ring.ready ? 1 : 0; ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 16; break; @@ -362,7 +362,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file case AMDGPU_HW_IP_UVD_ENC: type = AMD_IP_BLOCK_TYPE_UVD; for (i = 0; i < adev->uvd.num_enc_rings; i++) - ring_mask |= ((adev->uvd.ring_enc[i].ready ? 1 : 0) << i); + ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 
1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 1; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c index 262c1267249e..2458d385e55a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c @@ -77,13 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev, *out_ring = &adev->sdma.instance[ring].ring; break; case AMDGPU_HW_IP_UVD: - *out_ring = &adev->uvd.ring; + *out_ring = &adev->uvd.inst->ring; break; case AMDGPU_HW_IP_VCE: *out_ring = &adev->vce.ring[ring]; break; case AMDGPU_HW_IP_UVD_ENC: - *out_ring = &adev->uvd.ring_enc[ring]; + *out_ring = &adev->uvd.inst->ring_enc[ring]; break; case AMDGPU_HW_IP_VCN_DEC: *out_ring = &adev->vcn.ring_dec; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index fd1e9cd65066..02683a039a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -129,7 +129,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) unsigned version_major, version_minor, family_id; int i, r; - INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); + INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK @@ -237,16 +237,16 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.vcpu_bo, - &adev->uvd.gpu_addr, &adev->uvd.cpu_addr); + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo, + &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr); if (r) { dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); return r; } - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity, + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity, rq, NULL); if (r != 0) { DRM_ERROR("Failed setting up UVD run queue.\n"); @@ -254,8 +254,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) } for (i = 0; i < adev->uvd.max_handles; ++i) { - atomic_set(&adev->uvd.handles[i], 0); - adev->uvd.filp[i] = NULL; + atomic_set(&adev->uvd.inst->handles[i], 0); + adev->uvd.inst->filp[i] = NULL; } /* from uvd v5.0 HW addressing capacity increased to 64 bits */ @@ -285,18 +285,18 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int i; - kfree(adev->uvd.saved_bo); + kfree(adev->uvd.inst->saved_bo); - drm_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); + drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity); - amdgpu_bo_free_kernel(&adev->uvd.vcpu_bo, - &adev->uvd.gpu_addr, - (void **)&adev->uvd.cpu_addr); + amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo, + &adev->uvd.inst->gpu_addr, + (void **)&adev->uvd.inst->cpu_addr); - amdgpu_ring_fini(&adev->uvd.ring); + amdgpu_ring_fini(&adev->uvd.inst->ring); for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); release_firmware(adev->uvd.fw); @@ -309,29 +309,29 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) void *ptr; int i; - if (adev->uvd.vcpu_bo == NULL) + if (adev->uvd.inst->vcpu_bo == NULL) return 0; - cancel_delayed_work_sync(&adev->uvd.idle_work); + 
cancel_delayed_work_sync(&adev->uvd.inst->idle_work); /* only valid for physical mode */ if (adev->asic_type < CHIP_POLARIS10) { for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.handles[i])) + if (atomic_read(&adev->uvd.inst->handles[i])) break; if (i == adev->uvd.max_handles) return 0; } - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - ptr = adev->uvd.cpu_addr; + size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); + ptr = adev->uvd.inst->cpu_addr; - adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); - if (!adev->uvd.saved_bo) + adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL); + if (!adev->uvd.inst->saved_bo) return -ENOMEM; - memcpy_fromio(adev->uvd.saved_bo, ptr, size); + memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size); return 0; } @@ -341,16 +341,16 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) unsigned size; void *ptr; - if (adev->uvd.vcpu_bo == NULL) + if (adev->uvd.inst->vcpu_bo == NULL) return -EINVAL; - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - ptr = adev->uvd.cpu_addr; + size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); + ptr = adev->uvd.inst->cpu_addr; - if (adev->uvd.saved_bo != NULL) { - memcpy_toio(ptr, adev->uvd.saved_bo, size); - kfree(adev->uvd.saved_bo); - adev->uvd.saved_bo = NULL; + if (adev->uvd.inst->saved_bo != NULL) { + memcpy_toio(ptr, adev->uvd.inst->saved_bo, size); + kfree(adev->uvd.inst->saved_bo); + adev->uvd.inst->saved_bo = NULL; } else { const struct common_firmware_header *hdr; unsigned offset; @@ -358,14 +358,14 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) hdr = (const struct common_firmware_header *)adev->uvd.fw->data; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->uvd.cpu_addr, adev->uvd.fw->data + offset, + memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset, le32_to_cpu(hdr->ucode_size_bytes)); size -= le32_to_cpu(hdr->ucode_size_bytes); ptr += le32_to_cpu(hdr->ucode_size_bytes); } memset_io(ptr, 0, size); /* to restore uvd fence seq */ - amdgpu_fence_driver_force_completion(&adev->uvd.ring); + amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring); } return 0; @@ -373,12 +373,12 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; int i, r; for (i = 0; i < adev->uvd.max_handles; ++i) { - uint32_t handle = atomic_read(&adev->uvd.handles[i]); - if (handle != 0 && adev->uvd.filp[i] == filp) { + uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]); + if (handle != 0 && adev->uvd.inst->filp[i] == filp) { struct dma_fence *fence; r = amdgpu_uvd_get_destroy_msg(ring, handle, @@ -391,8 +391,8 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) dma_fence_wait(fence, false); dma_fence_put(fence); - adev->uvd.filp[i] = NULL; - atomic_set(&adev->uvd.handles[i], 0); + adev->uvd.inst->filp[i] = NULL; + atomic_set(&adev->uvd.inst->handles[i], 0); } } } @@ -696,13 +696,13 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* try to alloc a new handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.handles[i]) == handle) { + if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { DRM_ERROR("Handle 0x%x already in use!\n", handle); return -EINVAL; } - if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) { - adev->uvd.filp[i] = ctx->parser->filp; + if 
(!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) { + adev->uvd.inst->filp[i] = ctx->parser->filp; return 0; } } @@ -719,8 +719,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* validate the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.handles[i]) == handle) { - if (adev->uvd.filp[i] != ctx->parser->filp) { + if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { + if (adev->uvd.inst->filp[i] != ctx->parser->filp) { DRM_ERROR("UVD handle collision detected!\n"); return -EINVAL; } @@ -734,7 +734,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) - atomic_cmpxchg(&adev->uvd.handles[i], handle, 0); + atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; @@ -810,7 +810,7 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx) } if ((cmd == 0 || cmd == 0x3) && - (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) { + (start >> 28) != (ctx->parser->adev->uvd.inst->gpu_addr >> 28)) { DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", start, end); return -EINVAL; @@ -1043,7 +1043,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; - r = amdgpu_job_submit(job, ring, &adev->uvd.entity, + r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err_free; @@ -1131,8 +1131,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, uvd.idle_work.work); - unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.ring); + container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); + unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring); if (fences == 0) { if (adev->pm.dpm_enabled) { @@ -1146,7 +1146,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) AMD_CG_STATE_GATE); } } else { - schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); } } @@ -1158,7 +1158,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) if (amdgpu_sriov_vf(adev)) return; - set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); + set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, true); @@ -1175,7 +1175,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) { if (!amdgpu_sriov_vf(ring->adev)) - schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); + schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); } /** @@ -1209,7 +1209,7 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) } else if (r < 0) { DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); r = 0; } @@ -1237,7 +1237,7 @@ uint32_t amdgpu_uvd_used_handles(struct amdgpu_device *adev) * necessarily linear. So we need to count * all non-zero handles. 
*/ - if (atomic_read(&adev->uvd.handles[i])) + if (atomic_read(&adev->uvd.inst->handles[i])) used_handles++; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 32ea20b99e53..b1579fba134c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -31,30 +31,37 @@ #define AMDGPU_UVD_SESSION_SIZE (50*1024) #define AMDGPU_UVD_FIRMWARE_OFFSET 256 +#define AMDGPU_MAX_UVD_INSTANCES 2 + #define AMDGPU_UVD_FIRMWARE_SIZE(adev) \ (AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(((const struct common_firmware_header *)(adev)->uvd.fw->data)->ucode_size_bytes) + \ 8) - AMDGPU_UVD_FIRMWARE_OFFSET) -struct amdgpu_uvd { +struct amdgpu_uvd_inst { struct amdgpu_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; - unsigned fw_version; void *saved_bo; - unsigned max_handles; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct delayed_work idle_work; - const struct firmware *fw; /* UVD firmware */ struct amdgpu_ring ring; struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; struct amdgpu_irq_src irq; - bool address_64_bit; - bool use_ctx_buf; struct drm_sched_entity entity; struct drm_sched_entity entity_enc; uint32_t srbm_soft_reset; +}; + +struct amdgpu_uvd { + const struct firmware *fw; /* UVD firmware */ + unsigned fw_version; + unsigned max_handles; unsigned num_enc_rings; + uint8_t num_uvd_inst; + bool address_64_bit; + bool use_ctx_buf; + struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; }; int amdgpu_uvd_sw_init(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 87cbb142dd0b..5f22135de77f 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -93,6 +93,7 @@ static void uvd_v4_2_ring_set_wptr(struct amdgpu_ring *ring) static int uvd_v4_2_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; uvd_v4_2_set_ring_funcs(adev); uvd_v4_2_set_irq_funcs(adev); @@ -107,7 +108,7 @@ static int uvd_v4_2_sw_init(void *handle) int r; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; @@ -119,9 +120,9 @@ static int uvd_v4_2_sw_init(void *handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); return r; } @@ -150,7 +151,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, static int uvd_v4_2_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -208,7 +209,7 @@ done: static int uvd_v4_2_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v4_2_stop(adev); @@ -251,7 +252,7 @@ static int uvd_v4_2_resume(void *handle) */ static int uvd_v4_2_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz; int i, j, r; u32 tmp; @@ -536,7 +537,7 @@ static void uvd_v4_2_mc_resume(struct 
amdgpu_device *adev) uint32_t size; /* programm the VCPU memory controller bits 0-27 */ - addr = (adev->uvd.gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; + addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3; size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3; WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr); WREG32(mmUVD_VCPU_CACHE_SIZE0, size); @@ -553,11 +554,11 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev) WREG32(mmUVD_VCPU_CACHE_SIZE2, size); /* bits 28-31 */ - addr = (adev->uvd.gpu_addr >> 28) & 0xF; + addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF; WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0)); /* bits 32-39 */ - addr = (adev->uvd.gpu_addr >> 32) & 0xFF; + addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF; WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31)); WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config); @@ -664,7 +665,7 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: UVD TRAP\n"); - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); return 0; } @@ -753,7 +754,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v4_2_ring_funcs; + adev->uvd.inst->ring.funcs = &uvd_v4_2_ring_funcs; } static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { @@ -763,8 +764,8 @@ static const struct amdgpu_irq_src_funcs uvd_v4_2_irq_funcs = { static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = 1; - adev->uvd.irq.funcs = &uvd_v4_2_irq_funcs; + adev->uvd.inst->irq.num_types = 1; + adev->uvd.inst->irq.funcs = &uvd_v4_2_irq_funcs; } const struct amdgpu_ip_block_version uvd_v4_2_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 6445d55e7d5a..f5d074a887fc 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -89,6 +89,7 @@ static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring) static int uvd_v5_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; uvd_v5_0_set_ring_funcs(adev); uvd_v5_0_set_irq_funcs(adev); @@ -103,7 +104,7 @@ static int uvd_v5_0_sw_init(void *handle) int r; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; @@ -115,9 +116,9 @@ static int uvd_v5_0_sw_init(void *handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); return r; } @@ -144,7 +145,7 @@ static int uvd_v5_0_sw_fini(void *handle) static int uvd_v5_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int r; @@ -204,7 +205,7 @@ done: static int uvd_v5_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v5_0_stop(adev); @@ -253,9 +254,9 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) /* programm memory controller bits 0-27 
*/ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = AMDGPU_UVD_FIRMWARE_OFFSET; size = AMDGPU_UVD_FIRMWARE_SIZE(adev); @@ -287,7 +288,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev) */ static int uvd_v5_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; @@ -586,7 +587,7 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry) { DRM_DEBUG("IH: UVD TRAP\n"); - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); return 0; } @@ -861,7 +862,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs; + adev->uvd.inst->ring.funcs = &uvd_v5_0_ring_funcs; } static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { @@ -871,8 +872,8 @@ static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = { static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = 1; - adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs; + adev->uvd.inst->irq.num_types = 1; + adev->uvd.inst->irq.funcs = &uvd_v5_0_irq_funcs; } const struct amdgpu_ip_block_version uvd_v5_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index ca6ab56357b5..dc391693d7ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -91,7 +91,7 @@ static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32(mmUVD_RB_RPTR); else return RREG32(mmUVD_RB_RPTR2); @@ -121,7 +121,7 @@ static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32(mmUVD_RB_WPTR); else return RREG32(mmUVD_RB_WPTR2); @@ -152,7 +152,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else @@ -375,6 +375,7 @@ error: static int uvd_v6_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; if (!(adev->flags & AMD_IS_APU) && (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK)) @@ -399,14 +400,14 @@ static int uvd_v6_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq); if (r) return r; /* UVD ENC TRAP */ if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 119, &adev->uvd.inst->irq); if (r) return r; } @@ -418,17 +419,17 @@ static int uvd_v6_0_sw_init(void 
*handle) if (!uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.ring_enc[i].funcs = NULL; + adev->uvd.inst->ring_enc[i].funcs = NULL; - adev->uvd.irq.num_types = 1; + adev->uvd.inst->irq.num_types = 1; adev->uvd.num_enc_rings = 0; DRM_INFO("UVD ENC is disabled\n"); } else { struct drm_sched_rq *rq; - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, rq, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -440,17 +441,17 @@ static int uvd_v6_0_sw_init(void *handle) if (r) return r; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; } @@ -469,10 +470,10 @@ static int uvd_v6_0_sw_fini(void *handle) return r; if (uvd_v6_0_enc_support(adev)) { - drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); } return amdgpu_uvd_sw_fini(adev); @@ -488,7 +489,7 @@ static int uvd_v6_0_sw_fini(void *handle) static int uvd_v6_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int i, r; @@ -532,7 +533,7 @@ static int uvd_v6_0_hw_init(void *handle) if (uvd_v6_0_enc_support(adev)) { for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -563,7 +564,7 @@ done: static int uvd_v6_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (RREG32(mmUVD_STATUS) != 0) uvd_v6_0_stop(adev); @@ -611,9 +612,9 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev) /* programm memory controller bits 0-27 */ WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = AMDGPU_UVD_FIRMWARE_OFFSET; size = AMDGPU_UVD_FIRMWARE_SIZE(adev); @@ -726,7 +727,7 @@ static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev, */ static int uvd_v6_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; @@ -866,14 +867,14 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0); if (uvd_v6_0_enc_support(adev)) { - ring = 
&adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32(mmUVD_RB_SIZE, ring->ring_size / 4); - ring = &adev->uvd.ring_enc[1]; + ring = &adev->uvd.inst->ring_enc[1]; WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr); @@ -1158,10 +1159,10 @@ static bool uvd_v6_0_check_soft_reset(void *handle) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->uvd.srbm_soft_reset = srbm_soft_reset; + adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; return true; } else { - adev->uvd.srbm_soft_reset = 0; + adev->uvd.inst->srbm_soft_reset = 0; return false; } } @@ -1170,7 +1171,7 @@ static int uvd_v6_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; uvd_v6_0_stop(adev); @@ -1182,9 +1183,9 @@ static int uvd_v6_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; - srbm_soft_reset = adev->uvd.srbm_soft_reset; + srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; if (srbm_soft_reset) { u32 tmp; @@ -1212,7 +1213,7 @@ static int uvd_v6_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; mdelay(5); @@ -1238,17 +1239,17 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev, switch (entry->src_id) { case 124: - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); break; case 119: if (likely(uvd_v6_0_enc_support(adev))) - amdgpu_fence_process(&adev->uvd.ring_enc[0]); + amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); else int_handled = false; break; case 120: if (likely(uvd_v6_0_enc_support(adev))) - amdgpu_fence_process(&adev->uvd.ring_enc[1]); + amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); else int_handled = false; break; @@ -1612,10 +1613,10 @@ static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = { static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) { if (adev->asic_type >= CHIP_POLARIS10) { - adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; + adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs; DRM_INFO("UVD is enabled in VM mode\n"); } else { - adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; + adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs; DRM_INFO("UVD is enabled in physical mode\n"); } } @@ -1625,7 +1626,7 @@ static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev) int i; for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; + adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs; DRM_INFO("UVD ENC is enabled in VM mode\n"); } @@ -1638,11 +1639,11 @@ static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev) { if (uvd_v6_0_enc_support(adev)) - adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; + adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; else - adev->uvd.irq.num_types = 1; + adev->uvd.inst->irq.num_types = 
1; - adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs; + adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs; } const struct amdgpu_ip_block_version uvd_v6_0_ip_block = diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 0ca63d588670..66d4bea5fb2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -72,7 +72,7 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); else return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); @@ -106,7 +106,7 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); else return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); @@ -144,7 +144,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) return; } - if (ring == &adev->uvd.ring_enc[0]) + if (ring == &adev->uvd.inst->ring_enc[0]) WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else @@ -170,8 +170,8 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) r = amdgpu_ring_alloc(ring, 16); if (r) { - DRM_ERROR("amdgpu: uvd enc failed to lock ring %d (%d).\n", - ring->idx, r); + DRM_ERROR("amdgpu: uvd enc failed to lock (%d)ring %d (%d).\n", + ring->me, ring->idx, r); return r; } amdgpu_ring_write(ring, HEVC_ENC_CMD_END); @@ -184,11 +184,11 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); + DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", + ring->me, ring->idx, i); } else { - DRM_ERROR("amdgpu: ring %d test failed\n", - ring->idx); + DRM_ERROR("amdgpu: (%d)ring %d test failed\n", + ring->me, ring->idx); r = -ETIMEDOUT; } @@ -342,24 +342,24 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ring->me, r); goto error; } r = uvd_v7_0_enc_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ring->me, r); goto error; } r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ring->me); r = -ETIMEDOUT; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ring->me, r); } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ring->me, ring->idx); r = 0; } error: @@ -370,6 +370,7 @@ error: static int uvd_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->uvd.num_uvd_inst = 1; if (amdgpu_sriov_vf(adev)) adev->uvd.num_enc_rings = 1; @@ -390,13 +391,13 @@ static int uvd_v7_0_sw_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, 
&adev->uvd.inst->irq); if (r) return r; /* UVD ENC TRAP */ for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.irq); + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq); if (r) return r; } @@ -415,9 +416,9 @@ static int uvd_v7_0_sw_init(void *handle) DRM_INFO("PSP loading UVD firmware\n"); } - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.entity_enc, + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, rq, NULL); if (r) { DRM_ERROR("Failed setting up UVD ENC run queue.\n"); @@ -428,15 +429,15 @@ static int uvd_v7_0_sw_init(void *handle) if (r) return r; if (!amdgpu_sriov_vf(adev)) { - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; } for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; sprintf(ring->name, "uvd_enc%d", i); if (amdgpu_sriov_vf(adev)) { ring->use_doorbell = true; @@ -449,7 +450,7 @@ static int uvd_v7_0_sw_init(void *handle) else ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; } - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); if (r) return r; } @@ -472,10 +473,10 @@ static int uvd_v7_0_sw_fini(void *handle) if (r) return r; - drm_sched_entity_fini(&adev->uvd.ring_enc[0].sched, &adev->uvd.entity_enc); + drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); for (i = 0; i < adev->uvd.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->uvd.ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); return amdgpu_uvd_sw_fini(adev); } @@ -490,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle) static int uvd_v7_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t tmp; int i, r; @@ -543,7 +544,7 @@ static int uvd_v7_0_hw_init(void *handle) } for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.ring_enc[i]; + ring = &adev->uvd.inst->ring_enc[i]; ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -569,7 +570,7 @@ done: static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; if (!amdgpu_sriov_vf(adev)) uvd_v7_0_stop(adev); @@ -627,9 +628,9 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) offset = 0; } else { WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = size; } @@ -638,16 +639,16 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr + offset)); + lower_32_bits(adev->uvd.inst->gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - 
upper_32_bits(adev->uvd.gpu_addr + offset)); + upper_32_bits(adev->uvd.inst->gpu_addr + offset)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); @@ -688,10 +689,10 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); - WDOORBELL32(adev->uvd.ring_enc[0].doorbell_index, 0); - adev->wb.wb[adev->uvd.ring_enc[0].wptr_offs] = 0; - adev->uvd.ring_enc[0].wptr = 0; - adev->uvd.ring_enc[0].wptr_old = 0; + WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0); + adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0; + adev->uvd.inst->ring_enc[0].wptr = 0; + adev->uvd.inst->ring_enc[0].wptr_old = 0; /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); @@ -742,7 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) init_table += header->uvd_table_offset; - ring = &adev->uvd.ring; + ring = &adev->uvd.inst->ring; ring->wptr = 0; size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); @@ -757,9 +758,9 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) offset = 0; } else { MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr)); + lower_32_bits(adev->uvd.inst->gpu_addr)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr)); + upper_32_bits(adev->uvd.inst->gpu_addr)); offset = size; } @@ -768,16 +769,16 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr + offset)); + lower_32_bits(adev->uvd.inst->gpu_addr + offset)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr + offset)); + upper_32_bits(adev->uvd.inst->gpu_addr + offset)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); 
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); @@ -841,7 +842,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; ring->wptr = 0; MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); @@ -874,7 +875,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) */ static int uvd_v7_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; + struct amdgpu_ring *ring = &adev->uvd.inst->ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; @@ -1027,14 +1028,14 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - ring = &adev->uvd.ring_enc[0]; + ring = &adev->uvd.inst->ring_enc[0]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); - ring = &adev->uvd.ring_enc[1]; + ring = &adev->uvd.inst->ring_enc[1]; WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); @@ -1162,8 +1163,8 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { - DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", - ring->idx, r); + DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n", + ring->me, ring->idx, r); return r; } amdgpu_ring_write(ring, @@ -1178,11 +1179,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) } if (i < adev->usec_timeout) { - DRM_DEBUG("ring test on %d succeeded in %d usecs\n", - ring->idx, i); + DRM_DEBUG("(%d)ring test on %d succeeded in %d usecs\n", + ring->me, ring->idx, i); } else { - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", - ring->idx, tmp); + DRM_ERROR("(%d)amdgpu: ring %d test failed (0x%08X)\n", + ring->me, ring->idx, tmp); r = -EINVAL; } return r; @@ -1365,10 +1366,10 @@ static bool uvd_v7_0_check_soft_reset(void *handle) SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->uvd.srbm_soft_reset = srbm_soft_reset; + adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; return true; } else { - adev->uvd.srbm_soft_reset = 0; + adev->uvd.inst->srbm_soft_reset = 0; return false; } } @@ -1377,7 +1378,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; uvd_v7_0_stop(adev); @@ -1389,9 +1390,9 @@ static int uvd_v7_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; - srbm_soft_reset = adev->uvd.srbm_soft_reset; + srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; if (srbm_soft_reset) { u32 tmp; 
@@ -1419,7 +1420,7 @@ static int uvd_v7_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.srbm_soft_reset) + if (!adev->uvd.inst->srbm_soft_reset) return 0; mdelay(5); @@ -1444,14 +1445,14 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, DRM_DEBUG("IH: UVD TRAP\n"); switch (entry->src_id) { case 124: - amdgpu_fence_process(&adev->uvd.ring); + amdgpu_fence_process(&adev->uvd.inst->ring); break; case 119: - amdgpu_fence_process(&adev->uvd.ring_enc[0]); + amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); break; case 120: if (!amdgpu_sriov_vf(adev)) - amdgpu_fence_process(&adev->uvd.ring_enc[1]); + amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -1719,7 +1720,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v7_0_ring_vm_funcs; + adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs; DRM_INFO("UVD is enabled in VM mode\n"); } @@ -1728,7 +1729,7 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev) { int i; for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; + adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; DRM_INFO("UVD ENC is enabled in VM mode\n"); } @@ -1740,8 +1741,8 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = { static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.irq.num_types = adev->uvd.num_enc_rings + 1; - adev->uvd.irq.funcs = &uvd_v7_0_irq_funcs; + adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; + adev->uvd.inst->irq.funcs = &uvd_v7_0_irq_funcs; } const struct amdgpu_ip_block_version uvd_v7_0_ip_block = -- cgit v1.2.1 From 10dd74eac4dba963bfa97f5092040aa75ff742d6 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Tue, 15 May 2018 14:31:24 -0500 Subject: drm/amdgpu/vg20: Restructure uvd.inst to support multiple instances Vega20 has dual UVD, so we need to add multiple-instance support for UVD. Restructure uvd.inst, replacing uvd.inst-> accesses with uvd.inst[0]. Repurpose amdgpu_ring::me as the instance index and initialize it to 0. There are no logical changes here.
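For reference, a minimal sketch of the access pattern this restructuring introduces. This is illustrative only and not part of the patch; the uvd_walk_instances() helper and its debug print are hypothetical, but the fields it touches (uvd.num_uvd_inst, uvd.inst[], ring->me, ring.ready) are the ones added or repurposed by this change:

#include "amdgpu.h"

/* Hypothetical helper, for illustration only: per-instance UVD state now
 * lives in the adev->uvd.inst[] array instead of behind a single
 * adev->uvd.inst-> pointer, indexed by a loop counter or by ring->me.
 */
static void uvd_walk_instances(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
		struct amdgpu_ring *ring = &adev->uvd.inst[i].ring;

		/* ring->me names the owning instance, so code that only has
		 * the ring can still reach its per-instance state:
		 * &adev->uvd.inst[ring->me] is the same slot as
		 * &adev->uvd.inst[i] here.
		 */
		DRM_DEBUG("UVD instance %d ring ready: %d\n", ring->me, ring->ready);
	}
}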
Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 12 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 229 +++---- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1002 +++++++++++++++-------------- 5 files changed, 660 insertions(+), 590 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 1070f4042cbb..39ec6b8890a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -376,14 +376,14 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; uint64_t index; - if (ring != &adev->uvd.inst->ring) { + if (ring != &adev->uvd.inst[ring->me].ring) { ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs]; ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4); } else { /* put fence directly behind firmware */ index = ALIGN(adev->uvd.fw->size, 8); - ring->fence_drv.cpu_addr = adev->uvd.inst->cpu_addr + index; - ring->fence_drv.gpu_addr = adev->uvd.inst->gpu_addr + index; + ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index; + ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index; } amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq)); amdgpu_irq_get(adev, irq_src, irq_type); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 5620ed291107..91517b166a3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -286,7 +286,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_crtc *crtc; uint32_t ui32 = 0; uint64_t ui64 = 0; - int i, found; + int i, j, found; int ui32_size = sizeof(ui32); if (!info->return_size || !info->return_pointer) @@ -348,7 +348,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_UVD: type = AMD_IP_BLOCK_TYPE_UVD; - ring_mask = adev->uvd.inst->ring.ready ? 1 : 0; + for (i = 0; i < adev->uvd.num_uvd_inst; i++) + ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 16; break; @@ -361,8 +362,11 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file break; case AMDGPU_HW_IP_UVD_ENC: type = AMD_IP_BLOCK_TYPE_UVD; - for (i = 0; i < adev->uvd.num_enc_rings; i++) - ring_mask |= ((adev->uvd.inst->ring_enc[i].ready ? 1 : 0) << i); + for (i = 0; i < adev->uvd.num_uvd_inst; i++) + for (j = 0; j < adev->uvd.num_enc_rings; j++) + ring_mask |= + ((adev->uvd.inst[i].ring_enc[j].ready ? 
1 : 0) << + (j + i * adev->uvd.num_enc_rings)); ib_start_alignment = AMDGPU_GPU_PAGE_SIZE; ib_size_alignment = 1; break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 49cad08b5c16..c6850b629d0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -362,6 +362,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) dma_fence_put(ring->vmid_wait); ring->vmid_wait = NULL; + ring->me = 0; ring->adev->rings[ring->idx] = NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 02683a039a98..e961492d357a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -127,7 +127,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) const char *fw_name; const struct common_firmware_header *hdr; unsigned version_major, version_minor, family_id; - int i, r; + int i, j, r; INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); @@ -236,28 +236,30 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); - r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst->vcpu_bo, - &adev->uvd.inst->gpu_addr, &adev->uvd.inst->cpu_addr); - if (r) { - dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); - return r; - } + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { - ring = &adev->uvd.inst->ring; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity, - rq, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up UVD run queue.\n"); - return r; - } + r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo, + &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr); + if (r) { + dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r); + return r; + } - for (i = 0; i < adev->uvd.max_handles; ++i) { - atomic_set(&adev->uvd.inst->handles[i], 0); - adev->uvd.inst->filp[i] = NULL; - } + ring = &adev->uvd.inst[j].ring; + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity, + rq, NULL); + if (r != 0) { + DRM_ERROR("Failed setting up UVD(%d) run queue.\n", j); + return r; + } + for (i = 0; i < adev->uvd.max_handles; ++i) { + atomic_set(&adev->uvd.inst[j].handles[i], 0); + adev->uvd.inst[j].filp[i] = NULL; + } + } /* from uvd v5.0 HW addressing capacity increased to 64 bits */ if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; @@ -284,20 +286,22 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { - int i; - kfree(adev->uvd.inst->saved_bo); + int i, j; - drm_sched_entity_fini(&adev->uvd.inst->ring.sched, &adev->uvd.inst->entity); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + kfree(adev->uvd.inst[j].saved_bo); - amdgpu_bo_free_kernel(&adev->uvd.inst->vcpu_bo, - &adev->uvd.inst->gpu_addr, - (void **)&adev->uvd.inst->cpu_addr); + drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity); - amdgpu_ring_fini(&adev->uvd.inst->ring); + amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo, + &adev->uvd.inst[j].gpu_addr, + (void **)&adev->uvd.inst[j].cpu_addr); - for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) - 
amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); + amdgpu_ring_fini(&adev->uvd.inst[j].ring); + for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i) + amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); + } release_firmware(adev->uvd.fw); return 0; @@ -307,32 +311,33 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) { unsigned size; void *ptr; - int i; + int i, j; - if (adev->uvd.inst->vcpu_bo == NULL) - return 0; + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + if (adev->uvd.inst[j].vcpu_bo == NULL) + continue; - cancel_delayed_work_sync(&adev->uvd.inst->idle_work); + cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work); - /* only valid for physical mode */ - if (adev->asic_type < CHIP_POLARIS10) { - for (i = 0; i < adev->uvd.max_handles; ++i) - if (atomic_read(&adev->uvd.inst->handles[i])) - break; + /* only valid for physical mode */ + if (adev->asic_type < CHIP_POLARIS10) { + for (i = 0; i < adev->uvd.max_handles; ++i) + if (atomic_read(&adev->uvd.inst[j].handles[i])) + break; - if (i == adev->uvd.max_handles) - return 0; - } - - size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); - ptr = adev->uvd.inst->cpu_addr; + if (i == adev->uvd.max_handles) + continue; + } - adev->uvd.inst->saved_bo = kmalloc(size, GFP_KERNEL); - if (!adev->uvd.inst->saved_bo) - return -ENOMEM; + size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo); + ptr = adev->uvd.inst[j].cpu_addr; - memcpy_fromio(adev->uvd.inst->saved_bo, ptr, size); + adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL); + if (!adev->uvd.inst[j].saved_bo) + return -ENOMEM; + memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + } return 0; } @@ -340,59 +345,65 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; + int i; - if (adev->uvd.inst->vcpu_bo == NULL) - return -EINVAL; + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + if (adev->uvd.inst[i].vcpu_bo == NULL) + return -EINVAL; - size = amdgpu_bo_size(adev->uvd.inst->vcpu_bo); - ptr = adev->uvd.inst->cpu_addr; + size = amdgpu_bo_size(adev->uvd.inst[i].vcpu_bo); + ptr = adev->uvd.inst[i].cpu_addr; - if (adev->uvd.inst->saved_bo != NULL) { - memcpy_toio(ptr, adev->uvd.inst->saved_bo, size); - kfree(adev->uvd.inst->saved_bo); - adev->uvd.inst->saved_bo = NULL; - } else { - const struct common_firmware_header *hdr; - unsigned offset; - - hdr = (const struct common_firmware_header *)adev->uvd.fw->data; - if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { - offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy_toio(adev->uvd.inst->cpu_addr, adev->uvd.fw->data + offset, - le32_to_cpu(hdr->ucode_size_bytes)); - size -= le32_to_cpu(hdr->ucode_size_bytes); - ptr += le32_to_cpu(hdr->ucode_size_bytes); + if (adev->uvd.inst[i].saved_bo != NULL) { + memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size); + kfree(adev->uvd.inst[i].saved_bo); + adev->uvd.inst[i].saved_bo = NULL; + } else { + const struct common_firmware_header *hdr; + unsigned offset; + + hdr = (const struct common_firmware_header *)adev->uvd.fw->data; + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset, + le32_to_cpu(hdr->ucode_size_bytes)); + size -= le32_to_cpu(hdr->ucode_size_bytes); + ptr += le32_to_cpu(hdr->ucode_size_bytes); + } + memset_io(ptr, 0, size); + /* to restore uvd fence seq */ + amdgpu_fence_driver_force_completion(&adev->uvd.inst[i].ring); } - memset_io(ptr, 0, size); - /* to restore uvd fence seq */ - 
amdgpu_fence_driver_force_completion(&adev->uvd.inst->ring); } - return 0; } void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) { - struct amdgpu_ring *ring = &adev->uvd.inst->ring; - int i, r; + struct amdgpu_ring *ring; + int i, j, r; - for (i = 0; i < adev->uvd.max_handles; ++i) { - uint32_t handle = atomic_read(&adev->uvd.inst->handles[i]); - if (handle != 0 && adev->uvd.inst->filp[i] == filp) { - struct dma_fence *fence; - - r = amdgpu_uvd_get_destroy_msg(ring, handle, - false, &fence); - if (r) { - DRM_ERROR("Error destroying UVD (%d)!\n", r); - continue; - } + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + ring = &adev->uvd.inst[j].ring; - dma_fence_wait(fence, false); - dma_fence_put(fence); + for (i = 0; i < adev->uvd.max_handles; ++i) { + uint32_t handle = atomic_read(&adev->uvd.inst[j].handles[i]); + if (handle != 0 && adev->uvd.inst[j].filp[i] == filp) { + struct dma_fence *fence; + + r = amdgpu_uvd_get_destroy_msg(ring, handle, + false, &fence); + if (r) { + DRM_ERROR("Error destroying UVD(%d) %d!\n", j, r); + continue; + } - adev->uvd.inst->filp[i] = NULL; - atomic_set(&adev->uvd.inst->handles[i], 0); + dma_fence_wait(fence, false); + dma_fence_put(fence); + + adev->uvd.inst[j].filp[i] = NULL; + atomic_set(&adev->uvd.inst[j].handles[i], 0); + } } } } @@ -667,15 +678,16 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, void *ptr; long r; int i; + uint32_t ip_instance = ctx->parser->job->ring->me; if (offset & 0x3F) { - DRM_ERROR("UVD messages must be 64 byte aligned!\n"); + DRM_ERROR("UVD(%d) messages must be 64 byte aligned!\n", ip_instance); return -EINVAL; } r = amdgpu_bo_kmap(bo, &ptr); if (r) { - DRM_ERROR("Failed mapping the UVD message (%ld)!\n", r); + DRM_ERROR("Failed mapping the UVD(%d) message (%ld)!\n", ip_instance, r); return r; } @@ -685,7 +697,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, handle = msg[2]; if (handle == 0) { - DRM_ERROR("Invalid UVD handle!\n"); + DRM_ERROR("Invalid UVD(%d) handle!\n", ip_instance); return -EINVAL; } @@ -696,18 +708,18 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* try to alloc a new handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { - DRM_ERROR("Handle 0x%x already in use!\n", handle); + if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { + DRM_ERROR("(%d)Handle 0x%x already in use!\n", ip_instance, handle); return -EINVAL; } - if (!atomic_cmpxchg(&adev->uvd.inst->handles[i], 0, handle)) { - adev->uvd.inst->filp[i] = ctx->parser->filp; + if (!atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], 0, handle)) { + adev->uvd.inst[ip_instance].filp[i] = ctx->parser->filp; return 0; } } - DRM_ERROR("No more free UVD handles!\n"); + DRM_ERROR("No more free UVD(%d) handles!\n", ip_instance); return -ENOSPC; case 1: @@ -719,27 +731,27 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx, /* validate the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) { - if (atomic_read(&adev->uvd.inst->handles[i]) == handle) { - if (adev->uvd.inst->filp[i] != ctx->parser->filp) { - DRM_ERROR("UVD handle collision detected!\n"); + if (atomic_read(&adev->uvd.inst[ip_instance].handles[i]) == handle) { + if (adev->uvd.inst[ip_instance].filp[i] != ctx->parser->filp) { + DRM_ERROR("UVD(%d) handle collision detected!\n", ip_instance); return -EINVAL; } return 0; } } - DRM_ERROR("Invalid UVD handle 0x%x!\n", handle); + DRM_ERROR("Invalid UVD(%d) handle 0x%x!\n", ip_instance, 
handle); return -ENOENT; case 2: /* it's a destroy msg, free the handle */ for (i = 0; i < adev->uvd.max_handles; ++i) - atomic_cmpxchg(&adev->uvd.inst->handles[i], handle, 0); + atomic_cmpxchg(&adev->uvd.inst[ip_instance].handles[i], handle, 0); amdgpu_bo_kunmap(bo); return 0; default: - DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); + DRM_ERROR("Illegal UVD(%d) message type (%d)!\n", ip_instance, msg_type); return -EINVAL; } BUG(); @@ -1043,7 +1055,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; - r = amdgpu_job_submit(job, ring, &adev->uvd.inst->entity, + r = amdgpu_job_submit(job, ring, &adev->uvd.inst[ring->me].entity, AMDGPU_FENCE_OWNER_UNDEFINED, &f); if (r) goto err_free; @@ -1189,27 +1201,28 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct dma_fence *fence; long r; + uint32_t ip_instance = ring->me; r = amdgpu_uvd_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get create msg (%ld).\n", ip_instance, r); goto error; } r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)failed to get destroy ib (%ld).\n", ip_instance, r); goto error; } r = dma_fence_wait_timeout(fence, false, timeout); if (r == 0) { - DRM_ERROR("amdgpu: IB test timed out.\n"); + DRM_ERROR("amdgpu: (%d)IB test timed out.\n", ip_instance); r = -ETIMEDOUT; } else if (r < 0) { - DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + DRM_ERROR("amdgpu: (%d)fence wait failed (%ld).\n", ip_instance, r); } else { - DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx); + DRM_DEBUG("ib test on (%d)ring %d succeeded\n", ip_instance, ring->idx); r = 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 66d4bea5fb2c..08f3b6c84bea 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -58,7 +58,7 @@ static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR); } /** @@ -72,10 +72,10 @@ static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - if (ring == &adev->uvd.inst->ring_enc[0]) - return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR); + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR); else - return RREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2); } /** @@ -89,7 +89,7 @@ static uint64_t uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - return RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR); + return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR); } /** @@ -106,10 +106,10 @@ static uint64_t uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring) if (ring->use_doorbell) return adev->wb.wb[ring->wptr_offs]; - if (ring == &adev->uvd.inst->ring_enc[0]) - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR); + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR); else - return RREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2); + return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2); } /** @@ -123,7 +123,7 @@ static void uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - 
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr)); } /** @@ -144,11 +144,11 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) return; } - if (ring == &adev->uvd.inst->ring_enc[0]) - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, + if (ring == &adev->uvd.inst[ring->me].ring_enc[0]) + WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); else - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, + WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); } @@ -387,19 +387,21 @@ static int uvd_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; struct drm_sched_rq *rq; - int i, r; + int i, j, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst->irq); - if (r) - return r; - - /* UVD ENC TRAP */ - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst->irq); + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + /* UVD TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst[j].irq); if (r) return r; + + /* UVD ENC TRAP */ + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst[j].irq); + if (r) + return r; + } } r = amdgpu_uvd_sw_init(adev); @@ -416,43 +418,48 @@ static int uvd_v7_0_sw_init(void *handle) DRM_INFO("PSP loading UVD firmware\n"); } - ring = &adev->uvd.inst->ring_enc[0]; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst->entity_enc, - rq, NULL); - if (r) { - DRM_ERROR("Failed setting up UVD ENC run queue.\n"); - return r; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + ring = &adev->uvd.inst[j].ring_enc[0]; + rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; + r = drm_sched_entity_init(&ring->sched, &adev->uvd.inst[j].entity_enc, + rq, NULL); + if (r) { + DRM_ERROR("(%d)Failed setting up UVD ENC run queue.\n", j); + return r; + } } r = amdgpu_uvd_resume(adev); if (r) return r; - if (!amdgpu_sriov_vf(adev)) { - ring = &adev->uvd.inst->ring; - sprintf(ring->name, "uvd"); - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); - if (r) - return r; - } - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.inst->ring_enc[i]; - sprintf(ring->name, "uvd_enc%d", i); - if (amdgpu_sriov_vf(adev)) { - ring->use_doorbell = true; - - /* currently only use the first enconding ring for - * sriov, so set unused location for other unused rings. - */ - if (i == 0) - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; - else - ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + if (!amdgpu_sriov_vf(adev)) { + ring = &adev->uvd.inst[j].ring; + sprintf(ring->name, "uvd<%d>", j); + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + if (r) + return r; + } + + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + ring = &adev->uvd.inst[j].ring_enc[i]; + sprintf(ring->name, "uvd_enc%d<%d>", i, j); + if (amdgpu_sriov_vf(adev)) { + ring->use_doorbell = true; + + /* currently only use the first enconding ring for + * sriov, so set unused location for other unused rings. 
+ */ + if (i == 0) + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING0_1 * 2; + else + ring->doorbell_index = AMDGPU_DOORBELL64_UVD_RING2_3 * 2 + 1; + } + r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0); + if (r) + return r; } - r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0); - if (r) - return r; } r = amdgpu_virt_alloc_mm_table(adev); @@ -464,7 +471,7 @@ static int uvd_v7_0_sw_init(void *handle) static int uvd_v7_0_sw_fini(void *handle) { - int i, r; + int i, j, r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; amdgpu_virt_free_mm_table(adev); @@ -473,11 +480,12 @@ static int uvd_v7_0_sw_fini(void *handle) if (r) return r; - drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc); - - for (i = 0; i < adev->uvd.num_enc_rings; ++i) - amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]); + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc); + for (i = 0; i < adev->uvd.num_enc_rings; ++i) + amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]); + } return amdgpu_uvd_sw_fini(adev); } @@ -491,9 +499,9 @@ static int uvd_v7_0_sw_fini(void *handle) static int uvd_v7_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; + struct amdgpu_ring *ring; uint32_t tmp; - int i, r; + int i, j, r; if (amdgpu_sriov_vf(adev)) r = uvd_v7_0_sriov_start(adev); @@ -502,57 +510,60 @@ static int uvd_v7_0_hw_init(void *handle) if (r) goto done; - if (!amdgpu_sriov_vf(adev)) { - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - goto done; + for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { + ring = &adev->uvd.inst[j].ring; + + if (!amdgpu_sriov_vf(adev)) { + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } + + r = amdgpu_ring_alloc(ring, 10); + if (r) { + DRM_ERROR("amdgpu: (%d)ring failed to lock UVD ring (%d).\n", j, r); + goto done; + } + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + tmp = PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0); + amdgpu_ring_write(ring, tmp); + amdgpu_ring_write(ring, 0xFFFFF); + + /* Clear timeout status bits */ + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_TIMEOUT_STATUS), 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, j, + mmUVD_SEMA_CNTL), 0)); + amdgpu_ring_write(ring, 3); + + amdgpu_ring_commit(ring); } - r = amdgpu_ring_alloc(ring, 10); - if (r) { - DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r); - goto done; + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + ring = &adev->uvd.inst[j].ring_enc[i]; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) { + ring->ready = false; + goto done; + } } - - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - tmp = PACKET0(SOC15_REG_OFFSET(UVD, 0, - 
mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL), 0); - amdgpu_ring_write(ring, tmp); - amdgpu_ring_write(ring, 0xFFFFF); - - /* Clear timeout status bits */ - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_TIMEOUT_STATUS), 0)); - amdgpu_ring_write(ring, 0x8); - - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, - mmUVD_SEMA_CNTL), 0)); - amdgpu_ring_write(ring, 3); - - amdgpu_ring_commit(ring); } - - for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - ring = &adev->uvd.inst->ring_enc[i]; - ring->ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->ready = false; - goto done; - } - } - done: if (!r) DRM_INFO("UVD and UVD ENC initialized successfully.\n"); @@ -570,7 +581,7 @@ done: static int uvd_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ring *ring = &adev->uvd.inst->ring; + int i; if (!amdgpu_sriov_vf(adev)) uvd_v7_0_stop(adev); @@ -579,7 +590,8 @@ static int uvd_v7_0_hw_fini(void *handle) DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); } - ring->ready = false; + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) + adev->uvd.inst[i].ring.ready = false; return 0; } @@ -619,48 +631,51 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev) { uint32_t size = AMDGPU_UVD_FIRMWARE_SIZE(adev); uint32_t offset; + int i; - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - offset = 0; - } else { - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.inst->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.inst->gpu_addr)); - offset = size; - } + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + offset = 0; + } else { + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr)); + offset = size; + } - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.inst->gpu_addr + offset)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.inst->gpu_addr + offset)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); - - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, - lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, - upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, - AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); - - WREG32_SOC15(UVD, 0, mmUVD_UDEC_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); 
- WREG32_SOC15(UVD, 0, mmUVD_UDEC_DB_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - WREG32_SOC15(UVD, 0, mmUVD_UDEC_DBW_ADDR_CONFIG, - adev->gfx.config.gb_addr_config); - - WREG32_SOC15(UVD, 0, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size); + + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, (1 << 21)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_UVD_HEAP_SIZE); + + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, (2 << 21)); + WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, + AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); + + WREG32_SOC15(UVD, i, mmUVD_UDEC_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, i, mmUVD_UDEC_DB_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + WREG32_SOC15(UVD, i, mmUVD_UDEC_DBW_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + WREG32_SOC15(UVD, i, mmUVD_GP_SCRATCH4, adev->uvd.max_handles); + } } static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, @@ -670,6 +685,7 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, uint64_t addr = table->gpu_addr; struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)table->cpu_addr; uint32_t size; + int i; size = header->header_size + header->vce_table_size + header->uvd_table_size; @@ -689,11 +705,12 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev, /* 4, set resp to zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0); - WDOORBELL32(adev->uvd.inst->ring_enc[0].doorbell_index, 0); - adev->wb.wb[adev->uvd.inst->ring_enc[0].wptr_offs] = 0; - adev->uvd.inst->ring_enc[0].wptr = 0; - adev->uvd.inst->ring_enc[0].wptr_old = 0; - + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0); + adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0; + adev->uvd.inst[i].ring_enc[0].wptr = 0; + adev->uvd.inst[i].ring_enc[0].wptr_old = 0; + } /* 5, kick off the initialization and wait until VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero */ WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_HOST, 0x10000001); @@ -726,6 +743,7 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) struct mmsch_v1_0_cmd_end end = { {0} }; uint32_t *init_table = adev->virt.mm_table.cpu_addr; struct mmsch_v1_0_init_header *header = (struct mmsch_v1_0_init_header *)init_table; + uint8_t i = 0; direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; @@ -743,120 +761,121 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev) init_table += header->uvd_table_offset; - ring = &adev->uvd.inst->ring; - ring->wptr = 0; - size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); - - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), - 0xFFFFFFFF, 0x00000004); - /* mc resume*/ - if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { - 
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); - offset = 0; - } else { - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.inst->gpu_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.inst->gpu_addr)); - offset = size; + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + ring = &adev->uvd.inst[i].ring; + ring->wptr = 0; + size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4); + + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + 0xFFFFFFFF, 0x00000004); + /* mc resume*/ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr)); + offset = 0; + } else { + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr)); + offset = size; + } + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), size); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->uvd.inst[i].gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), + AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); + + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); + /* mc resume end*/ + + /* disable clock gating */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_CGC_CTRL), + ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0); + + /* disable interupt */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + ~UVD_MASTINT_EN__VCPU_EN_MASK, 0); + + /* stall UMC and register bus before resetting VCPU */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + /* put LMI, VCPU, 
RBC etc... into reset */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), + (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK)); + + /* initialize UVD memory controller */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL), + (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + 0x00100000L)); + + /* take all subblocks out of reset, except VCPU */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + + /* enable VCPU clock */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK); + + /* enable master interrupt */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); + + /* clear the bit 4 of UVD_STATUS */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0); + + /* force RBC into idle state */ + size = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); + + ring = &adev->uvd.inst[i].ring_enc[0]; + ring->wptr = 0; + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), ring->gpu_addr); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), ring->ring_size / 4); + + /* boot up the VCPU */ + MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_SOFT_RESET), 0); + + /* enable UMC */ + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); + + MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0x02, 0x02); } - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), - AMDGPU_UVD_FIRMWARE_OFFSET >> 3); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.inst->gpu_addr + offset)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.inst->gpu_addr + offset)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), (1 << 21)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_UVD_HEAP_SIZE); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), - lower_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), - upper_32_bits(adev->uvd.inst->gpu_addr + offset + AMDGPU_UVD_HEAP_SIZE)); - 
MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), (2 << 21)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CACHE_SIZE2), - AMDGPU_UVD_STACK_SIZE + (AMDGPU_UVD_SESSION_SIZE * 40)); - - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH4), adev->uvd.max_handles); - /* mc resume end*/ - - /* disable clock gating */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), - ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK, 0); - - /* disable interupt */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - ~UVD_MASTINT_EN__VCPU_EN_MASK, 0); - - /* stall UMC and register bus before resetting VCPU */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - - /* put LMI, VCPU, RBC etc... into reset */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - (uint32_t)(UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | - UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | - UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | - UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK)); - - /* initialize UVD memory controller */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL), - (uint32_t)((0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__REQ_MODE_MASK | - 0x00100000L)); - - /* take all subblocks out of reset, except VCPU */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - - /* enable VCPU clock */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_VCPU_CNTL), - UVD_VCPU_CNTL__CLK_EN_MASK); - - /* enable master interrupt */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), - (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - - /* clear the bit 4 of UVD_STATUS */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT), 0); - - /* force RBC into idle state */ - size = order_base_2(ring->ring_size); - tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, size); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), tmp); - - ring = &adev->uvd.inst->ring_enc[0]; - ring->wptr = 0; - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_LO), ring->gpu_addr); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_BASE_HI), upper_32_bits(ring->gpu_addr)); - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_RB_SIZE), ring->ring_size / 4); - - /* boot up the VCPU */ - MMSCH_V1_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0); - - /* enable UMC */ - MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, 0); - - MMSCH_V1_0_INSERT_DIRECT_POLL(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0x02, 0x02); - /* add end packet */ memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; @@ -875,15 +894,17 @@ static int 
uvd_v7_0_sriov_start(struct amdgpu_device *adev) */ static int uvd_v7_0_start(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.inst->ring; + struct amdgpu_ring *ring; uint32_t rb_bufsz, tmp; uint32_t lmi_swap_cntl; uint32_t mp_swap_cntl; - int i, j, r; + int i, j, k, r; - /* disable DPG */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS), 0, - ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { + /* disable DPG */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + } /* disable byte swapping */ lmi_swap_cntl = 0; @@ -891,157 +912,159 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) uvd_v7_0_mc_resume(adev); - /* disable clock gating */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_CGC_CTRL), 0, - ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK); - - /* disable interupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, - ~UVD_MASTINT_EN__VCPU_EN_MASK); - - /* stall UMC and register bus before resetting VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - mdelay(1); - - /* put LMI, VCPU, RBC etc... into reset */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | - UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | - UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | - UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | - UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); - mdelay(5); + for (k = 0; k < adev->uvd.num_uvd_inst; ++k) { + ring = &adev->uvd.inst[k].ring; + /* disable clock gating */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0, + ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK); - /* initialize UVD memory controller */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_CTRL, - (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | - UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | - UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | - UVD_LMI_CTRL__REQ_MODE_MASK | - 0x00100000L); + /* disable interupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* stall UMC and register bus before resetting VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + mdelay(1); + + /* put LMI, VCPU, RBC etc... 
into reset */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); + mdelay(5); + + /* initialize UVD memory controller */ + WREG32_SOC15(UVD, k, mmUVD_LMI_CTRL, + (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + 0x00100000L); #ifdef __BIG_ENDIAN - /* swap (8 in 32) RB and IB */ - lmi_swap_cntl = 0xa; - mp_swap_cntl = 0; + /* swap (8 in 32) RB and IB */ + lmi_swap_cntl = 0xa; + mp_swap_cntl = 0; #endif - WREG32_SOC15(UVD, 0, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); - WREG32_SOC15(UVD, 0, mmUVD_MP_SWAP_CNTL, mp_swap_cntl); - - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA0, 0x40c2040); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXA1, 0x0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB0, 0x40c2040); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUXB1, 0x0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_ALU, 0); - WREG32_SOC15(UVD, 0, mmUVD_MPC_SET_MUX, 0x88); - - /* take all subblocks out of reset, except VCPU */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + WREG32_SOC15(UVD, k, mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl); + WREG32_SOC15(UVD, k, mmUVD_MP_SWAP_CNTL, mp_swap_cntl); - /* enable VCPU clock */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, - UVD_VCPU_CNTL__CLK_EN_MASK); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA0, 0x40c2040); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXA1, 0x0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB0, 0x40c2040); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUXB1, 0x0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_ALU, 0); + WREG32_SOC15(UVD, k, mmUVD_MPC_SET_MUX, 0x88); - /* enable UMC */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* take all subblocks out of reset, except VCPU */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); - /* boot up the VCPU */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, 0); - mdelay(10); + /* enable VCPU clock */ + WREG32_SOC15(UVD, k, mmUVD_VCPU_CNTL, + UVD_VCPU_CNTL__CLK_EN_MASK); - for (i = 0; i < 10; ++i) { - uint32_t status; + /* enable UMC */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - for (j = 0; j < 100; ++j) { - status = RREG32_SOC15(UVD, 0, mmUVD_STATUS); + /* boot up the VCPU */ + WREG32_SOC15(UVD, k, mmUVD_SOFT_RESET, 0); + mdelay(10); + + for (i = 0; i < 10; ++i) { + uint32_t status; + + for (j = 0; j < 100; ++j) { + status = RREG32_SOC15(UVD, k, mmUVD_STATUS); + if (status & 2) + break; + mdelay(10); + } + r = 0; if (status & 2) break; + + DRM_ERROR("UVD(%d) not responding, trying to reset the VCPU!!!\n", k); + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(10); + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_SOFT_RESET), 0, + ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); mdelay(10); + r = -1; } - r = 0; - if (status & 2) - break; - DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n"); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - 
mdelay(10); - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_SOFT_RESET), 0, - ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(10); - r = -1; - } - - if (r) { - DRM_ERROR("UVD not responding, giving up!!!\n"); - return r; - } - /* enable master interrupt */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), - (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), - ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - - /* clear the bit 4 of UVD_STATUS */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_STATUS), 0, - ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); - - /* force RBC into idle state */ - rb_bufsz = order_base_2(ring->ring_size); - tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); - tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp); - - /* set the write pointer delay */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0); - - /* set the wb address */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR, - (upper_32_bits(ring->gpu_addr) >> 2)); - - /* programm the RB_BASE for ring buffer */ - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, - lower_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, - upper_32_bits(ring->gpu_addr)); - - /* Initialize the ring buffer's read and write pointers */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR, 0); - - ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR); - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR, - lower_32_bits(ring->wptr)); - - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_RB_CNTL), 0, - ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); - - ring = &adev->uvd.inst->ring_enc[0]; - WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4); + if (r) { + DRM_ERROR("UVD(%d) not responding, giving up!!!\n", k); + return r; + } + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_MASTINT_EN), + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); - ring = &adev->uvd.inst->ring_enc[1]; - WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr); - WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); - WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4); + /* clear the bit 4 of UVD_STATUS */ + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + /* force RBC into idle state */ + rb_bufsz = order_base_2(ring->ring_size); + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_CNTL, tmp); + + /* set the write pointer delay */ + 
WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR_CNTL, 0); + + /* set the wb address */ + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR, + (upper_32_bits(ring->gpu_addr) >> 2)); + + /* programm the RB_BASE for ring buffer */ + WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW, + lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH, + upper_32_bits(ring->gpu_addr)); + + /* Initialize the ring buffer's read and write pointers */ + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR, 0); + + ring->wptr = RREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR); + WREG32_SOC15(UVD, k, mmUVD_RBC_RB_WPTR, + lower_32_bits(ring->wptr)); + + WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_RBC_RB_CNTL), 0, + ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK); + + ring = &adev->uvd.inst[k].ring_enc[0]; + WREG32_SOC15(UVD, k, mmUVD_RB_RPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_WPTR, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_RB_SIZE, ring->ring_size / 4); + + ring = &adev->uvd.inst[k].ring_enc[1]; + WREG32_SOC15(UVD, k, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr)); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_LO2, ring->gpu_addr); + WREG32_SOC15(UVD, k, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(UVD, k, mmUVD_RB_SIZE2, ring->ring_size / 4); + } return 0; } @@ -1054,26 +1077,30 @@ static int uvd_v7_0_start(struct amdgpu_device *adev) */ static void uvd_v7_0_stop(struct amdgpu_device *adev) { - /* force RBC into idle state */ - WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, 0x11010101); - - /* Stall UMC and register bus before resetting VCPU */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), - UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - mdelay(1); - - /* put VCPU into reset */ - WREG32_SOC15(UVD, 0, mmUVD_SOFT_RESET, - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); - mdelay(5); + uint8_t i = 0; + + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + /* force RBC into idle state */ + WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101); - /* disable VCPU clock */ - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CNTL, 0x0); + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), + UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + mdelay(1); - /* Unstall UMC and register bus */ - WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, - ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + /* put VCPU into reset */ + WREG32_SOC15(UVD, i, mmUVD_SOFT_RESET, + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK); + mdelay(5); + + /* disable VCPU clock */ + WREG32_SOC15(UVD, i, mmUVD_VCPU_CNTL, 0x0); + + /* Unstall UMC and register bus */ + WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + } } /** @@ -1092,26 +1119,26 @@ static void uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); amdgpu_ring_write(ring, seq); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, addr & 0xffffffff); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, 
mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, 0); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 2); } @@ -1160,7 +1187,7 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) unsigned i; int r; - WREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID, 0xCAFEDEAD); + WREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID, 0xCAFEDEAD); r = amdgpu_ring_alloc(ring, 3); if (r) { DRM_ERROR("amdgpu: (%d)cp failed to lock ring %d (%d).\n", @@ -1168,11 +1195,11 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) return r; } amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_CONTEXT_ID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_CONTEXT_ID), 0)); amdgpu_ring_write(ring, 0xDEADBEEF); amdgpu_ring_commit(ring); for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_SOC15(UVD, 0, mmUVD_CONTEXT_ID); + tmp = RREG32_SOC15(UVD, ring->me, mmUVD_CONTEXT_ID); if (tmp == 0xDEADBEEF) break; DRM_UDELAY(1); @@ -1204,17 +1231,17 @@ static void uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_VMID), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_VMID), 0)); amdgpu_ring_write(ring, vmid); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_LOW), 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH), 0)); amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_RBC_IB_SIZE), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_RBC_IB_SIZE), 0)); amdgpu_ring_write(ring, ib->length_dw); } @@ -1242,13 +1269,13 @@ static void uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, struct amdgpu_device *adev = ring->adev; amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, reg << 2); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, val); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 8); } @@ -1258,16 +1285,16 @@ static void uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, struct amdgpu_device *adev = ring->adev; 
amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA0), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA0), 0)); amdgpu_ring_write(ring, reg << 2); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_DATA1), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_DATA1), 0)); amdgpu_ring_write(ring, val); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GP_SCRATCH8), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GP_SCRATCH8), 0)); amdgpu_ring_write(ring, mask); amdgpu_ring_write(ring, - PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_GPCOM_VCPU_CMD), 0)); + PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_GPCOM_VCPU_CMD), 0)); amdgpu_ring_write(ring, 12); } @@ -1292,7 +1319,7 @@ static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) struct amdgpu_device *adev = ring->adev; for (i = 0; i < count; i++) - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0)); + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0)); } @@ -1360,16 +1387,16 @@ static bool uvd_v7_0_check_soft_reset(void *handle) if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) || REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) || - (RREG32_SOC15(UVD, 0, mmUVD_STATUS) & + (RREG32_SOC15(UVD, ring->me, mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK)) srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_UVD, 1); if (srbm_soft_reset) { - adev->uvd.inst->srbm_soft_reset = srbm_soft_reset; + adev->uvd.inst[ring->me].srbm_soft_reset = srbm_soft_reset; return true; } else { - adev->uvd.inst->srbm_soft_reset = 0; + adev->uvd.inst[ring->me].srbm_soft_reset = 0; return false; } } @@ -1378,7 +1405,7 @@ static int uvd_v7_0_pre_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.inst->srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; uvd_v7_0_stop(adev); @@ -1390,9 +1417,9 @@ static int uvd_v7_0_soft_reset(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; u32 srbm_soft_reset; - if (!adev->uvd.inst->srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; - srbm_soft_reset = adev->uvd.inst->srbm_soft_reset; + srbm_soft_reset = adev->uvd.inst[ring->me].srbm_soft_reset; if (srbm_soft_reset) { u32 tmp; @@ -1420,7 +1447,7 @@ static int uvd_v7_0_post_soft_reset(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!adev->uvd.inst->srbm_soft_reset) + if (!adev->uvd.inst[ring->me].srbm_soft_reset) return 0; mdelay(5); @@ -1442,17 +1469,29 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { + uint32_t ip_instance; + + switch (entry->client_id) { + case SOC15_IH_CLIENTID_UVD: + ip_instance = 0; + break; + default: + DRM_ERROR("Unhandled client id: %d\n", entry->client_id); + return 0; + } + DRM_DEBUG("IH: UVD TRAP\n"); + switch (entry->src_id) { case 124: - amdgpu_fence_process(&adev->uvd.inst->ring); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring); break; case 119: - amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[0]); break; case 120: if (!amdgpu_sriov_vf(adev)) - amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]); + amdgpu_fence_process(&adev->uvd.inst[ip_instance].ring_enc[1]); break; default: DRM_ERROR("Unhandled interrupt: %d %d\n", @@ -1468,9 +1507,9 @@ static void 
uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) { uint32_t data, data1, data2, suvd_flags; - data = RREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL); - data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE); - data2 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL); + data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL); + data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); + data2 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL); data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK); @@ -1514,18 +1553,18 @@ static void uvd_v7_0_set_sw_clock_gating(struct amdgpu_device *adev) UVD_SUVD_CGC_CTRL__SDB_MODE_MASK); data1 |= suvd_flags; - WREG32_SOC15(UVD, 0, mmUVD_CGC_CTRL, data); - WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, 0); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_CTRL, data2); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_CTRL, data); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, 0); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_CTRL, data2); } static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) { uint32_t data, data1, cgc_flags, suvd_flags; - data = RREG32_SOC15(UVD, 0, mmUVD_CGC_GATE); - data1 = RREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE); + data = RREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE); + data1 = RREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE); cgc_flags = UVD_CGC_GATE__SYS_MASK | UVD_CGC_GATE__UDEC_MASK | @@ -1557,8 +1596,8 @@ static void uvd_v7_0_set_hw_clock_gating(struct amdgpu_device *adev) data |= cgc_flags; data1 |= suvd_flags; - WREG32_SOC15(UVD, 0, mmUVD_CGC_GATE, data); - WREG32_SOC15(UVD, 0, mmUVD_SUVD_CGC_GATE, data1); + WREG32_SOC15(UVD, ring->me, mmUVD_CGC_GATE, data); + WREG32_SOC15(UVD, ring->me, mmUVD_SUVD_CGC_GATE, data1); } static void uvd_v7_0_set_bypass_mode(struct amdgpu_device *adev, bool enable) @@ -1617,7 +1656,7 @@ static int uvd_v7_0_set_powergating_state(void *handle, if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) return 0; - WREG32_SOC15(UVD, 0, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); + WREG32_SOC15(UVD, ring->me, mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK); if (state == AMD_PG_STATE_GATE) { uvd_v7_0_stop(adev); @@ -1720,18 +1759,27 @@ static const struct amdgpu_ring_funcs uvd_v7_0_enc_ring_vm_funcs = { static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.inst->ring.funcs = &uvd_v7_0_ring_vm_funcs; - DRM_INFO("UVD is enabled in VM mode\n"); + int i; + + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs; + adev->uvd.inst[i].ring.me = i; + DRM_INFO("UVD(%d) is enabled in VM mode\n", i); + } } static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev) { - int i; + int i, j; - for (i = 0; i < adev->uvd.num_enc_rings; ++i) - adev->uvd.inst->ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; + for (j = 0; j < adev->uvd.num_uvd_inst; j++) { + for (i = 0; i < adev->uvd.num_enc_rings; ++i) { + adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs; + adev->uvd.inst[j].ring_enc[i].me = j; + } - DRM_INFO("UVD ENC is enabled in VM mode\n"); + DRM_INFO("UVD(%d) ENC is enabled in VM mode\n", j); + } } static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = { @@ -1741,8 +1789,12 @@ static const struct amdgpu_irq_src_funcs uvd_v7_0_irq_funcs = { static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev) { - adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1; - adev->uvd.inst->irq.funcs = 
&uvd_v7_0_irq_funcs; + int i; + + for (i = 0; i < adev->uvd.num_uvd_inst; i++) { + adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1; + adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs; + } } const struct amdgpu_ip_block_version uvd_v7_0_ip_block = -- cgit v1.2.1 From 3b17c622856299a0b0eef02a409edec366a719a7 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 23 Apr 2018 19:11:46 -0400 Subject: drm/amdgpu/vg20:increase 3 rings for AMDGPU_MAX_RINGS For Vega20, there are two UVD hardware instances. The additional instance adds one decode ring and two encode rings, so AMDGPU_MAX_RINGS needs to increase by 3. Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index 4f8dac2d36a5..1513124c5659 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -29,7 +29,7 @@ #include /* max number of rings */ -#define AMDGPU_MAX_RINGS 18 +#define AMDGPU_MAX_RINGS 21 #define AMDGPU_MAX_GFX_RINGS 1 #define AMDGPU_MAX_COMPUTE_RINGS 8 #define AMDGPU_MAX_VCE_RINGS 3 -- cgit v1.2.1 From 9181dba670cf0a0e8e3bda9fa66fecfe7c28b535 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Fri, 11 May 2018 13:56:44 -0500 Subject: drm/amdgpu/vg20:Enable the 2nd instance for uvd For Vega20, set the number of UVD instances to 2 to enable the 2nd instance. The IB test built-in registers need updating for the Vega20 2nd instance. Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 30 ++++++++++++++++-------------- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 7 ++++++- 2 files changed, 22 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index e961492d357a..0772680371a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -72,11 +72,12 @@ #define FIRMWARE_VEGA12 "amdgpu/vega12_uvd.bin" #define FIRMWARE_VEGA20 "amdgpu/vega20_uvd.bin" -#define mmUVD_GPCOM_VCPU_DATA0_VEGA10 (0x03c4 + 0x7e00) -#define mmUVD_GPCOM_VCPU_DATA1_VEGA10 (0x03c5 + 0x7e00) -#define mmUVD_GPCOM_VCPU_CMD_VEGA10 (0x03c3 + 0x7e00) -#define mmUVD_NO_OP_VEGA10 (0x03ff + 0x7e00) -#define mmUVD_ENGINE_CNTL_VEGA10 (0x03c6 + 0x7e00) +/* These are common relative offsets for all asics, from uvd_7_0_offset.h, */ +#define UVD_GPCOM_VCPU_CMD 0x03c3 +#define UVD_GPCOM_VCPU_DATA0 0x03c4 +#define UVD_GPCOM_VCPU_DATA1 0x03c5 +#define UVD_NO_OP 0x03ff +#define UVD_BASE_SI 0x3800 /** * amdgpu_uvd_cs_ctx - Command submission parser context @@ -990,6 +991,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, uint64_t addr; long r; int i; + unsigned offset_idx = 0; + unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; amdgpu_bo_kunmap(bo); amdgpu_bo_unpin(bo); @@ -1009,17 +1012,16 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, goto err; if (adev->asic_type >= CHIP_VEGA10) { - data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0_VEGA10, 0); - data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1_VEGA10, 0); - data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD_VEGA10, 0); - data[3] = PACKET0(mmUVD_NO_OP_VEGA10, 0); - } else { - data[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0); - data[1] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0); - data[2] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0); 
- data[3] = PACKET0(mmUVD_NO_OP, 0); + offset_idx = 1 + ring->me; + offset[1] = adev->reg_offset[UVD_HWIP][0][1]; + offset[2] = adev->reg_offset[UVD_HWIP][1][1]; } + data[0] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA0, 0); + data[1] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_DATA1, 0); + data[2] = PACKET0(offset[offset_idx] + UVD_GPCOM_VCPU_CMD, 0); + data[3] = PACKET0(offset[offset_idx] + UVD_NO_OP, 0); + ib = &job->ibs[0]; addr = amdgpu_bo_gpu_offset(bo); ib->ptr[0] = data[0]; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 08f3b6c84bea..6b719e11b2cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -40,6 +40,8 @@ #include "mmhub/mmhub_1_0_offset.h" #include "mmhub/mmhub_1_0_sh_mask.h" +#define UVD7_MAX_HW_INSTANCES_VEGA20 2 + static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev); static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev); static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev); @@ -370,7 +372,10 @@ error: static int uvd_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->uvd.num_uvd_inst = 1; + if (adev->asic_type == CHIP_VEGA20) + adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20; + else + adev->uvd.num_uvd_inst = 1; if (amdgpu_sriov_vf(adev)) adev->uvd.num_enc_rings = 1; -- cgit v1.2.1 From 915893fd2b7bdb0e1e0a16ca402345ebc60e391b Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 23 Apr 2018 20:49:28 -0400 Subject: drm/amdgpu/vg20:Add IH client ID for the 2nd UVD For Vega20, there are two UVD hardware instances. Add the 2nd IH client ID for the 2nd UVD hardware instance. Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/soc15_ih_clientid.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h index a12d4f27cfa4..12e196c15bbe 100644 --- a/drivers/gpu/drm/amd/include/soc15_ih_clientid.h +++ b/drivers/gpu/drm/amd/include/soc15_ih_clientid.h @@ -43,6 +43,7 @@ enum soc15_ih_clientid { SOC15_IH_CLIENTID_SE2SH = 0x0c, SOC15_IH_CLIENTID_SE3SH = 0x0d, SOC15_IH_CLIENTID_SYSHUB = 0x0e, + SOC15_IH_CLIENTID_UVD1 = 0x0e, SOC15_IH_CLIENTID_THM = 0x0f, SOC15_IH_CLIENTID_UVD = 0x10, SOC15_IH_CLIENTID_VCE0 = 0x11, -- cgit v1.2.1 From b53a6ebcc55971169e56982fd9131d1a6969a053 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 23 Apr 2018 20:56:01 -0400 Subject: drm/amdgpu/vg20:Enable the 2nd instance IRQ for uvd 7.2 For Vega20, the 2nd instance UVD IRQ uses a different client id. 
Enable the 2nd instance IRQ for uvd 7.2 Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 6b719e11b2cd..f9a5482101bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -49,6 +49,11 @@ static int uvd_v7_0_start(struct amdgpu_device *adev); static void uvd_v7_0_stop(struct amdgpu_device *adev); static int uvd_v7_0_sriov_start(struct amdgpu_device *adev); +static int amdgpu_ih_clientid_uvds[] = { + SOC15_IH_CLIENTID_UVD, + SOC15_IH_CLIENTID_UVD1 +}; + /** * uvd_v7_0_ring_get_rptr - get read pointer * @@ -397,13 +402,13 @@ static int uvd_v7_0_sw_init(void *handle) for (j = 0; j < adev->uvd.num_uvd_inst; j++) { /* UVD TRAP */ - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, 124, &adev->uvd.inst[j].irq); + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], 124, &adev->uvd.inst[j].irq); if (r) return r; /* UVD ENC TRAP */ for (i = 0; i < adev->uvd.num_enc_rings; ++i) { - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UVD, i + 119, &adev->uvd.inst[j].irq); + r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], i + 119, &adev->uvd.inst[j].irq); if (r) return r; } @@ -1480,6 +1485,9 @@ static int uvd_v7_0_process_interrupt(struct amdgpu_device *adev, case SOC15_IH_CLIENTID_UVD: ip_instance = 0; break; + case SOC15_IH_CLIENTID_UVD1: + ip_instance = 1; + break; default: DRM_ERROR("Unhandled client id: %d\n", entry->client_id); return 0; -- cgit v1.2.1 From 04305acb9f7fc9978ed7a14bf965802c45ea9682 Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 23 Apr 2018 21:00:58 -0400 Subject: drm/amdgpu/vg20:Enable 2nd instance queue mapping for uvd 7.2 Enable 2nd instance UVD queue mapping for uvd 7.2. To the user, only one UVD instance is presented: there are two rings for UVD decode and 4 rings for UVD encode. 
Signed-off-by: James Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c index 2458d385e55a..8af16e81c7d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c @@ -66,6 +66,8 @@ static int amdgpu_identity_map(struct amdgpu_device *adev, u32 ring, struct amdgpu_ring **out_ring) { + u32 instance; + switch (mapper->hw_ip) { case AMDGPU_HW_IP_GFX: *out_ring = &adev->gfx.gfx_ring[ring]; @@ -77,13 +79,16 @@ static int amdgpu_identity_map(struct amdgpu_device *adev, *out_ring = &adev->sdma.instance[ring].ring; break; case AMDGPU_HW_IP_UVD: - *out_ring = &adev->uvd.inst->ring; + instance = ring; + *out_ring = &adev->uvd.inst[instance].ring; break; case AMDGPU_HW_IP_VCE: *out_ring = &adev->vce.ring[ring]; break; case AMDGPU_HW_IP_UVD_ENC: - *out_ring = &adev->uvd.inst->ring_enc[ring]; + instance = ring / adev->uvd.num_enc_rings; + *out_ring = + &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings]; break; case AMDGPU_HW_IP_VCN_DEC: *out_ring = &adev->vcn.ring_dec; @@ -240,13 +245,14 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev, ip_num_rings = adev->sdma.num_instances; break; case AMDGPU_HW_IP_UVD: - ip_num_rings = 1; + ip_num_rings = adev->uvd.num_uvd_inst; break; case AMDGPU_HW_IP_VCE: ip_num_rings = adev->vce.num_rings; break; case AMDGPU_HW_IP_UVD_ENC: - ip_num_rings = adev->uvd.num_enc_rings; + ip_num_rings = + adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst; break; case AMDGPU_HW_IP_VCN_DEC: ip_num_rings = 1; -- cgit v1.2.1 From 705e98d77bc61d234ef5a1867acb38f6d0d40e4f Mon Sep 17 00:00:00 2001 From: James Zhu Date: Mon, 30 Apr 2018 08:43:12 -0400 Subject: drm/amdgpu/vg20:Enable UVD/VCE for Vega20 Vega20 ucode load type is set to AMDGPU_FW_LOAD_DIRECT by default, so UVD/VCE does not need the PSP IP block to be up. UVD/VCE for Vega20 can therefore be enabled now. Signed-off-by: James Zhu Reviewed-by: Leo Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 295bc9cd46f0..987271b18fd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -529,10 +529,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) #endif amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - if (adev->asic_type != CHIP_VEGA20) { - amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); - amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); - } + amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); + amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; case CHIP_RAVEN: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); -- cgit v1.2.1 From 9883e9d751dad05e8c3ad3c6b769dafc60762c38 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 14 May 2018 11:50:46 -0500 Subject: drm/amdgpu: add df 3.6 headers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Needed for vega20. 
Acked-by: Christian König Signed-off-by: Alex Deucher --- .../drm/amd/include/asic_reg/df/df_3_6_default.h | 26 ++++++++++++ .../drm/amd/include/asic_reg/df/df_3_6_offset.h | 33 +++++++++++++++ .../drm/amd/include/asic_reg/df/df_3_6_sh_mask.h | 48 ++++++++++++++++++++++ 3 files changed, 107 insertions(+) create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h create mode 100644 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h new file mode 100644 index 000000000000..e58c207ac980 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_default.h @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _df_3_6_DEFAULT_HEADER +#define _df_3_6_DEFAULT_HEADER + +#define mmFabricConfigAccessControl_DEFAULT 0x00000000 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h new file mode 100644 index 000000000000..a9575db8d7aa --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h @@ -0,0 +1,33 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef _df_3_6_OFFSET_HEADER +#define _df_3_6_OFFSET_HEADER + +#define mmFabricConfigAccessControl 0x0410 +#define mmFabricConfigAccessControl_BASE_IDX 0 + +#define mmDF_PIE_AON0_DfGlobalClkGater 0x00fc +#define mmDF_PIE_AON0_DfGlobalClkGater_BASE_IDX 0 + +#define mmDF_CS_UMC_AON0_DramBaseAddress0 0x0044 +#define mmDF_CS_UMC_AON0_DramBaseAddress0_BASE_IDX 0 + +#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h new file mode 100644 index 000000000000..88f7c69df6b9 --- /dev/null +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef _df_3_6_SH_MASK_HEADER +#define _df_3_6_SH_MASK_HEADER + +/* FabricConfigAccessControl */ +#define FabricConfigAccessControl__CfgRegInstAccEn__SHIFT 0x0 +#define FabricConfigAccessControl__CfgRegInstAccRegLock__SHIFT 0x1 +#define FabricConfigAccessControl__CfgRegInstID__SHIFT 0x10 +#define FabricConfigAccessControl__CfgRegInstAccEn_MASK 0x00000001L +#define FabricConfigAccessControl__CfgRegInstAccRegLock_MASK 0x00000002L +#define FabricConfigAccessControl__CfgRegInstID_MASK 0x00FF0000L + +/* DF_PIE_AON0_DfGlobalClkGater */ +#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode__SHIFT 0x0 +#define DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK 0x0000000FL + +/* DF_CS_AON0_DramBaseAddress0 */ +#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 +#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 +#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4 +#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8 +#define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc +#define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L +#define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L +#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L +#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L +#define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L + +#endif -- cgit v1.2.1 From 13b581502d5101adadfb7ea269ff4c8074ba76cb Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Wed, 4 Apr 2018 14:30:28 +0800 Subject: drm/amdgpu/df: implement df v3_6 callback functions (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New df helpers for 3.6. 
v2: switch to using df 3.6 headers (Alex) Acked-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 3 +- drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 116 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/df_v3_6.h | 40 ++++++++++++ 3 files changed, 158 insertions(+), 1 deletion(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v3_6.c create mode 100644 drivers/gpu/drm/amd/amdgpu/df_v3_6.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 012ea37b81be..a51c5a960750 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -67,7 +67,8 @@ amdgpu-y += \ # add DF block amdgpu-y += \ - df_v1_7.o + df_v1_7.o \ + df_v3_6.o # add GMC block amdgpu-y += \ diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c new file mode 100644 index 000000000000..60608b3df881 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -0,0 +1,116 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "df_v3_6.h" + +#include "df/df_3_6_default.h" +#include "df/df_3_6_offset.h" +#include "df/df_3_6_sh_mask.h" + +static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0, + 16, 32, 0, 0, 0, 2, 4, 8}; + +static void df_v3_6_init(struct amdgpu_device *adev) +{ +} + +static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp; + + if (enable) { + tmp = RREG32_SOC15(DF, 0, mmFabricConfigAccessControl); + tmp &= ~FabricConfigAccessControl__CfgRegInstAccEn_MASK; + WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, tmp); + } else + WREG32_SOC15(DF, 0, mmFabricConfigAccessControl, + mmFabricConfigAccessControl_DEFAULT); +} + +static u32 df_v3_6_get_fb_channel_number(struct amdgpu_device *adev) +{ + u32 tmp; + + tmp = RREG32_SOC15(DF, 0, mmDF_CS_UMC_AON0_DramBaseAddress0); + tmp &= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK; + tmp >>= DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + + return tmp; +} + +static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev) +{ + int fb_channel_number; + + fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); + if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number)) + fb_channel_number = 0; + + return df_v3_6_channel_number[fb_channel_number]; +} + +static void df_v3_6_update_medium_grain_clock_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 tmp; + + /* Put DF on broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, true); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DF_MGCG)) { + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY; + WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } else { + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + tmp &= ~DF_PIE_AON0_DfGlobalClkGater__MGCGMode_MASK; + tmp |= DF_V3_6_MGCG_DISABLE; + WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp); + } + + /* Exit broadcast mode */ + adev->df_funcs->enable_broadcast_mode(adev, false); +} + +static void df_v3_6_get_clockgating_state(struct amdgpu_device *adev, + u32 *flags) +{ + u32 tmp; + + /* AMD_CG_SUPPORT_DF_MGCG */ + tmp = RREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater); + if (tmp & DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY) + *flags |= AMD_CG_SUPPORT_DF_MGCG; +} + +const struct amdgpu_df_funcs df_v3_6_funcs = { + .init = df_v3_6_init, + .enable_broadcast_mode = df_v3_6_enable_broadcast_mode, + .get_fb_channel_number = df_v3_6_get_fb_channel_number, + .get_hbm_channel_number = df_v3_6_get_hbm_channel_number, + .update_medium_grain_clock_gating = + df_v3_6_update_medium_grain_clock_gating, + .get_clockgating_state = df_v3_6_get_clockgating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.h b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h new file mode 100644 index 000000000000..e79c58e5efcb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.h @@ -0,0 +1,40 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __DF_V3_6_H__ +#define __DF_V3_6_H__ + +#include "soc15_common.h" + +enum DF_V3_6_MGCG { + DF_V3_6_MGCG_DISABLE = 0, + DF_V3_6_MGCG_ENABLE_00_CYCLE_DELAY = 1, + DF_V3_6_MGCG_ENABLE_01_CYCLE_DELAY = 2, + DF_V3_6_MGCG_ENABLE_15_CYCLE_DELAY = 13, + DF_V3_6_MGCG_ENABLE_31_CYCLE_DELAY = 14, + DF_V3_6_MGCG_ENABLE_63_CYCLE_DELAY = 15 +}; + +extern const struct amdgpu_df_funcs df_v3_6_funcs; + +#endif -- cgit v1.2.1 From 698758bbb3e3e344073f86f2d011cc536d94da49 Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Wed, 4 Apr 2018 14:32:10 +0800 Subject: drm/amdgpu: Switch to use df_v3_6_funcs for vega20 (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: fix whitespace (Alex) Reviewed-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 987271b18fd1..0e4f67e4c875 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -51,6 +51,7 @@ #include "gfxhub_v1_0.h" #include "mmhub_v1_0.h" #include "df_v1_7.h" +#include "df_v3_6.h" #include "vega10_ih.h" #include "sdma_v4_0.h" #include "uvd_v7_0.h" @@ -501,7 +502,10 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) else adev->nbio_funcs = &nbio_v6_1_funcs; - adev->df_funcs = &df_v1_7_funcs; + if (adev->asic_type == CHIP_VEGA20) + adev->df_funcs = &df_v3_6_funcs; + else + adev->df_funcs = &df_v1_7_funcs; adev->nbio_funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) -- cgit v1.2.1 From 1204a26e03a2b46917f7164e665dfc3b67a0ae1e Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Mon, 22 Jan 2018 19:08:33 +0800 Subject: drm/amdgpu: Add vega20 pci ids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Acked-by: Christian König Signed-off-by: Feifei Xu Reviewed-by: Alex Deucher Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 739e7e09c8b0..e33e53cde634 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -560,6 +560,13 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, + /* Vega 20 */ + {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, -- cgit v1.2.1 From 950f23ebdcfc7ca53d32d76631ba6c4e61d0f88e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 14 May 2018 11:28:04 -0500 Subject: drm/amdgpu: flag Vega20 as experimental MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Must set amdgpu.exp_hw_support=1 on the kernel command line in grub to enable support. Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index e33e53cde634..b0bf2f24da48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -561,12 +561,12 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, {0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12}, /* Vega 20 */ - {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, - {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, - {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, - {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, - {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, - {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20}, + {0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20|AMD_EXP_HW_SUPPORT}, /* Raven */ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, -- cgit v1.2.1 From 20b6b7885df58b86d9b2768852bb2c81081e2c93 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Tue, 15 May 2018 14:12:21 -0400 Subject: drm/amdgpu: Skip drm_sched_entity related ops for KIQ ring. Following change 75fbed2 we never initialize or use the GPU scheduler for KIQ and hence we need to skip KIQ ring when iterating amdgpu_ctx's scheduler entites. 
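The same guard is added to all three loops in amdgpu_ctx.c; pulled out of the diff below, the resulting teardown loop reads:

	for (i = 0; i < ctx->adev->num_rings; i++) {
		/* KIQ never had a scheduler entity set up, so there is
		 * nothing to fini/release for it */
		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
			continue;

		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
				      &ctx->rings[i].entity);
	}
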
Signed-off-by: Andrey Grodzovsky Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index a8e531d604fa..c5bb36275e93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -173,9 +173,14 @@ static void amdgpu_ctx_do_release(struct kref *ref) ctx = container_of(ref, struct amdgpu_ctx, refcount); - for (i = 0; i < ctx->adev->num_rings; i++) + for (i = 0; i < ctx->adev->num_rings; i++) { + + if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) + continue; + drm_sched_entity_fini(&ctx->adev->rings[i]->sched, &ctx->rings[i].entity); + } amdgpu_ctx_fini(ref); } @@ -452,12 +457,17 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) if (!ctx->adev) return; - for (i = 0; i < ctx->adev->num_rings; i++) + for (i = 0; i < ctx->adev->num_rings; i++) { + + if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) + continue; + if (kref_read(&ctx->refcount) == 1) drm_sched_entity_do_release(&ctx->adev->rings[i]->sched, &ctx->rings[i].entity); else DRM_ERROR("ctx %p is still alive\n", ctx); + } } } @@ -474,12 +484,17 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr) if (!ctx->adev) return; - for (i = 0; i < ctx->adev->num_rings; i++) + for (i = 0; i < ctx->adev->num_rings; i++) { + + if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring) + continue; + if (kref_read(&ctx->refcount) == 1) drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched, &ctx->rings[i].entity); else DRM_ERROR("ctx %p is still alive\n", ctx); + } } } -- cgit v1.2.1 From 01233b8073455e5d489b95758c3afeb78ff94530 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Mon, 14 May 2018 16:03:01 +0800 Subject: drm/amd/pp: Workaround flickering issue on RV Screen flickering observed while running 1080p video using MPV_VAAPI/VDPAU with 4x4K@60 monitors Need to set higher mclk in this configuration. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 2f69bfa478a7..017ef2d169e9 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -600,7 +600,10 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, data->gfx_min_freq_limit/100); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, + hwmgr->display_config->num_display > 3 ? + SMU10_UMD_PSTATE_PEAK_FCLK : SMU10_UMD_PSTATE_MIN_FCLK); + smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, SMU10_UMD_PSTATE_MIN_SOCCLK); -- cgit v1.2.1 From b9245b949885f24e84ae16d99d3898a5f1e0ba24 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 19 Apr 2018 09:57:21 +0200 Subject: drm/amdgpu: remove unused member MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This lock isn't used any more. 
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 4cf678684a12..d6827083572a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -187,9 +187,6 @@ struct amdgpu_vm { struct amdgpu_vm_pt root; struct dma_fence *last_update; - /* protecting freed */ - spinlock_t freed_lock; - /* Scheduler entity for page table updates */ struct drm_sched_entity entity; -- cgit v1.2.1 From 2b6dc93a3d439136c3fe11291a506e581b84a327 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 16 May 2018 08:39:58 -0500 Subject: drm/amdgpu/display: remove VEGAM config option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Leftover from bringup. No need to keep it around for upstream. Reviewed-by: Harry Wentland Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 7 ------- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ---- drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c | 2 -- drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 2 -- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 4 ---- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 4 ---- drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 4 ---- drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 2 -- drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 2 -- drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 2 -- drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h | 2 -- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6 +----- drivers/gpu/drm/amd/display/include/dal_types.h | 2 -- 13 files changed, 1 insertion(+), 42 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 6dcec9c9126b..a0eef59e65ba 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -34,13 +34,6 @@ config DEBUG_KERNEL_DC if you want to hit kdgb_break in assert. 
-config DRM_AMD_DC_VEGAM - bool "VEGAM support" - depends on DRM_AMD_DC - help - Choose this option if you want to have - VEGAM support for display engine - config DRM_AMD_DC_VG20 bool "Vega20 support" depends on DRM_AMD_DC diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6f5cb26b243c..6d0dc1fecb39 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1514,9 +1514,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case CHIP_VEGAM: -#endif case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: @@ -1710,9 +1708,7 @@ static int dm_early_init(void *handle) adev->mode_info.plane_type = dm_plane_type_default; break; case CHIP_POLARIS10: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case CHIP_VEGAM: -#endif adev->mode_info.num_crtc = 6; adev->mode_info.num_hpd = 6; adev->mode_info.num_dig = 6; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c index be066c49b984..253bbb1eea60 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c @@ -51,9 +51,7 @@ bool dal_bios_parser_init_cmd_tbl_helper( return true; case DCE_VERSION_11_2: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case DCE_VERSION_11_22: -#endif *h = dal_cmd_tbl_helper_dce112_get_table(); return true; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c index 9b9e06995805..bbbcef566c55 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c @@ -52,9 +52,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2( return true; case DCE_VERSION_11_2: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case DCE_VERSION_11_22: -#endif *h = dal_cmd_tbl_helper_dce112_get_table2(); return true; #if defined(CONFIG_DRM_AMD_DC_DCN1_0) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c index 4ee3c26f7c13..2c4e8f0cb2dc 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c @@ -59,10 +59,8 @@ static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asi return BW_CALCS_VERSION_POLARIS10; if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev)) return BW_CALCS_VERSION_POLARIS11; -#if defined(CONFIG_DRM_AMD_DC_VEGAM) if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) return BW_CALCS_VERSION_VEGAM; -#endif return BW_CALCS_VERSION_INVALID; case FAMILY_AI: @@ -2151,11 +2149,9 @@ void bw_calcs_init(struct bw_calcs_dceip *bw_dceip, dceip.mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/ break; case BW_CALCS_VERSION_POLARIS10: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) /* TODO: Treat VEGAM the same as P10 for now * Need to tune the para for VEGAM if needed */ case BW_CALCS_VERSION_VEGAM: -#endif vbios.memory_type = bw_def_gddr5; vbios.dram_channel_width_in_bits = 32; vbios.number_of_dram_channels = asic_id.vram_width / vbios.dram_channel_width_in_bits; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 9eb731fb5251..345835ff58d1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -79,10 +79,8 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id) ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) { dc_version = DCE_VERSION_11_2; } -#if defined(CONFIG_DRM_AMD_DC_VEGAM) if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) dc_version = DCE_VERSION_11_22; -#endif break; case FAMILY_AI: dc_version = DCE_VERSION_12_0; @@ -129,9 +127,7 @@ struct resource_pool *dc_create_resource_pool( num_virtual_links, dc, asic_id); break; case DCE_VERSION_11_2: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case DCE_VERSION_11_22: -#endif res_pool = dce112_create_resource_pool( num_virtual_links, dc); break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 223db98a568a..0570e7e4d0a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -590,9 +590,7 @@ static uint32_t dce110_get_pix_clk_dividers( pll_settings, pix_clk_params); break; case DCE_VERSION_11_2: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case DCE_VERSION_11_22: -#endif case DCE_VERSION_12_0: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case DCN_VERSION_1_0: @@ -982,9 +980,7 @@ static bool dce110_program_pix_clk( break; case DCE_VERSION_11_2: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case DCE_VERSION_11_22: -#endif case DCE_VERSION_12_0: #if defined(CONFIG_DRM_AMD_DC_DCN1_0) case DCN_VERSION_1_0: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c index 61fe484da1a0..0caee3523017 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c @@ -75,9 +75,7 @@ bool dal_hw_factory_init( return true; case DCE_VERSION_11_0: case DCE_VERSION_11_2: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case DCE_VERSION_11_22: -#endif dal_hw_factory_dce110_init(factory); return true; case DCE_VERSION_12_0: diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c index 910ae2b7bf64..55c707488541 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c @@ -72,9 +72,7 @@ bool dal_hw_translate_init( case DCE_VERSION_10_0: case DCE_VERSION_11_0: case DCE_VERSION_11_2: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case DCE_VERSION_11_22: -#endif dal_hw_translate_dce110_init(translate); return true; case DCE_VERSION_12_0: diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c index c3d7c320fdba..14dc8c94d862 100644 --- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c +++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c @@ -83,9 +83,7 @@ struct i2caux *dal_i2caux_create( case DCE_VERSION_8_3: return dal_i2caux_dce80_create(ctx); case DCE_VERSION_11_2: -#if defined(CONFIG_DRM_AMD_DC_VEGAM) case DCE_VERSION_11_22: -#endif return dal_i2caux_dce112_create(ctx); case DCE_VERSION_11_0: return dal_i2caux_dce110_create(ctx); diff --git a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h index 933ea7a1e18b..eece165206f9 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h @@ -43,9 +43,7 @@ enum bw_calcs_version { BW_CALCS_VERSION_POLARIS10, BW_CALCS_VERSION_POLARIS11, BW_CALCS_VERSION_POLARIS12, -#if defined(CONFIG_DRM_AMD_DC_VEGAM) BW_CALCS_VERSION_VEGAM, -#endif BW_CALCS_VERSION_STONEY, 
BW_CALCS_VERSION_VEGA10 }; diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 77d2856be9f6..6aeb5a2902c3 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -86,6 +86,7 @@ #define VI_POLARIS10_P_A0 80 #define VI_POLARIS11_M_A0 90 #define VI_POLARIS12_V_A0 100 +#define VI_VEGAM_A0 110 #define VI_UNKNOWN 0xFF @@ -98,14 +99,9 @@ (eChipRev < VI_POLARIS11_M_A0)) #define ASIC_REV_IS_POLARIS11_M(eChipRev) ((eChipRev >= VI_POLARIS11_M_A0) && \ (eChipRev < VI_POLARIS12_V_A0)) -#if defined(CONFIG_DRM_AMD_DC_VEGAM) -#define VI_VEGAM_A0 110 #define ASIC_REV_IS_POLARIS12_V(eChipRev) ((eChipRev >= VI_POLARIS12_V_A0) && \ (eChipRev < VI_VEGAM_A0)) #define ASIC_REV_IS_VEGAM(eChipRev) (eChipRev >= VI_VEGAM_A0) -#else -#define ASIC_REV_IS_POLARIS12_V(eChipRev) (eChipRev >= VI_POLARIS12_V_A0) -#endif /* DCE11 */ #define CZ_CARRIZO_A0 0x01 diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h index 5b1f8cef0c22..840142b65f8b 100644 --- a/drivers/gpu/drm/amd/display/include/dal_types.h +++ b/drivers/gpu/drm/amd/display/include/dal_types.h @@ -40,9 +40,7 @@ enum dce_version { DCE_VERSION_10_0, DCE_VERSION_11_0, DCE_VERSION_11_2, -#if defined(CONFIG_DRM_AMD_DC_VEGAM) DCE_VERSION_11_22, -#endif DCE_VERSION_12_0, DCE_VERSION_MAX, DCN_VERSION_1_0, -- cgit v1.2.1 From b4b9f944e4ee3d1a268d96d7de2d519b491e8ea5 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 16 May 2018 15:28:59 -0500 Subject: drm/amdgpu/display: remove VEGA20 config option Leftover from bringup. No need to keep it around for upstream. Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/Kconfig | 8 - drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 - drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 6 - .../drm/amd/display/dc/dce120/dce120_resource.c | 177 --------------------- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 2 - 5 files changed, 195 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index a0eef59e65ba..d5d4586e6176 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -34,12 +34,4 @@ config DEBUG_KERNEL_DC if you want to hit kdgb_break in assert. 
-config DRM_AMD_DC_VG20 - bool "Vega20 support" - depends on DRM_AMD_DC - help - Choose this option if you want to have - Vega20 support for display engine - - endmenu diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 4561673a0fe6..b8cef7af3c4a 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1331,9 +1331,7 @@ static enum bp_result bios_parser_get_firmware_info( result = get_firmware_info_v3_2(bp, info); break; case 3: -#ifdef CONFIG_DRM_AMD_DC_VG20 result = get_firmware_info_v3_2(bp, info); -#endif break; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index aa4cf3095235..f043e5ea412c 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -413,18 +413,12 @@ static int dce112_set_clock( /*VBIOS will determine DPREFCLK frequency, so we don't set it*/ dce_clk_params.target_clock_frequency = 0; dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK; -#ifndef CONFIG_DRM_AMD_DC_VG20 - dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = - (dce_clk_params.pll_id == - CLOCK_SOURCE_COMBO_DISPLAY_PLL0); -#else if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev)) dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = (dce_clk_params.pll_id == CLOCK_SOURCE_COMBO_DISPLAY_PLL0); else dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false; -#endif bp->funcs->set_dce_clock(bp, &dce_clk_params); diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 545f35f0821f..2d58daccc005 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -814,7 +814,6 @@ static void bw_calcs_data_update_from_pplib(struct dc *dc) dm_pp_notify_wm_clock_changes(dc->ctx, &clk_ranges); } -#ifdef CONFIG_DRM_AMD_DC_VG20 static uint32_t read_pipe_fuses(struct dc_context *ctx) { uint32_t value = dm_read_reg_soc15(ctx, mmCC_DC_PIPE_DIS, 0); @@ -1020,182 +1019,6 @@ res_create_fail: return false; } -#else -static bool construct( - uint8_t num_virtual_links, - struct dc *dc, - struct dce110_resource_pool *pool) -{ - unsigned int i; - struct dc_context *ctx = dc->ctx; - struct irq_service_init_data irq_init_data; - - ctx->dc_bios->regs = &bios_regs; - - pool->base.res_cap = &res_cap; - pool->base.funcs = &dce120_res_pool_funcs; - - /* TODO: Fill more data from GreenlandAsicCapability.cpp */ - pool->base.pipe_count = res_cap.num_timing_generator; - pool->base.timing_generator_count = pool->base.res_cap->num_timing_generator; - pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - - dc->caps.max_downscale_ratio = 200; - dc->caps.i2c_speed_in_khz = 100; - dc->caps.max_cursor_size = 128; - dc->caps.dual_link_dvi = true; - - dc->debug = debug_defaults; - - /************************************************* - * Create resources * - *************************************************/ - - pool->base.clock_sources[DCE120_CLK_SRC_PLL0] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL0, - &clk_src_regs[0], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL1] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL1, - &clk_src_regs[1], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL2] = - dce120_clock_source_create(ctx, ctx->dc_bios, - 
CLOCK_SOURCE_COMBO_PHY_PLL2, - &clk_src_regs[2], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL3] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL3, - &clk_src_regs[3], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL4] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL4, - &clk_src_regs[4], false); - pool->base.clock_sources[DCE120_CLK_SRC_PLL5] = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_COMBO_PHY_PLL5, - &clk_src_regs[5], false); - pool->base.clk_src_count = DCE120_CLK_SRC_TOTAL; - - pool->base.dp_clock_source = - dce120_clock_source_create(ctx, ctx->dc_bios, - CLOCK_SOURCE_ID_DP_DTO, - &clk_src_regs[0], true); - - for (i = 0; i < pool->base.clk_src_count; i++) { - if (pool->base.clock_sources[i] == NULL) { - dm_error("DC: failed to create clock sources!\n"); - BREAK_TO_DEBUGGER(); - goto clk_src_create_fail; - } - } - - pool->base.display_clock = dce120_disp_clk_create(ctx); - if (pool->base.display_clock == NULL) { - dm_error("DC: failed to create display clock!\n"); - BREAK_TO_DEBUGGER(); - goto disp_clk_create_fail; - } - - pool->base.dmcu = dce_dmcu_create(ctx, - &dmcu_regs, - &dmcu_shift, - &dmcu_mask); - if (pool->base.dmcu == NULL) { - dm_error("DC: failed to create dmcu!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - pool->base.abm = dce_abm_create(ctx, - &abm_regs, - &abm_shift, - &abm_mask); - if (pool->base.abm == NULL) { - dm_error("DC: failed to create abm!\n"); - BREAK_TO_DEBUGGER(); - goto res_create_fail; - } - - irq_init_data.ctx = dc->ctx; - pool->base.irqs = dal_irq_service_dce120_create(&irq_init_data); - if (!pool->base.irqs) - goto irqs_create_fail; - - for (i = 0; i < pool->base.pipe_count; i++) { - pool->base.timing_generators[i] = - dce120_timing_generator_create( - ctx, - i, - &dce120_tg_offsets[i]); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto controller_create_fail; - } - - pool->base.mis[i] = dce120_mem_input_create(ctx, i); - - if (pool->base.mis[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create memory input!\n"); - goto controller_create_fail; - } - - pool->base.ipps[i] = dce120_ipp_create(ctx, i); - if (pool->base.ipps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create input pixel processor!\n"); - goto controller_create_fail; - } - - pool->base.transforms[i] = dce120_transform_create(ctx, i); - if (pool->base.transforms[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create transform!\n"); - goto res_create_fail; - } - - pool->base.opps[i] = dce120_opp_create( - ctx, - i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - } - } - - if (!resource_construct(num_virtual_links, dc, &pool->base, - &res_create_funcs)) - goto res_create_fail; - - /* Create hardware sequencer */ - if (!dce120_hw_sequencer_create(dc)) - goto controller_create_fail; - - dc->caps.max_planes = pool->base.pipe_count; - - bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id); - - bw_calcs_data_update_from_pplib(dc); - - return true; - -irqs_create_fail: -controller_create_fail: -disp_clk_create_fail: -clk_src_create_fail: -res_create_fail: - - destruct(pool); - - return false; -} -#endif struct resource_pool *dce120_create_resource_pool( uint8_t num_virtual_links, diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h 
b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 6aeb5a2902c3..cac069dd2a0e 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -115,10 +115,8 @@ /* DCE12 */ #define AI_UNKNOWN 0xFF -#ifdef CONFIG_DRM_AMD_DC_VG20 #define AI_VEGA20_P_A0 40 #define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN)) -#endif #define AI_GREENLAND_P_A0 1 #define AI_GREENLAND_P_A1 2 -- cgit v1.2.1 From a1a0c40664fbd0bd1a9fa53e14ccab539005e2ca Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 16 May 2018 15:34:19 -0500 Subject: drm/amdgpu/display: fix vega12/20 handling in dal_asic_id.h - Remove unused ASICREV_IS_VEGA12_p() macro - Fix ASICREV_IS_VEGA12_P() macro to properly check against vega20 Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index cac069dd2a0e..25029ed42d89 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -115,19 +115,17 @@ /* DCE12 */ #define AI_UNKNOWN 0xFF -#define AI_VEGA20_P_A0 40 -#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN)) - #define AI_GREENLAND_P_A0 1 #define AI_GREENLAND_P_A1 2 #define AI_UNKNOWN 0xFF #define AI_VEGA12_P_A0 20 +#define AI_VEGA20_P_A0 40 #define ASICREV_IS_GREENLAND_M(eChipRev) (eChipRev < AI_VEGA12_P_A0) #define ASICREV_IS_GREENLAND_P(eChipRev) (eChipRev < AI_VEGA12_P_A0) -#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) -#define ASICREV_IS_VEGA12_p(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_UNKNOWN)) +#define ASICREV_IS_VEGA12_P(eChipRev) ((eChipRev >= AI_VEGA12_P_A0) && (eChipRev < AI_VEGA20_P_A0)) +#define ASICREV_IS_VEGA20_P(eChipRev) ((eChipRev >= AI_VEGA20_P_A0) && (eChipRev < AI_UNKNOWN)) /* DCN1_0 */ #define INTERNAL_REV_RAVEN_A0 0x00 /* First spin of Raven */ -- cgit v1.2.1 From fa19a6e9d0e7b46bedaa526ba71ff1bf376dd93f Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 17 May 2018 15:56:05 +0300 Subject: drm/amd/pp: missing curly braces in smu7_enable_sclk_mclk_dpm() We added some more lines of code to this if statement but forgot to add curly braces. 
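A minimal illustration of the hazard (generic names, not the driver's): without braces only the innermost statement stays conditional, so the call that was meant to be guarded by the outer check runs unconditionally.

	if (outer_cond)			/* braces forgotten */
		if (inner_cond)
			tweak();
	do_enable();			/* meant to be under outer_cond, but is not */

	if (outer_cond) {		/* with braces restored */
		if (inner_cond)
			tweak();
		do_enable();		/* guarded again */
	}
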
Fixes: 0c24e7ef233b ("drm/amd/powerplay: add specific changes for VEGAM in smu7_hwmgr.c") Signed-off-by: Dan Carpenter Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 8eb3f5176646..646c9e9bf681 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -1018,7 +1018,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend); /* enable SCLK dpm */ - if (!data->sclk_dpm_key_disabled) + if (!data->sclk_dpm_key_disabled) { if (hwmgr->chip_id == CHIP_VEGAM) smu7_disable_sclk_vce_handshake(hwmgr); @@ -1026,6 +1026,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)), "Failed to enable SCLK DPM during DPM Start Function!", return -EINVAL); + } /* enable MCLK dpm */ if (0 == data->mclk_dpm_key_disabled) { -- cgit v1.2.1 From 50da51744f005f4afd44b69c03e6f2068abfaed8 Mon Sep 17 00:00:00 2001 From: Tom St Denis Date: Wed, 9 May 2018 14:22:29 -0400 Subject: drm/amd/amdgpu: Code comments for the amdgpu_ttm.c driver. (v2) NFC just comments. (v2): Updated based on feedback from Alex Deucher. Signed-off-by: Tom St Denis Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 348 +++++++++++++++++++++++++++++++- 1 file changed, 341 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 69a2b25b3696..e93a0a237dc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -63,16 +63,44 @@ static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); /* * Global memory. */ + +/** + * amdgpu_ttm_mem_global_init - Initialize and acquire reference to + * memory object + * + * @ref: Object for initialization. + * + * This is called by drm_global_item_ref() when an object is being + * initialized. + */ static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref) { return ttm_mem_global_init(ref->object); } +/** + * amdgpu_ttm_mem_global_release - Drop reference to a memory object + * + * @ref: Object being removed + * + * This is called by drm_global_item_unref() when an object is being + * released. + */ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) { ttm_mem_global_release(ref->object); } +/** + * amdgpu_ttm_global_init - Initialize global TTM memory reference + * structures. + * + * @adev: AMDGPU device for which the global structures need to be + * registered. + * + * This is called as part of the AMDGPU ttm init from amdgpu_ttm_init() + * during bring up. 
+ */ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) { struct drm_global_reference *global_ref; @@ -80,7 +108,9 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev) struct drm_sched_rq *rq; int r; + /* ensure reference is false in case init fails */ adev->mman.mem_global_referenced = false; + global_ref = &adev->mman.mem_global_ref; global_ref->global_type = DRM_GLOBAL_TTM_MEM; global_ref->size = sizeof(struct ttm_mem_global); @@ -146,6 +176,18 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) return 0; } +/** + * amdgpu_init_mem_type - Initialize a memory manager for a specific + * type of memory request. + * + * @bdev: The TTM BO device object (contains a reference to + * amdgpu_device) + * @type: The type of memory requested + * @man: + * + * This is called by ttm_bo_init_mm() when a buffer object is being + * initialized. + */ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man) { @@ -161,6 +203,7 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_TT: + /* GTT memory */ man->func = &amdgpu_gtt_mgr_func; man->gpu_offset = adev->gmc.gart_start; man->available_caching = TTM_PL_MASK_CACHING; @@ -193,6 +236,14 @@ static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, return 0; } +/** + * amdgpu_evict_flags - Compute placement flags + * + * @bo: The buffer object to evict + * @placement: Possible destination(s) for evicted BO + * + * Fill in placement data when ttm_bo_evict() is called + */ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { @@ -204,12 +255,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM }; + /* Don't handle scatter gather BOs */ if (bo->type == ttm_bo_type_sg) { placement->num_placement = 0; placement->num_busy_placement = 0; return; } + /* Object isn't an AMDGPU object so ignore */ if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) { placement->placement = &placements; placement->busy_placement = &placements; @@ -217,10 +270,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, placement->num_busy_placement = 1; return; } + abo = ttm_to_amdgpu_bo(bo); switch (bo->mem.mem_type) { case TTM_PL_VRAM: if (!adev->mman.buffer_funcs_enabled) { + /* Move to system memory */ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); } else if (adev->gmc.visible_vram_size < adev->gmc.real_vram_size && !(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) && @@ -238,6 +293,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, abo->placement.busy_placement = &abo->placements[1]; abo->placement.num_busy_placement = 1; } else { + /* Move to GTT memory */ amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); } break; @@ -248,6 +304,15 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, *placement = abo->placement; } +/** + * amdgpu_verify_access - Verify access for a mmap call + * + * @bo: The buffer object to map + * @filp: The file pointer from the process performing the mmap + * + * This is called by ttm_bo_mmap() to verify whether a process + * has the right to mmap a BO to their process space. 
+ */ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); @@ -265,6 +330,15 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) filp->private_data); } +/** + * amdgpu_move_null - Register memory for a buffer object + * + * @bo: The bo to assign the memory to + * @new_mem: The memory to be assigned. + * + * Assign the memory from new_mem to the memory of the buffer object + * bo. + */ static void amdgpu_move_null(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { @@ -275,6 +349,10 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo, new_mem->mm_node = NULL; } +/** + * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT + * buffer. + */ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, struct drm_mm_node *mm_node, struct ttm_mem_reg *mem) @@ -289,9 +367,10 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo, } /** - * amdgpu_find_mm_node - Helper function finds the drm_mm_node - * corresponding to @offset. It also modifies the offset to be - * within the drm_mm_node returned + * amdgpu_find_mm_node - Helper function finds the drm_mm_node + * corresponding to @offset. It also modifies + * the offset to be within the drm_mm_node + * returned */ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem, unsigned long *offset) @@ -430,7 +509,12 @@ error: return r; } - +/** + * amdgpu_move_blit - Copy an entire buffer to another buffer + * + * This is a helper called by amdgpu_bo_move() and + * amdgpu_move_vram_ram() to help move buffers to and from VRAM. + */ static int amdgpu_move_blit(struct ttm_buffer_object *bo, bool evict, bool no_wait_gpu, struct ttm_mem_reg *new_mem, @@ -465,6 +549,11 @@ error: return r; } +/** + * amdgpu_move_vram_ram - Copy VRAM buffer to RAM buffer + * + * Called by amdgpu_bo_move(). + */ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) @@ -477,6 +566,8 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, int r; adev = amdgpu_ttm_adev(bo->bdev); + + /* create space/pages for new_mem in GTT space */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.num_placement = 1; @@ -491,25 +582,36 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, return r; } + /* set caching flags */ r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); if (unlikely(r)) { goto out_cleanup; } + /* Bind the memory to the GTT space */ r = ttm_tt_bind(bo->ttm, &tmp_mem, ctx); if (unlikely(r)) { goto out_cleanup; } + + /* blit VRAM to GTT */ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } + + /* move BO (in tmp_mem) to new_mem */ r = ttm_bo_move_ttm(bo, ctx, new_mem); out_cleanup: ttm_bo_mem_put(bo, &tmp_mem); return r; } +/** + * amdgpu_move_ram_vram - Copy buffer from RAM to VRAM + * + * Called by amdgpu_bo_move(). 
+ */ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) @@ -522,6 +624,8 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, int r; adev = amdgpu_ttm_adev(bo->bdev); + + /* make space in GTT for old_mem buffer */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; placement.num_placement = 1; @@ -535,10 +639,14 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, if (unlikely(r)) { return r; } + + /* move/bind old memory to GTT space */ r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } + + /* copy to VRAM */ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; @@ -548,6 +656,11 @@ out_cleanup: return r; } +/** + * amdgpu_bo_move - Move a buffer object to a new memory location + * + * Called by ttm_bo_handle_move_mem() + */ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) @@ -613,6 +726,11 @@ memcpy: return 0; } +/** + * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault + * + * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() + */ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; @@ -690,6 +808,14 @@ struct amdgpu_ttm_tt { uint32_t last_set_pages; }; +/** + * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to + * by a USERPTR pointer to memory + * + * Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos(). + * This provides a wrapper around the get_user_pages() call to provide + * device accessible pages that back user memory. + */ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -719,6 +845,7 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) } } + /* loop enough times using contiguous pages of memory */ do { unsigned num_pages = ttm->num_pages - pinned; uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE; @@ -757,6 +884,14 @@ release_pages: return r; } +/** + * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages + * as necessary. + * + * Called by amdgpu_cs_list_validate(). This creates the page list + * that backs user memory and will ultimately be mapped into the device + * address space. + */ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -771,6 +906,11 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) } } +/** + * amdgpu_ttm_tt_mark_user_page - Mark pages as dirty + * + * Called while unpinning userptr pages + */ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -789,7 +929,12 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm) } } -/* prepare the sg table with the user pages */ +/** + * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the + * user pages + * + * Called by amdgpu_ttm_backend_bind() + **/ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); @@ -801,17 +946,20 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) enum dma_data_direction direction = write ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + /* Allocate an SG array and squash pages into it */ r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, ttm->num_pages << PAGE_SHIFT, GFP_KERNEL); if (r) goto release_sg; + /* Map SG to device */ r = -ENOMEM; nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); if (nents != ttm->sg->nents) goto release_sg; + /* convert SG to linear array of pages and dma addresses */ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); @@ -822,6 +970,9 @@ release_sg: return r; } +/** + * amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages + */ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); @@ -835,9 +986,10 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) if (!ttm->sg->sgl) return; - /* free the sg table and pages again */ + /* unmap the pages mapped to the device */ dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction); + /* mark the pages as dirty */ amdgpu_ttm_tt_mark_user_pages(ttm); sg_free_table(ttm->sg); @@ -882,6 +1034,12 @@ gart_bind_fail: return r; } +/** + * amdgpu_ttm_backend_bind - Bind GTT memory + * + * Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem(). + * This handles binding GTT memory to the device address space. + */ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { @@ -912,7 +1070,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, return 0; } + /* compute PTE flags relevant to this BO memory */ flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); + + /* bind pages into GART page tables */ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); @@ -923,6 +1084,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm, return r; } +/** + * amdgpu_ttm_alloc_gart - Allocate GART memory for buffer object + */ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); @@ -938,6 +1102,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) amdgpu_gtt_mgr_has_gart_addr(&bo->mem)) return 0; + /* allocate GTT space */ tmp = bo->mem; tmp.mm_node = NULL; placement.num_placement = 1; @@ -953,7 +1118,10 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) if (unlikely(r)) return r; + /* compute PTE flags for this buffer object */ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); + + /* Bind pages */ gtt->offset = (u64)tmp.start << PAGE_SHIFT; r = amdgpu_ttm_gart_bind(adev, bo, flags); if (unlikely(r)) { @@ -969,6 +1137,12 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) return 0; } +/** + * amdgpu_ttm_recover_gart - Rebind GTT pages + * + * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to + * rebind GTT pages during a GPU reset. + */ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); @@ -984,12 +1158,19 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) return r; } +/** + * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages + * + * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and + * ttm_tt_destroy(). 
+ */ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; int r; + /* if the pages have userptr pinning then clear that first */ if (gtt->userptr) amdgpu_ttm_tt_unpin_userptr(ttm); @@ -1021,6 +1202,13 @@ static struct ttm_backend_func amdgpu_backend_func = { .destroy = &amdgpu_ttm_backend_destroy, }; +/** + * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO + * + * @bo: The buffer object to create a GTT ttm_tt object around + * + * Called by ttm_tt_create(). + */ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { @@ -1034,6 +1222,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return NULL; } gtt->ttm.ttm.func = &amdgpu_backend_func; + + /* allocate space for the uninitialized page entries */ if (ttm_sg_tt_init(>t->ttm, bo, page_flags)) { kfree(gtt); return NULL; @@ -1041,6 +1231,12 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return >t->ttm.ttm; } +/** + * amdgpu_ttm_tt_populate - Map GTT pages visible to the device + * + * Map the pages of a ttm_tt object to an address space visible + * to the underlying device. + */ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { @@ -1048,6 +1244,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, struct amdgpu_ttm_tt *gtt = (void *)ttm; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */ if (gtt && gtt->userptr) { ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) @@ -1072,9 +1269,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, } #endif + /* fall back to generic helper to populate the page array + * and map them to the device */ return ttm_populate_and_map_pages(adev->dev, >t->ttm, ctx); } +/** + * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays + * + * Unmaps pages of a ttm_tt object from the device address space and + * unpopulates the page array backing it. + */ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) { struct amdgpu_device *adev; @@ -1100,9 +1305,21 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) } #endif + /* fall back to generic helper to unmap and unpopulate array */ ttm_unmap_and_unpopulate_pages(adev->dev, >t->ttm); } +/** + * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt + * for the current task + * + * @ttm: The ttm_tt object to bind this userptr object to + * @addr: The address in the current tasks VM space to use + * @flags: Requirements of userptr object. + * + * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages + * to current task + */ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags) { @@ -1127,6 +1344,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, return 0; } +/** + * amdgpu_ttm_tt_get_usermm - Return memory manager for ttm_tt object + */ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -1140,6 +1360,12 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) return gtt->usertask->mm; } +/** + * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays + * inside an address range for the + * current task. 
+ * + */ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, unsigned long end) { @@ -1150,10 +1376,16 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, if (gtt == NULL || !gtt->userptr) return false; + /* Return false if no part of the ttm_tt object lies within + * the range + */ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; if (gtt->userptr > end || gtt->userptr + size <= start) return false; + /* Search the lists of tasks that hold this mapping and see + * if current is one of them. If it is return false. + */ spin_lock(>t->guptasklock); list_for_each_entry(entry, >t->guptasks, list) { if (entry->task == current) { @@ -1168,6 +1400,10 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, return true; } +/** + * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been + * invalidated? + */ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, int *last_invalidated) { @@ -1178,6 +1414,12 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, return prev_invalidated != *last_invalidated; } +/** + * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this + * ttm_tt object been invalidated + * since the last time they've + * been set? + */ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -1188,6 +1430,9 @@ bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm) return atomic_read(>t->mmu_invalidations) != gtt->last_set_pages; } +/** + * amdgpu_ttm_tt_is_readonly - Is the ttm_tt object read only? + */ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -1198,6 +1443,12 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); } +/** + * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object + * + * @ttm: The ttm_tt object to compute the flags for + * @mem: The memory registry backing this ttm_tt object + */ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem) { @@ -1222,6 +1473,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, return flags; } +/** + * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict + * a buffer object. + * + * Return true if eviction is sensible. Called by + * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space() + * which tries to evict buffer objects until it can find space + * for a new object and by ttm_bo_force_list_clean() which is + * used to clean out a memory space. + */ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { @@ -1268,6 +1529,19 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, return ttm_bo_eviction_valuable(bo, place); } +/** + * amdgpu_ttm_access_memory - Read or Write memory that backs a + * buffer object. + * + * @bo: The buffer object to read/write + * @offset: Offset into buffer object + * @buf: Secondary buffer to write/read from + * @len: Length in bytes of access + * @write: true if writing + * + * This is used to access VRAM that backs a buffer object via MMIO + * access for debugging purposes. 
+ */ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, unsigned long offset, void *buf, int len, int write) @@ -1444,13 +1718,22 @@ error_create: adev->fw_vram_usage.reserved_bo = NULL; return r; } - +/** + * amdgpu_ttm_init - Init the memory management (ttm) as well as + * various gtt/vram related fields. + * + * This initializes all of the memory space pools that the TTM layer + * will need such as the GTT space (system memory mapped to the device), + * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which + * can be mapped per VMID. + */ int amdgpu_ttm_init(struct amdgpu_device *adev) { uint64_t gtt_size; int r; u64 vis_vram_limit; + /* initialize global references for vram/gtt */ r = amdgpu_ttm_global_init(adev); if (r) { return r; @@ -1471,6 +1754,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* We opt to avoid OOM on system pages allocations */ adev->mman.bdev.no_retry = true; + /* Initialize VRAM pool with all of VRAM divided into pages */ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM, adev->gmc.real_vram_size >> PAGE_SHIFT); if (r) { @@ -1500,6 +1784,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + /* allocate memory as required for VGA + * This is used for VGA emulation and pre-OS scanout buffers to + * avoid display artifacts while transitioning between pre-OS + * and driver. */ if (adev->gmc.stolen_size) { r = amdgpu_bo_create_kernel(adev, adev->gmc.stolen_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, @@ -1511,6 +1799,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); + /* Compute GTT size, either bsaed on 3/4th the size of RAM size + * or whatever the user passed on module init */ if (amdgpu_gtt_size == -1) { struct sysinfo si; @@ -1521,6 +1811,8 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } else gtt_size = (uint64_t)amdgpu_gtt_size << 20; + + /* Initialize GTT memory pool */ r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT); if (r) { DRM_ERROR("Failed initializing GTT heap.\n"); @@ -1529,6 +1821,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned)(gtt_size / (1024 * 1024))); + /* Initialize various on-chip memory pools */ adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT; adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT; adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT; @@ -1568,6 +1861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) } } + /* Register debugfs entries for amdgpu_ttm */ r = amdgpu_ttm_debugfs_init(adev); if (r) { DRM_ERROR("Failed to init debugfs\n"); @@ -1576,11 +1870,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return 0; } +/** + * amdgpu_ttm_late_init - Handle any late initialization for + * amdgpu_ttm + */ void amdgpu_ttm_late_init(struct amdgpu_device *adev) { + /* return the VGA stolen memory (if any) back to VRAM */ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL); } +/** + * amdgpu_ttm_fini - De-initialize the TTM memory pools + */ void amdgpu_ttm_fini(struct amdgpu_device *adev) { if (!adev->mman.initialized) @@ -1908,6 +2210,11 @@ static const struct drm_info_list amdgpu_ttm_debugfs_list[] = { #endif }; +/** + * amdgpu_ttm_vram_read - Linear read access to VRAM + * + * Accesses VRAM via MMIO for debugging purposes. 
+ */ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -1947,6 +2254,11 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf, return result; } +/** + * amdgpu_ttm_vram_write - Linear write access to VRAM + * + * Accesses VRAM via MMIO for debugging purposes. + */ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { @@ -1995,6 +2307,9 @@ static const struct file_operations amdgpu_ttm_vram_fops = { #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS +/** + * amdgpu_ttm_gtt_read - Linear read access to GTT memory + */ static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -2042,6 +2357,13 @@ static const struct file_operations amdgpu_ttm_gtt_fops = { #endif +/** + * amdgpu_iomem_read - Virtual read access to GPU mapped memory + * + * This function is used to read memory that has been mapped to the + * GPU and the known addresses are not physical addresses but instead + * bus addresses (e.g., what you'd put in an IB or ring buffer). + */ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { @@ -2050,6 +2372,7 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, ssize_t result = 0; int r; + /* retrieve the IOMMU domain if any for this device */ dom = iommu_get_domain_for_dev(adev->dev); while (size) { @@ -2062,6 +2385,10 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, bytes = bytes < size ? bytes : size; + /* Translate the bus address to a physical address. If + * the domain is NULL it means there is no IOMMU active + * and the address translation is the identity + */ addr = dom ? iommu_iova_to_phys(dom, addr) : addr; pfn = addr >> PAGE_SHIFT; @@ -2086,6 +2413,13 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, return result; } +/** + * amdgpu_iomem_write - Virtual write access to GPU mapped memory + * + * This function is used to write memory that has been mapped to the + * GPU and the known addresses are not physical addresses but instead + * bus addresses (e.g., what you'd put in an IB or ring buffer). + */ static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) { -- cgit v1.2.1 From 6f0fd919471cf2477e86e2be9b53ecae37b0e815 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 17 May 2018 12:33:34 -0500 Subject: drm/amdgpu: count fences from all uvd instances in idle handler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Current multi-UVD hardware uses a single clock and power source so handle all instances in the idle handler. 
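Concretely, the block can only be power gated once every instance has drained its fences. The following is a condensed sketch of that check, shown for orientation only; it is simplified from the handler changed below, and UVD_IDLE_TIMEOUT together with the dpm helper are assumed from the surrounding amdgpu_uvd.c rather than introduced by this patch.

static void uvd_idle_check_sketch(struct amdgpu_device *adev)
{
	unsigned fences = 0, i;

	/* Sum outstanding fences across every UVD instance; the
	 * instances share one clock and power source, so a single
	 * busy instance keeps the whole block up.
	 */
	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);

	if (fences == 0) {
		/* every instance idle: power can come down */
		if (adev->pm.dpm_enabled)
			amdgpu_dpm_enable_uvd(adev, false);
	} else {
		/* still busy: check again after the idle timeout */
		schedule_delayed_work(&adev->uvd.inst->idle_work,
				      UVD_IDLE_TIMEOUT);
	}
}

The actual change below only touches the fence accounting; the rest of the handler is unchanged.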
Reviewed-by: James Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 0772680371a1..be2917c6698e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1146,7 +1146,11 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); - unsigned fences = amdgpu_fence_count_emitted(&adev->uvd.inst->ring); + unsigned fences = 0, i; + + for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { + fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); + } if (fences == 0) { if (adev->pm.dpm_enabled) { -- cgit v1.2.1 From 4bd2c5dd763866b827dd7e95b9ea71c47fa06126 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 17 May 2018 12:45:52 -0500 Subject: drm/amdgpu: Take uvd encode rings into account in idle work (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Take the encode rings into account in the idle work handler. v2: fix typo: s/num_uvd_inst/num_enc_rings/ Reviewed-by: James Zhu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index be2917c6698e..bcf68f80bbf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1146,10 +1146,13 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); - unsigned fences = 0, i; + unsigned fences = 0, i, j; for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring); + for (j = 0; j < adev->uvd.num_enc_rings; ++j) { + fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]); + } } if (fences == 0) { -- cgit v1.2.1 From 646e906d1d64fdc6bb1a27dac45144dfd8996071 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 17 May 2018 13:03:05 -0500 Subject: drm/amdgpu: Take vcn encode rings into account in idle work MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Take the encode rings into account in the idle work handler. 
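As with UVD, a busy encode ring must keep the block powered even when decode is idle, so the encode rings have to be folded into the same count. A small hypothetical helper makes the accounting explicit; the patch below performs the same loop inline in the idle work handler.

/* Hypothetical helper, not part of the patch: VCN counts as idle only
 * when the decode ring and every encode ring have no emitted fences.
 */
static unsigned vcn_outstanding_fences(struct amdgpu_device *adev)
{
	unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
	unsigned i;

	for (i = 0; i < adev->vcn.num_enc_rings; ++i)
		fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);

	return fences; /* 0 means the block may be power gated */
}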
Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index e5d234cf804f..60468385e6b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -205,6 +205,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) struct amdgpu_device *adev = container_of(work, struct amdgpu_device, vcn.idle_work.work); unsigned fences = amdgpu_fence_count_emitted(&adev->vcn.ring_dec); + unsigned i; + + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { + fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]); + } if (fences == 0) { if (adev->pm.dpm_enabled) { -- cgit v1.2.1 From b79655c37b209315d3b533f6d63a3d6f5fcb6f84 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 12 Apr 2018 22:40:02 -0400 Subject: drm/amd/display: Cleanup unused SetPlaneConfig Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 69 ---------------------- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 10 ---- drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 5 -- 3 files changed, 84 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 2288d0aa773b..ae500421edb6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -2269,74 +2269,6 @@ static void program_gamut_remap(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); } - -/** - * TODO REMOVE, USE UPDATE INSTEAD - */ -static void set_plane_config( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct resource_context *res_ctx) -{ - struct mem_input *mi = pipe_ctx->plane_res.mi; - struct dc_plane_state *plane_state = pipe_ctx->plane_state; - struct xfm_grph_csc_adjustment adjust; - struct out_csc_color_matrix tbl_entry; - unsigned int i; - - memset(&adjust, 0, sizeof(adjust)); - memset(&tbl_entry, 0, sizeof(tbl_entry)); - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; - - dce_enable_fe_clock(dc->hwseq, mi->inst, true); - - set_default_colors(pipe_ctx); - if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) { - tbl_entry.color_space = - pipe_ctx->stream->output_color_space; - - for (i = 0; i < 12; i++) - tbl_entry.regval[i] = - pipe_ctx->stream->csc_color_matrix.matrix[i]; - - pipe_ctx->plane_res.xfm->funcs->opp_set_csc_adjustment - (pipe_ctx->plane_res.xfm, &tbl_entry); - } - - if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) { - adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; - - for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) - adjust.temperature_matrix[i] = - pipe_ctx->stream->gamut_remap_matrix.matrix[i]; - } - - pipe_ctx->plane_res.xfm->funcs->transform_set_gamut_remap(pipe_ctx->plane_res.xfm, &adjust); - - pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; - program_scaler(dc, pipe_ctx); - - program_surface_visibility(dc, pipe_ctx); - - mi->funcs->mem_input_program_surface_config( - mi, - plane_state->format, - &plane_state->tiling_info, - &plane_state->plane_size, - plane_state->rotation, - NULL, - false); - if 
(mi->funcs->set_blank) - mi->funcs->set_blank(mi, pipe_ctx->plane_state->visible); - - if (dc->config.gpu_vm_support) - mi->funcs->mem_input_program_pte_vm( - pipe_ctx->plane_res.mi, - plane_state->format, - &plane_state->tiling_info, - plane_state->rotation); -} - static void update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx) { @@ -3023,7 +2955,6 @@ static const struct hw_sequencer_funcs dce110_funcs = { .init_hw = init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dce110_apply_ctx_for_surface, - .set_plane_config = set_plane_config, .update_plane_addr = update_plane_addr, .update_pending_status = dce110_update_pending_status, .set_input_transfer_func = dce110_set_input_transfer_func, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 572fa601a0eb..8adb8dc44af5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2487,15 +2487,6 @@ static void set_static_screen_control(struct pipe_ctx **pipe_ctx, set_static_screen_control(pipe_ctx[i]->stream_res.tg, value); } -static void set_plane_config( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct resource_context *res_ctx) -{ - /* TODO */ - program_gamut_remap(pipe_ctx); -} - static void dcn10_config_stereo_parameters( struct dc_stream_state *stream, struct crtc_stereo_flags *flags) { @@ -2673,7 +2664,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = { .init_hw = dcn10_init_hw, .apply_ctx_to_hw = dce110_apply_ctx_to_hw, .apply_ctx_for_surface = dcn10_apply_ctx_for_surface, - .set_plane_config = set_plane_config, .update_plane_addr = dcn10_update_plane_addr, .update_dchub = dcn10_update_dchub, .update_pending_status = dcn10_update_pending_status, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 29abf3ecb39c..63fc6c499789 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -83,11 +83,6 @@ struct hw_sequencer_funcs { int num_planes, struct dc_state *context); - void (*set_plane_config)( - const struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct resource_context *res_ctx); - void (*program_gamut_remap)( struct pipe_ctx *pipe_ctx); -- cgit v1.2.1 From eb0e515464e4a1be730c7ac7a01c3ba04c98ea97 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 18 Apr 2018 11:37:53 -0400 Subject: drm/amd/display: get rid of 32.32 unsigned fixed point 32.32 is redundant, 31.32 does everything we use 32.32 for Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 14 +- drivers/gpu/drm/amd/display/dc/basics/Makefile | 2 +- drivers/gpu/drm/amd/display/dc/basics/conversion.c | 28 +- drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 176 ++++---- drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c | 161 -------- .../gpu/drm/amd/display/dc/calcs/custom_float.c | 46 +-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 8 +- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 12 +- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 68 ++-- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 2 + drivers/gpu/drm/amd/display/dc/dc_types.h | 2 +- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 2 +- .../gpu/drm/amd/display/dc/dce/dce_clock_source.c | 60 +-- drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c 
| 26 +- drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 2 +- drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c | 6 +- .../gpu/drm/amd/display/dc/dce/dce_scl_filters.c | 48 +-- .../drm/amd/display/dc/dce/dce_stream_encoder.c | 8 +- drivers/gpu/drm/amd/display/dc/dce/dce_transform.c | 26 +- .../amd/display/dc/dce110/dce110_hw_sequencer.c | 36 +- .../drm/amd/display/dc/dce110/dce110_transform_v.c | 8 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 86 ++-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 2 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c | 6 +- .../gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 38 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4 +- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 28 +- .../amd/display/dc/dcn10/dcn10_stream_encoder.c | 8 +- drivers/gpu/drm/amd/display/dc/irq_types.h | 2 + drivers/gpu/drm/amd/display/include/fixed31_32.h | 118 +++--- drivers/gpu/drm/amd/display/include/fixed32_32.h | 129 ------ .../drm/amd/display/modules/color/color_gamma.c | 446 ++++++++++----------- 32 files changed, 661 insertions(+), 947 deletions(-) delete mode 100644 drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c delete mode 100644 drivers/gpu/drm/amd/display/include/fixed32_32.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index e3d90e918d1b..b329393307e5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -88,9 +88,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut, g = drm_color_lut_extract(lut[i].green, 16); b = drm_color_lut_extract(lut[i].blue, 16); - gamma->entries.red[i] = dal_fixed31_32_from_int(r); - gamma->entries.green[i] = dal_fixed31_32_from_int(g); - gamma->entries.blue[i] = dal_fixed31_32_from_int(b); + gamma->entries.red[i] = dc_fixpt_from_int(r); + gamma->entries.green[i] = dc_fixpt_from_int(g); + gamma->entries.blue[i] = dc_fixpt_from_int(b); } return; } @@ -101,9 +101,9 @@ static void __drm_lut_to_dc_gamma(struct drm_color_lut *lut, g = drm_color_lut_extract(lut[i].green, 16); b = drm_color_lut_extract(lut[i].blue, 16); - gamma->entries.red[i] = dal_fixed31_32_from_fraction(r, MAX_DRM_LUT_VALUE); - gamma->entries.green[i] = dal_fixed31_32_from_fraction(g, MAX_DRM_LUT_VALUE); - gamma->entries.blue[i] = dal_fixed31_32_from_fraction(b, MAX_DRM_LUT_VALUE); + gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE); + gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE); + gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE); } } @@ -208,7 +208,7 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc) for (i = 0; i < 12; i++) { /* Skip 4th element */ if (i % 4 == 3) { - stream->gamut_remap_matrix.matrix[i] = dal_fixed31_32_zero; + stream->gamut_remap_matrix.matrix[i] = dc_fixpt_zero; continue; } diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile index bca33bd9a0d2..b49ea96b5dae 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/Makefile +++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile @@ -24,7 +24,7 @@ # It provides the general basic services required by other DAL # subcomponents. 
-BASICS = conversion.o fixpt31_32.o fixpt32_32.o \ +BASICS = conversion.o fixpt31_32.o \ logger.o log_helpers.o vector.o AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS)) diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c index 310964915a83..50b47f11875c 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c +++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c @@ -41,22 +41,22 @@ uint16_t fixed_point_to_int_frac( uint16_t result; - uint16_t d = (uint16_t)dal_fixed31_32_floor( - dal_fixed31_32_abs( + uint16_t d = (uint16_t)dc_fixpt_floor( + dc_fixpt_abs( arg)); if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor)) - numerator = (uint16_t)dal_fixed31_32_round( - dal_fixed31_32_mul_int( + numerator = (uint16_t)dc_fixpt_round( + dc_fixpt_mul_int( arg, divisor)); else { - numerator = dal_fixed31_32_floor( - dal_fixed31_32_sub( - dal_fixed31_32_from_int( + numerator = dc_fixpt_floor( + dc_fixpt_sub( + dc_fixpt_from_int( 1LL << integer_bits), - dal_fixed31_32_recip( - dal_fixed31_32_from_int( + dc_fixpt_recip( + dc_fixpt_from_int( divisor)))); } @@ -66,8 +66,8 @@ uint16_t fixed_point_to_int_frac( result = (uint16_t)( (1 << (integer_bits + fractional_bits + 1)) + numerator); - if ((result != 0) && dal_fixed31_32_lt( - arg, dal_fixed31_32_zero)) + if ((result != 0) && dc_fixpt_lt( + arg, dc_fixpt_zero)) result |= 1 << (integer_bits + fractional_bits); return result; @@ -84,15 +84,15 @@ void convert_float_matrix( uint32_t buffer_size) { const struct fixed31_32 min_2_13 = - dal_fixed31_32_from_fraction(S2D13_MIN, DIVIDER); + dc_fixpt_from_fraction(S2D13_MIN, DIVIDER); const struct fixed31_32 max_2_13 = - dal_fixed31_32_from_fraction(S2D13_MAX, DIVIDER); + dc_fixpt_from_fraction(S2D13_MAX, DIVIDER); uint32_t i; for (i = 0; i < buffer_size; ++i) { uint32_t reg_value = fixed_point_to_int_frac( - dal_fixed31_32_clamp( + dc_fixpt_clamp( flt[i], min_2_13, max_2_13), diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c index 7191c3213743..e398ecdf742c 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c @@ -64,7 +64,7 @@ static inline unsigned long long complete_integer_division_u64( #define GET_FRACTIONAL_PART(x) \ (FRACTIONAL_PART_MASK & (x)) -struct fixed31_32 dal_fixed31_32_from_fraction( +struct fixed31_32 dc_fixpt_from_fraction( long long numerator, long long denominator) { @@ -118,7 +118,7 @@ struct fixed31_32 dal_fixed31_32_from_fraction( return res; } -struct fixed31_32 dal_fixed31_32_from_int_nonconst( +struct fixed31_32 dc_fixpt_from_int_nonconst( long long arg) { struct fixed31_32 res; @@ -130,7 +130,7 @@ struct fixed31_32 dal_fixed31_32_from_int_nonconst( return res; } -struct fixed31_32 dal_fixed31_32_shl( +struct fixed31_32 dc_fixpt_shl( struct fixed31_32 arg, unsigned char shift) { @@ -144,7 +144,7 @@ struct fixed31_32 dal_fixed31_32_shl( return res; } -struct fixed31_32 dal_fixed31_32_add( +struct fixed31_32 dc_fixpt_add( struct fixed31_32 arg1, struct fixed31_32 arg2) { @@ -158,7 +158,7 @@ struct fixed31_32 dal_fixed31_32_add( return res; } -struct fixed31_32 dal_fixed31_32_sub( +struct fixed31_32 dc_fixpt_sub( struct fixed31_32 arg1, struct fixed31_32 arg2) { @@ -172,7 +172,7 @@ struct fixed31_32 dal_fixed31_32_sub( return res; } -struct fixed31_32 dal_fixed31_32_mul( +struct fixed31_32 dc_fixpt_mul( struct fixed31_32 arg1, struct fixed31_32 arg2) { @@ 
-213,7 +213,7 @@ struct fixed31_32 dal_fixed31_32_mul( tmp = arg1_fra * arg2_fra; tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + - (tmp >= (unsigned long long)dal_fixed31_32_half.value); + (tmp >= (unsigned long long)dc_fixpt_half.value); ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); @@ -225,7 +225,7 @@ struct fixed31_32 dal_fixed31_32_mul( return res; } -struct fixed31_32 dal_fixed31_32_sqr( +struct fixed31_32 dc_fixpt_sqr( struct fixed31_32 arg) { struct fixed31_32 res; @@ -257,7 +257,7 @@ struct fixed31_32 dal_fixed31_32_sqr( tmp = arg_fra * arg_fra; tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) + - (tmp >= (unsigned long long)dal_fixed31_32_half.value); + (tmp >= (unsigned long long)dc_fixpt_half.value); ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value)); @@ -266,7 +266,7 @@ struct fixed31_32 dal_fixed31_32_sqr( return res; } -struct fixed31_32 dal_fixed31_32_recip( +struct fixed31_32 dc_fixpt_recip( struct fixed31_32 arg) { /* @@ -276,41 +276,41 @@ struct fixed31_32 dal_fixed31_32_recip( ASSERT(arg.value); - return dal_fixed31_32_from_fraction( - dal_fixed31_32_one.value, + return dc_fixpt_from_fraction( + dc_fixpt_one.value, arg.value); } -struct fixed31_32 dal_fixed31_32_sinc( +struct fixed31_32 dc_fixpt_sinc( struct fixed31_32 arg) { struct fixed31_32 square; - struct fixed31_32 res = dal_fixed31_32_one; + struct fixed31_32 res = dc_fixpt_one; int n = 27; struct fixed31_32 arg_norm = arg; - if (dal_fixed31_32_le( - dal_fixed31_32_two_pi, - dal_fixed31_32_abs(arg))) { - arg_norm = dal_fixed31_32_sub( + if (dc_fixpt_le( + dc_fixpt_two_pi, + dc_fixpt_abs(arg))) { + arg_norm = dc_fixpt_sub( arg_norm, - dal_fixed31_32_mul_int( - dal_fixed31_32_two_pi, + dc_fixpt_mul_int( + dc_fixpt_two_pi, (int)div64_s64( arg_norm.value, - dal_fixed31_32_two_pi.value))); + dc_fixpt_two_pi.value))); } - square = dal_fixed31_32_sqr(arg_norm); + square = dc_fixpt_sqr(arg_norm); do { - res = dal_fixed31_32_sub( - dal_fixed31_32_one, - dal_fixed31_32_div_int( - dal_fixed31_32_mul( + res = dc_fixpt_sub( + dc_fixpt_one, + dc_fixpt_div_int( + dc_fixpt_mul( square, res), n * (n - 1))); @@ -319,37 +319,37 @@ struct fixed31_32 dal_fixed31_32_sinc( } while (n > 2); if (arg.value != arg_norm.value) - res = dal_fixed31_32_div( - dal_fixed31_32_mul(res, arg_norm), + res = dc_fixpt_div( + dc_fixpt_mul(res, arg_norm), arg); return res; } -struct fixed31_32 dal_fixed31_32_sin( +struct fixed31_32 dc_fixpt_sin( struct fixed31_32 arg) { - return dal_fixed31_32_mul( + return dc_fixpt_mul( arg, - dal_fixed31_32_sinc(arg)); + dc_fixpt_sinc(arg)); } -struct fixed31_32 dal_fixed31_32_cos( +struct fixed31_32 dc_fixpt_cos( struct fixed31_32 arg) { /* TODO implement argument normalization */ - const struct fixed31_32 square = dal_fixed31_32_sqr(arg); + const struct fixed31_32 square = dc_fixpt_sqr(arg); - struct fixed31_32 res = dal_fixed31_32_one; + struct fixed31_32 res = dc_fixpt_one; int n = 26; do { - res = dal_fixed31_32_sub( - dal_fixed31_32_one, - dal_fixed31_32_div_int( - dal_fixed31_32_mul( + res = dc_fixpt_sub( + dc_fixpt_one, + dc_fixpt_div_int( + dc_fixpt_mul( square, res), n * (n - 1))); @@ -372,31 +372,31 @@ static struct fixed31_32 fixed31_32_exp_from_taylor_series( { unsigned int n = 9; - struct fixed31_32 res = dal_fixed31_32_from_fraction( + struct fixed31_32 res = dc_fixpt_from_fraction( n + 2, n + 1); /* TODO find correct res */ - ASSERT(dal_fixed31_32_lt(arg, dal_fixed31_32_one)); + ASSERT(dc_fixpt_lt(arg, dc_fixpt_one)); do - res = dal_fixed31_32_add( - dal_fixed31_32_one, - 
dal_fixed31_32_div_int( - dal_fixed31_32_mul( + res = dc_fixpt_add( + dc_fixpt_one, + dc_fixpt_div_int( + dc_fixpt_mul( arg, res), n)); while (--n != 1); - return dal_fixed31_32_add( - dal_fixed31_32_one, - dal_fixed31_32_mul( + return dc_fixpt_add( + dc_fixpt_one, + dc_fixpt_mul( arg, res)); } -struct fixed31_32 dal_fixed31_32_exp( +struct fixed31_32 dc_fixpt_exp( struct fixed31_32 arg) { /* @@ -406,44 +406,44 @@ struct fixed31_32 dal_fixed31_32_exp( * where m = round(x / ln(2)), r = x - m * ln(2) */ - if (dal_fixed31_32_le( - dal_fixed31_32_ln2_div_2, - dal_fixed31_32_abs(arg))) { - int m = dal_fixed31_32_round( - dal_fixed31_32_div( + if (dc_fixpt_le( + dc_fixpt_ln2_div_2, + dc_fixpt_abs(arg))) { + int m = dc_fixpt_round( + dc_fixpt_div( arg, - dal_fixed31_32_ln2)); + dc_fixpt_ln2)); - struct fixed31_32 r = dal_fixed31_32_sub( + struct fixed31_32 r = dc_fixpt_sub( arg, - dal_fixed31_32_mul_int( - dal_fixed31_32_ln2, + dc_fixpt_mul_int( + dc_fixpt_ln2, m)); ASSERT(m != 0); - ASSERT(dal_fixed31_32_lt( - dal_fixed31_32_abs(r), - dal_fixed31_32_one)); + ASSERT(dc_fixpt_lt( + dc_fixpt_abs(r), + dc_fixpt_one)); if (m > 0) - return dal_fixed31_32_shl( + return dc_fixpt_shl( fixed31_32_exp_from_taylor_series(r), (unsigned char)m); else - return dal_fixed31_32_div_int( + return dc_fixpt_div_int( fixed31_32_exp_from_taylor_series(r), 1LL << -m); } else if (arg.value != 0) return fixed31_32_exp_from_taylor_series(arg); else - return dal_fixed31_32_one; + return dc_fixpt_one; } -struct fixed31_32 dal_fixed31_32_log( +struct fixed31_32 dc_fixpt_log( struct fixed31_32 arg) { - struct fixed31_32 res = dal_fixed31_32_neg(dal_fixed31_32_one); + struct fixed31_32 res = dc_fixpt_neg(dc_fixpt_one); /* TODO improve 1st estimation */ struct fixed31_32 error; @@ -453,15 +453,15 @@ struct fixed31_32 dal_fixed31_32_log( /* TODO if arg is zero, return -INF */ do { - struct fixed31_32 res1 = dal_fixed31_32_add( - dal_fixed31_32_sub( + struct fixed31_32 res1 = dc_fixpt_add( + dc_fixpt_sub( res, - dal_fixed31_32_one), - dal_fixed31_32_div( + dc_fixpt_one), + dc_fixpt_div( arg, - dal_fixed31_32_exp(res))); + dc_fixpt_exp(res))); - error = dal_fixed31_32_sub( + error = dc_fixpt_sub( res, res1); @@ -472,17 +472,17 @@ struct fixed31_32 dal_fixed31_32_log( return res; } -struct fixed31_32 dal_fixed31_32_pow( +struct fixed31_32 dc_fixpt_pow( struct fixed31_32 arg1, struct fixed31_32 arg2) { - return dal_fixed31_32_exp( - dal_fixed31_32_mul( - dal_fixed31_32_log(arg1), + return dc_fixpt_exp( + dc_fixpt_mul( + dc_fixpt_log(arg1), arg2)); } -int dal_fixed31_32_floor( +int dc_fixpt_floor( struct fixed31_32 arg) { unsigned long long arg_value = abs_i64(arg.value); @@ -493,12 +493,12 @@ int dal_fixed31_32_floor( return -(int)GET_INTEGER_PART(arg_value); } -int dal_fixed31_32_round( +int dc_fixpt_round( struct fixed31_32 arg) { unsigned long long arg_value = abs_i64(arg.value); - const long long summand = dal_fixed31_32_half.value; + const long long summand = dc_fixpt_half.value; ASSERT(LLONG_MAX - (long long)arg_value >= summand); @@ -510,13 +510,13 @@ int dal_fixed31_32_round( return -(int)GET_INTEGER_PART(arg_value); } -int dal_fixed31_32_ceil( +int dc_fixpt_ceil( struct fixed31_32 arg) { unsigned long long arg_value = abs_i64(arg.value); - const long long summand = dal_fixed31_32_one.value - - dal_fixed31_32_epsilon.value; + const long long summand = dc_fixpt_one.value - + dc_fixpt_epsilon.value; ASSERT(LLONG_MAX - (long long)arg_value >= summand); @@ -531,7 +531,7 @@ int dal_fixed31_32_ceil( /* this function is a generic 
helper to translate fixed point value to * specified integer format that will consist of integer_bits integer part and * fractional_bits fractional part. For example it is used in - * dal_fixed31_32_u2d19 to receive 2 bits integer part and 19 bits fractional + * dc_fixpt_u2d19 to receive 2 bits integer part and 19 bits fractional * part in 32 bits. It is used in hw programming (scaler) */ @@ -570,35 +570,35 @@ static inline unsigned int clamp_ux_dy( return min_clamp; } -unsigned int dal_fixed31_32_u2d19( +unsigned int dc_fixpt_u2d19( struct fixed31_32 arg) { return ux_dy(arg.value, 2, 19); } -unsigned int dal_fixed31_32_u0d19( +unsigned int dc_fixpt_u0d19( struct fixed31_32 arg) { return ux_dy(arg.value, 0, 19); } -unsigned int dal_fixed31_32_clamp_u0d14( +unsigned int dc_fixpt_clamp_u0d14( struct fixed31_32 arg) { return clamp_ux_dy(arg.value, 0, 14, 1); } -unsigned int dal_fixed31_32_clamp_u0d10( +unsigned int dc_fixpt_clamp_u0d10( struct fixed31_32 arg) { return clamp_ux_dy(arg.value, 0, 10, 1); } -int dal_fixed31_32_s4d19( +int dc_fixpt_s4d19( struct fixed31_32 arg) { if (arg.value < 0) - return -(int)ux_dy(dal_fixed31_32_abs(arg).value, 4, 19); + return -(int)ux_dy(dc_fixpt_abs(arg).value, 4, 19); else return ux_dy(arg.value, 4, 19); } diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c deleted file mode 100644 index 4d3aaa82a07b..000000000000 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt32_32.c +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "include/fixed32_32.h" - -static uint64_t u64_div(uint64_t n, uint64_t d) -{ - uint32_t i = 0; - uint64_t r; - uint64_t q = div64_u64_rem(n, d, &r); - - for (i = 0; i < 32; ++i) { - uint64_t sbit = q & (1ULL<<63); - - r <<= 1; - r |= sbit ? 
1 : 0; - q <<= 1; - if (r >= d) { - r -= d; - q |= 1; - } - } - - if (2*r >= d) - q += 1; - return q; -} - -struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d) -{ - struct fixed32_32 fx; - - fx.value = u64_div((uint64_t)n << 32, (uint64_t)d << 32); - return fx; -} - -struct fixed32_32 dal_fixed32_32_add( - struct fixed32_32 lhs, - struct fixed32_32 rhs) -{ - struct fixed32_32 fx = {lhs.value + rhs.value}; - - ASSERT(fx.value >= rhs.value); - return fx; -} - -struct fixed32_32 dal_fixed32_32_add_int(struct fixed32_32 lhs, uint32_t rhs) -{ - struct fixed32_32 fx = {lhs.value + ((uint64_t)rhs << 32)}; - - ASSERT(fx.value >= (uint64_t)rhs << 32); - return fx; - -} -struct fixed32_32 dal_fixed32_32_sub( - struct fixed32_32 lhs, - struct fixed32_32 rhs) -{ - struct fixed32_32 fx; - - ASSERT(lhs.value >= rhs.value); - fx.value = lhs.value - rhs.value; - return fx; -} - -struct fixed32_32 dal_fixed32_32_sub_int(struct fixed32_32 lhs, uint32_t rhs) -{ - struct fixed32_32 fx; - - ASSERT(lhs.value >= ((uint64_t)rhs<<32)); - fx.value = lhs.value - ((uint64_t)rhs<<32); - return fx; -} - -struct fixed32_32 dal_fixed32_32_mul( - struct fixed32_32 lhs, - struct fixed32_32 rhs) -{ - struct fixed32_32 fx; - uint64_t lhs_int = lhs.value>>32; - uint64_t lhs_frac = (uint32_t)lhs.value; - uint64_t rhs_int = rhs.value>>32; - uint64_t rhs_frac = (uint32_t)rhs.value; - uint64_t ahbh = lhs_int * rhs_int; - uint64_t ahbl = lhs_int * rhs_frac; - uint64_t albh = lhs_frac * rhs_int; - uint64_t albl = lhs_frac * rhs_frac; - - ASSERT((ahbh>>32) == 0); - - fx.value = (ahbh<<32) + ahbl + albh + (albl>>32); - return fx; - -} - -struct fixed32_32 dal_fixed32_32_mul_int(struct fixed32_32 lhs, uint32_t rhs) -{ - struct fixed32_32 fx; - uint64_t lhsi = (lhs.value>>32) * (uint64_t)rhs; - uint64_t lhsf; - - ASSERT((lhsi>>32) == 0); - lhsf = ((uint32_t)lhs.value) * (uint64_t)rhs; - ASSERT((lhsi<<32) + lhsf >= lhsf); - fx.value = (lhsi<<32) + lhsf; - return fx; -} - -struct fixed32_32 dal_fixed32_32_div( - struct fixed32_32 lhs, - struct fixed32_32 rhs) -{ - struct fixed32_32 fx; - - fx.value = u64_div(lhs.value, rhs.value); - return fx; -} - -struct fixed32_32 dal_fixed32_32_div_int(struct fixed32_32 lhs, uint32_t rhs) -{ - struct fixed32_32 fx; - - fx.value = u64_div(lhs.value, (uint64_t)rhs << 32); - return fx; -} - -uint32_t dal_fixed32_32_ceil(struct fixed32_32 v) -{ - ASSERT((uint32_t)v.value ? (v.value >> 32) + 1 >= 1 : true); - return (v.value>>32) + ((uint32_t)v.value ? 
1 : 0); -} - -uint32_t dal_fixed32_32_round(struct fixed32_32 v) -{ - ASSERT(v.value + (1ULL<<31) >= (1ULL<<31)); - return (v.value + (1ULL<<31))>>32; -} - diff --git a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c index 7243c37f569e..31d167bc548f 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c @@ -36,41 +36,41 @@ static bool build_custom_float( uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1; const struct fixed31_32 mantissa_constant_plus_max_fraction = - dal_fixed31_32_from_fraction( + dc_fixpt_from_fraction( (1LL << (format->mantissa_bits + 1)) - 1, 1LL << format->mantissa_bits); struct fixed31_32 mantiss; - if (dal_fixed31_32_eq( + if (dc_fixpt_eq( value, - dal_fixed31_32_zero)) { + dc_fixpt_zero)) { *negative = false; *mantissa = 0; *exponenta = 0; return true; } - if (dal_fixed31_32_lt( + if (dc_fixpt_lt( value, - dal_fixed31_32_zero)) { + dc_fixpt_zero)) { *negative = format->sign; - value = dal_fixed31_32_neg(value); + value = dc_fixpt_neg(value); } else { *negative = false; } - if (dal_fixed31_32_lt( + if (dc_fixpt_lt( value, - dal_fixed31_32_one)) { + dc_fixpt_one)) { uint32_t i = 1; do { - value = dal_fixed31_32_shl(value, 1); + value = dc_fixpt_shl(value, 1); ++i; - } while (dal_fixed31_32_lt( + } while (dc_fixpt_lt( value, - dal_fixed31_32_one)); + dc_fixpt_one)); --i; @@ -81,15 +81,15 @@ static bool build_custom_float( } *exponenta = exp_offset - i; - } else if (dal_fixed31_32_le( + } else if (dc_fixpt_le( mantissa_constant_plus_max_fraction, value)) { uint32_t i = 1; do { - value = dal_fixed31_32_shr(value, 1); + value = dc_fixpt_shr(value, 1); ++i; - } while (dal_fixed31_32_lt( + } while (dc_fixpt_lt( mantissa_constant_plus_max_fraction, value)); @@ -98,23 +98,23 @@ static bool build_custom_float( *exponenta = exp_offset; } - mantiss = dal_fixed31_32_sub( + mantiss = dc_fixpt_sub( value, - dal_fixed31_32_one); + dc_fixpt_one); - if (dal_fixed31_32_lt( + if (dc_fixpt_lt( mantiss, - dal_fixed31_32_zero) || - dal_fixed31_32_lt( - dal_fixed31_32_one, + dc_fixpt_zero) || + dc_fixpt_lt( + dc_fixpt_one, mantiss)) - mantiss = dal_fixed31_32_zero; + mantiss = dc_fixpt_zero; else - mantiss = dal_fixed31_32_shl( + mantiss = dc_fixpt_shl( mantiss, format->mantissa_bits); - *mantissa = dal_fixed31_32_floor(mantiss); + *mantissa = dc_fixpt_floor(mantiss); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index a102c192328d..49a4ea45466d 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -873,14 +873,14 @@ bool dcn_validate_bandwidth( } if (pipe->plane_state->rotation % 2 == 0) { - ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value + ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value || v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]); - ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value + ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value || v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]); } else { - ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dal_fixed31_32_one.value + ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value || v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]); - 
ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dal_fixed31_32_one.value + ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]); } v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? dcn_bw_yes : dcn_bw_no; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index b44cf52090a5..ea5d5ffd5522 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -631,7 +631,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) /* Need to setup mst link_cap struct here * otherwise dc_link_detect() will leave mst link_cap * empty which leads to allocate_mst_payload() has "0" - * pbn_per_slot value leading to exception on dal_fixed31_32_div() + * pbn_per_slot value leading to exception on dc_fixpt_div() */ link->verified_link_cap = link->reported_link_cap; return false; @@ -2059,10 +2059,10 @@ static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) &stream->sink->link->cur_link_settings; uint32_t link_rate_in_mbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ; - struct fixed31_32 mbps = dal_fixed31_32_from_int( + struct fixed31_32 mbps = dc_fixpt_from_int( link_rate_in_mbps * link_settings->lane_count); - return dal_fixed31_32_div_int(mbps, 54); + return dc_fixpt_div_int(mbps, 54); } static int get_color_depth(enum dc_color_depth color_depth) @@ -2103,7 +2103,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) numerator = 64 * PEAK_FACTOR_X1000; denominator = 54 * 8 * 1000 * 1000; kbps *= numerator; - peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator); + peak_kbps = dc_fixpt_from_fraction(kbps, denominator); return peak_kbps; } @@ -2230,7 +2230,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); pbn = get_pbn_from_timing(pipe_ctx); - avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); stream_encoder->funcs->set_mst_bandwidth( stream_encoder, @@ -2247,7 +2247,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) struct link_encoder *link_encoder = link->link_enc; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; - struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0); + struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); uint8_t i; bool mst_mode = (link->type == dc_connection_mst_branch); DC_LOGGER_INIT(link->ctx->logger); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 345835ff58d1..082458f2097c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -496,9 +496,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx) data->viewport_c.x = data->viewport.x / vpc_div; data->viewport_c.y = data->viewport.y / vpc_div; data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? - dal_fixed31_32_half : dal_fixed31_32_zero; + dc_fixpt_half : dc_fixpt_zero; data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? 
- dal_fixed31_32_half : dal_fixed31_32_zero; + dc_fixpt_half : dc_fixpt_zero; /* Round up, assume original video size always even dimensions */ data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div; data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div; @@ -627,10 +627,10 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) rect_swap_helper(&surf_src); - pipe_ctx->plane_res.scl_data.ratios.horz = dal_fixed31_32_from_fraction( + pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_from_fraction( surf_src.width, plane_state->dst_rect.width); - pipe_ctx->plane_res.scl_data.ratios.vert = dal_fixed31_32_from_fraction( + pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_from_fraction( surf_src.height, plane_state->dst_rect.height); @@ -688,32 +688,32 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r * init_bot = init + scaling_ratio * init_c = init + truncated_vp_c_offset(from calculate viewport) */ - data->inits.h = dal_fixed31_32_div_int( - dal_fixed31_32_add_int(data->ratios.horz, data->taps.h_taps + 1), 2); + data->inits.h = dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2); - data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_div_int( - dal_fixed31_32_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)); + data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)); - data->inits.v = dal_fixed31_32_div_int( - dal_fixed31_32_add_int(data->ratios.vert, data->taps.v_taps + 1), 2); + data->inits.v = dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2); - data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_div_int( - dal_fixed31_32_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)); + data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)); /* Adjust for viewport end clip-off */ if ((data->viewport.x + data->viewport.width) < (src.x + src.width) && !flip_horz_scan_dir) { int vp_clip = src.x + src.width - data->viewport.width - data->viewport.x; - int int_part = dal_fixed31_32_floor( - dal_fixed31_32_sub(data->inits.h, data->ratios.horz)); + int int_part = dc_fixpt_floor( + dc_fixpt_sub(data->inits.h, data->ratios.horz)); int_part = int_part > 0 ? int_part : 0; data->viewport.width += int_part < vp_clip ? int_part : vp_clip; } if ((data->viewport.y + data->viewport.height) < (src.y + src.height) && !flip_vert_scan_dir) { int vp_clip = src.y + src.height - data->viewport.height - data->viewport.y; - int int_part = dal_fixed31_32_floor( - dal_fixed31_32_sub(data->inits.v, data->ratios.vert)); + int int_part = dc_fixpt_floor( + dc_fixpt_sub(data->inits.v, data->ratios.vert)); int_part = int_part > 0 ? int_part : 0; data->viewport.height += int_part < vp_clip ? int_part : vp_clip; @@ -721,8 +721,8 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r if ((data->viewport_c.x + data->viewport_c.width) < (src.x + src.width) / vpc_div && !flip_horz_scan_dir) { int vp_clip = (src.x + src.width) / vpc_div - data->viewport_c.width - data->viewport_c.x; - int int_part = dal_fixed31_32_floor( - dal_fixed31_32_sub(data->inits.h_c, data->ratios.horz_c)); + int int_part = dc_fixpt_floor( + dc_fixpt_sub(data->inits.h_c, data->ratios.horz_c)); int_part = int_part > 0 ? 
int_part : 0; data->viewport_c.width += int_part < vp_clip ? int_part : vp_clip; @@ -730,8 +730,8 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r if ((data->viewport_c.y + data->viewport_c.height) < (src.y + src.height) / vpc_div && !flip_vert_scan_dir) { int vp_clip = (src.y + src.height) / vpc_div - data->viewport_c.height - data->viewport_c.y; - int int_part = dal_fixed31_32_floor( - dal_fixed31_32_sub(data->inits.v_c, data->ratios.vert_c)); + int int_part = dc_fixpt_floor( + dc_fixpt_sub(data->inits.v_c, data->ratios.vert_c)); int_part = int_part > 0 ? int_part : 0; data->viewport_c.height += int_part < vp_clip ? int_part : vp_clip; @@ -741,9 +741,9 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r if (data->viewport.x && !flip_horz_scan_dir) { int int_part; - data->inits.h = dal_fixed31_32_add(data->inits.h, dal_fixed31_32_mul_int( + data->inits.h = dc_fixpt_add(data->inits.h, dc_fixpt_mul_int( data->ratios.horz, recout_skip->width)); - int_part = dal_fixed31_32_floor(data->inits.h) - data->viewport.x; + int_part = dc_fixpt_floor(data->inits.h) - data->viewport.x; if (int_part < data->taps.h_taps) { int int_adj = data->viewport.x >= (data->taps.h_taps - int_part) ? (data->taps.h_taps - int_part) : data->viewport.x; @@ -756,15 +756,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r int_part = data->taps.h_taps; } data->inits.h.value &= 0xffffffff; - data->inits.h = dal_fixed31_32_add_int(data->inits.h, int_part); + data->inits.h = dc_fixpt_add_int(data->inits.h, int_part); } if (data->viewport_c.x && !flip_horz_scan_dir) { int int_part; - data->inits.h_c = dal_fixed31_32_add(data->inits.h_c, dal_fixed31_32_mul_int( + data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_mul_int( data->ratios.horz_c, recout_skip->width)); - int_part = dal_fixed31_32_floor(data->inits.h_c) - data->viewport_c.x; + int_part = dc_fixpt_floor(data->inits.h_c) - data->viewport_c.x; if (int_part < data->taps.h_taps_c) { int int_adj = data->viewport_c.x >= (data->taps.h_taps_c - int_part) ? (data->taps.h_taps_c - int_part) : data->viewport_c.x; @@ -777,15 +777,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r int_part = data->taps.h_taps_c; } data->inits.h_c.value &= 0xffffffff; - data->inits.h_c = dal_fixed31_32_add_int(data->inits.h_c, int_part); + data->inits.h_c = dc_fixpt_add_int(data->inits.h_c, int_part); } if (data->viewport.y && !flip_vert_scan_dir) { int int_part; - data->inits.v = dal_fixed31_32_add(data->inits.v, dal_fixed31_32_mul_int( + data->inits.v = dc_fixpt_add(data->inits.v, dc_fixpt_mul_int( data->ratios.vert, recout_skip->height)); - int_part = dal_fixed31_32_floor(data->inits.v) - data->viewport.y; + int_part = dc_fixpt_floor(data->inits.v) - data->viewport.y; if (int_part < data->taps.v_taps) { int int_adj = data->viewport.y >= (data->taps.v_taps - int_part) ? 
(data->taps.v_taps - int_part) : data->viewport.y; @@ -798,15 +798,15 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r int_part = data->taps.v_taps; } data->inits.v.value &= 0xffffffff; - data->inits.v = dal_fixed31_32_add_int(data->inits.v, int_part); + data->inits.v = dc_fixpt_add_int(data->inits.v, int_part); } if (data->viewport_c.y && !flip_vert_scan_dir) { int int_part; - data->inits.v_c = dal_fixed31_32_add(data->inits.v_c, dal_fixed31_32_mul_int( + data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_mul_int( data->ratios.vert_c, recout_skip->height)); - int_part = dal_fixed31_32_floor(data->inits.v_c) - data->viewport_c.y; + int_part = dc_fixpt_floor(data->inits.v_c) - data->viewport_c.y; if (int_part < data->taps.v_taps_c) { int int_adj = data->viewport_c.y >= (data->taps.v_taps_c - int_part) ? (data->taps.v_taps_c - int_part) : data->viewport_c.y; @@ -819,12 +819,12 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r int_part = data->taps.v_taps_c; } data->inits.v_c.value &= 0xffffffff; - data->inits.v_c = dal_fixed31_32_add_int(data->inits.v_c, int_part); + data->inits.v_c = dc_fixpt_add_int(data->inits.v_c, int_part); } /* Interlaced inits based on final vert inits */ - data->inits.v_bot = dal_fixed31_32_add(data->inits.v, data->ratios.vert); - data->inits.v_c_bot = dal_fixed31_32_add(data->inits.v_c, data->ratios.vert_c); + data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert); + data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 || pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 2726b02e006b..90bccd5ccaa2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -26,6 +26,8 @@ #ifndef DC_DP_TYPES_H #define DC_DP_TYPES_H +#include "os_types.h" + enum dc_lane_count { LANE_COUNT_UNKNOWN = 0, LANE_COUNT_ONE = 1, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 9defe3b17617..76df2534c4a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -25,7 +25,7 @@ #ifndef DC_TYPES_H_ #define DC_TYPES_H_ -#include "fixed32_32.h" +#include "os_types.h" #include "fixed31_32.h" #include "irq_types.h" #include "dc_dp_types.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index fe92a1222803..29294db1a96b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -26,7 +26,7 @@ #include "dce_abm.h" #include "dm_services.h" #include "reg_helper.h" -#include "fixed32_32.h" +#include "fixed31_32.h" #include "dc.h" #include "atom.h" diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c index 0570e7e4d0a0..599c7ab6befe 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c @@ -657,12 +657,12 @@ static uint32_t dce110_get_d_to_pixel_rate_in_hz( return 0; } - pix_rate = dal_fixed31_32_from_int(clk_src->ref_freq_khz); - pix_rate = dal_fixed31_32_mul_int(pix_rate, 1000); - pix_rate = dal_fixed31_32_mul_int(pix_rate, phase); - pix_rate = dal_fixed31_32_div_int(pix_rate, modulo); + pix_rate = 
dc_fixpt_from_int(clk_src->ref_freq_khz); + pix_rate = dc_fixpt_mul_int(pix_rate, 1000); + pix_rate = dc_fixpt_mul_int(pix_rate, phase); + pix_rate = dc_fixpt_div_int(pix_rate, modulo); - return dal_fixed31_32_round(pix_rate); + return dc_fixpt_round(pix_rate); } else { return dce110_get_dp_pixel_rate_from_combo_phy_pll(cs, pix_clk_params, pll_settings); } @@ -711,12 +711,12 @@ static bool calculate_ss( const struct spread_spectrum_data *ss_data, struct delta_sigma_data *ds_data) { - struct fixed32_32 fb_div; - struct fixed32_32 ss_amount; - struct fixed32_32 ss_nslip_amount; - struct fixed32_32 ss_ds_frac_amount; - struct fixed32_32 ss_step_size; - struct fixed32_32 modulation_time; + struct fixed31_32 fb_div; + struct fixed31_32 ss_amount; + struct fixed31_32 ss_nslip_amount; + struct fixed31_32 ss_ds_frac_amount; + struct fixed31_32 ss_step_size; + struct fixed31_32 modulation_time; if (ds_data == NULL) return false; @@ -731,42 +731,42 @@ static bool calculate_ss( /* compute SS_AMOUNT_FBDIV & SS_AMOUNT_NFRAC_SLIP & SS_AMOUNT_DSFRAC*/ /* 6 decimal point support in fractional feedback divider */ - fb_div = dal_fixed32_32_from_fraction( + fb_div = dc_fixpt_from_fraction( pll_settings->fract_feedback_divider, 1000000); - fb_div = dal_fixed32_32_add_int(fb_div, pll_settings->feedback_divider); + fb_div = dc_fixpt_add_int(fb_div, pll_settings->feedback_divider); ds_data->ds_frac_amount = 0; /*spreadSpectrumPercentage is in the unit of .01%, * so have to divided by 100 * 100*/ - ss_amount = dal_fixed32_32_mul( - fb_div, dal_fixed32_32_from_fraction(ss_data->percentage, + ss_amount = dc_fixpt_mul( + fb_div, dc_fixpt_from_fraction(ss_data->percentage, 100 * ss_data->percentage_divider)); - ds_data->feedback_amount = dal_fixed32_32_floor(ss_amount); + ds_data->feedback_amount = dc_fixpt_floor(ss_amount); - ss_nslip_amount = dal_fixed32_32_sub(ss_amount, - dal_fixed32_32_from_int(ds_data->feedback_amount)); - ss_nslip_amount = dal_fixed32_32_mul_int(ss_nslip_amount, 10); - ds_data->nfrac_amount = dal_fixed32_32_floor(ss_nslip_amount); + ss_nslip_amount = dc_fixpt_sub(ss_amount, + dc_fixpt_from_int(ds_data->feedback_amount)); + ss_nslip_amount = dc_fixpt_mul_int(ss_nslip_amount, 10); + ds_data->nfrac_amount = dc_fixpt_floor(ss_nslip_amount); - ss_ds_frac_amount = dal_fixed32_32_sub(ss_nslip_amount, - dal_fixed32_32_from_int(ds_data->nfrac_amount)); - ss_ds_frac_amount = dal_fixed32_32_mul_int(ss_ds_frac_amount, 65536); - ds_data->ds_frac_amount = dal_fixed32_32_floor(ss_ds_frac_amount); + ss_ds_frac_amount = dc_fixpt_sub(ss_nslip_amount, + dc_fixpt_from_int(ds_data->nfrac_amount)); + ss_ds_frac_amount = dc_fixpt_mul_int(ss_ds_frac_amount, 65536); + ds_data->ds_frac_amount = dc_fixpt_floor(ss_ds_frac_amount); /* compute SS_STEP_SIZE_DSFRAC */ - modulation_time = dal_fixed32_32_from_fraction( + modulation_time = dc_fixpt_from_fraction( pll_settings->reference_freq * 1000, pll_settings->reference_divider * ss_data->modulation_freq_hz); if (ss_data->flags.CENTER_SPREAD) - modulation_time = dal_fixed32_32_div_int(modulation_time, 4); + modulation_time = dc_fixpt_div_int(modulation_time, 4); else - modulation_time = dal_fixed32_32_div_int(modulation_time, 2); + modulation_time = dc_fixpt_div_int(modulation_time, 2); - ss_step_size = dal_fixed32_32_div(ss_amount, modulation_time); + ss_step_size = dc_fixpt_div(ss_amount, modulation_time); /* SS_STEP_SIZE_DSFRAC_DEC = Int(SS_STEP_SIZE * 2 ^ 16 * 10)*/ - ss_step_size = dal_fixed32_32_mul_int(ss_step_size, 65536 * 10); - ds_data->ds_frac_size = 
dal_fixed32_32_floor(ss_step_size); + ss_step_size = dc_fixpt_mul_int(ss_step_size, 65536 * 10); + ds_data->ds_frac_size = dc_fixpt_floor(ss_step_size); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c index f043e5ea412c..8a581c67bf2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c @@ -26,7 +26,7 @@ #include "dce_clocks.h" #include "dm_services.h" #include "reg_helper.h" -#include "fixed32_32.h" +#include "fixed31_32.h" #include "bios_parser_interface.h" #include "dc.h" #include "dmcu.h" @@ -228,19 +228,19 @@ static int dce_clocks_get_dp_ref_freq(struct display_clock *clk) generated according to average value (case as with previous ASICs) */ if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) { - struct fixed32_32 ss_percentage = dal_fixed32_32_div_int( - dal_fixed32_32_from_fraction( + struct fixed31_32 ss_percentage = dc_fixpt_div_int( + dc_fixpt_from_fraction( clk_dce->dprefclk_ss_percentage, clk_dce->dprefclk_ss_divider), 200); - struct fixed32_32 adj_dp_ref_clk_khz; + struct fixed31_32 adj_dp_ref_clk_khz; - ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one, + ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); adj_dp_ref_clk_khz = - dal_fixed32_32_mul_int( + dc_fixpt_mul_int( ss_percentage, dp_ref_clk_khz); - dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz); + dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); } return dp_ref_clk_khz; @@ -256,19 +256,19 @@ static int dce_clocks_get_dp_ref_freq_wrkaround(struct display_clock *clk) int dp_ref_clk_khz = 600000; if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) { - struct fixed32_32 ss_percentage = dal_fixed32_32_div_int( - dal_fixed32_32_from_fraction( + struct fixed31_32 ss_percentage = dc_fixpt_div_int( + dc_fixpt_from_fraction( clk_dce->dprefclk_ss_percentage, clk_dce->dprefclk_ss_divider), 200); - struct fixed32_32 adj_dp_ref_clk_khz; + struct fixed31_32 adj_dp_ref_clk_khz; - ss_percentage = dal_fixed32_32_sub(dal_fixed32_32_one, + ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage); adj_dp_ref_clk_khz = - dal_fixed32_32_mul_int( + dc_fixpt_mul_int( ss_percentage, dp_ref_clk_khz); - dp_ref_clk_khz = dal_fixed32_32_floor(adj_dp_ref_clk_khz); + dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz); } return dp_ref_clk_khz; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 2ee3d9bf1062..a576b8bbb3cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -28,7 +28,7 @@ #include "dce_dmcu.h" #include "dm_services.h" #include "reg_helper.h" -#include "fixed32_32.h" +#include "fixed31_32.h" #include "dc.h" #define TO_DCE_DMCU(dmcu)\ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c index d737e911971b..5d9506b3d46b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_ipp.c @@ -195,13 +195,13 @@ static void dce_ipp_program_input_lut( for (i = 0; i < gamma->num_entries; i++) { REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR, - dal_fixed31_32_round( + dc_fixpt_round( gamma->entries.red[i])); REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR, - dal_fixed31_32_round( + dc_fixpt_round( gamma->entries.green[i])); REG_SET(DC_LUT_SEQ_COLOR, 0, DC_LUT_SEQ_COLOR, - dal_fixed31_32_round( + dc_fixpt_round( gamma->entries.blue[i])); } diff 
--git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c index 6243450b41b7..48862bebf29e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c @@ -1014,11 +1014,11 @@ static const uint16_t filter_8tap_64p_183[264] = { const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio) { - if (ratio.value < dal_fixed31_32_one.value) + if (ratio.value < dc_fixpt_one.value) return filter_3tap_16p_upscale; - else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_3tap_16p_117; - else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_3tap_16p_150; else return filter_3tap_16p_183; @@ -1026,11 +1026,11 @@ const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio) const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio) { - if (ratio.value < dal_fixed31_32_one.value) + if (ratio.value < dc_fixpt_one.value) return filter_3tap_64p_upscale; - else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_3tap_64p_117; - else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_3tap_64p_150; else return filter_3tap_64p_183; @@ -1038,11 +1038,11 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio) const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio) { - if (ratio.value < dal_fixed31_32_one.value) + if (ratio.value < dc_fixpt_one.value) return filter_4tap_16p_upscale; - else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_4tap_16p_117; - else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_4tap_16p_150; else return filter_4tap_16p_183; @@ -1050,11 +1050,11 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio) const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio) { - if (ratio.value < dal_fixed31_32_one.value) + if (ratio.value < dc_fixpt_one.value) return filter_4tap_64p_upscale; - else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_4tap_64p_117; - else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_4tap_64p_150; else return filter_4tap_64p_183; @@ -1062,11 +1062,11 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio) const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio) { - if (ratio.value < dal_fixed31_32_one.value) + if (ratio.value < dc_fixpt_one.value) return filter_5tap_64p_upscale; - else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_5tap_64p_117; - else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_5tap_64p_150; else return filter_5tap_64p_183; @@ -1074,11 +1074,11 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio) const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio) { - if (ratio.value < dal_fixed31_32_one.value) + if (ratio.value < dc_fixpt_one.value) 
return filter_6tap_64p_upscale; - else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_6tap_64p_117; - else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_6tap_64p_150; else return filter_6tap_64p_183; @@ -1086,11 +1086,11 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio) const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio) { - if (ratio.value < dal_fixed31_32_one.value) + if (ratio.value < dc_fixpt_one.value) return filter_7tap_64p_upscale; - else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_7tap_64p_117; - else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_7tap_64p_150; else return filter_7tap_64p_183; @@ -1098,11 +1098,11 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio) const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio) { - if (ratio.value < dal_fixed31_32_one.value) + if (ratio.value < dc_fixpt_one.value) return filter_8tap_64p_upscale; - else if (ratio.value < dal_fixed31_32_from_fraction(4, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(4, 3).value) return filter_8tap_64p_117; - else if (ratio.value < dal_fixed31_32_from_fraction(5, 3).value) + else if (ratio.value < dc_fixpt_from_fraction(5, 3).value) return filter_8tap_64p_150; else return filter_8tap_64p_183; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index e265a0abe361..0a6d483dc046 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -683,11 +683,11 @@ static void dce110_stream_encoder_set_mst_bandwidth( struct fixed31_32 avg_time_slots_per_mtp) { struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc); - uint32_t x = dal_fixed31_32_floor( + uint32_t x = dc_fixpt_floor( avg_time_slots_per_mtp); - uint32_t y = dal_fixed31_32_ceil( - dal_fixed31_32_shl( - dal_fixed31_32_sub_int( + uint32_t y = dc_fixpt_ceil( + dc_fixpt_shl( + dc_fixpt_sub_int( avg_time_slots_per_mtp, x), 26)); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c index 832c5daada35..a02e719d7794 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c @@ -41,7 +41,7 @@ #define DC_LOGGER \ xfm_dce->base.ctx->logger -#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19)) +#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) #define GAMUT_MATRIX_SIZE 12 #define SCL_PHASES 16 @@ -256,27 +256,27 @@ static void calculate_inits( struct fixed31_32 v_init; inits->h_int_scale_ratio = - dal_fixed31_32_u2d19(data->ratios.horz) << 5; + dc_fixpt_u2d19(data->ratios.horz) << 5; inits->v_int_scale_ratio = - dal_fixed31_32_u2d19(data->ratios.vert) << 5; + dc_fixpt_u2d19(data->ratios.vert) << 5; h_init = - dal_fixed31_32_div_int( - dal_fixed31_32_add( + dc_fixpt_div_int( + dc_fixpt_add( data->ratios.horz, - dal_fixed31_32_from_int(data->taps.h_taps + 1)), + dc_fixpt_from_int(data->taps.h_taps + 1)), 2); - inits->h_init.integer = dal_fixed31_32_floor(h_init); - inits->h_init.fraction = dal_fixed31_32_u0d19(h_init) << 5; + inits->h_init.integer = 
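Every get_filter_*() variant touched above selects its coefficient table from the same downscale-ratio thresholds (1, 4/3, 5/3). A plain-C restatement of that selection, with an invented enum standing in for the real coefficient arrays:

/* Rough restatement of the get_filter_*() selection logic; the enum is a
 * stand-in for the actual filter coefficient tables. */
enum scl_filter_bank {
	FILTER_UPSCALE,   /* ratio < 1           */
	FILTER_117,       /* 1   <= ratio < 4/3  */
	FILTER_150,       /* 4/3 <= ratio < 5/3  */
	FILTER_183,       /* ratio >= 5/3        */
};

static enum scl_filter_bank pick_filter_bank(double ratio)
{
	if (ratio < 1.0)
		return FILTER_UPSCALE;
	else if (ratio < 4.0 / 3.0)
		return FILTER_117;
	else if (ratio < 5.0 / 3.0)
		return FILTER_150;
	else
		return FILTER_183;
}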
dc_fixpt_floor(h_init); + inits->h_init.fraction = dc_fixpt_u0d19(h_init) << 5; v_init = - dal_fixed31_32_div_int( - dal_fixed31_32_add( + dc_fixpt_div_int( + dc_fixpt_add( data->ratios.vert, - dal_fixed31_32_from_int(data->taps.v_taps + 1)), + dc_fixpt_from_int(data->taps.v_taps + 1)), 2); - inits->v_init.integer = dal_fixed31_32_floor(v_init); - inits->v_init.fraction = dal_fixed31_32_u0d19(v_init) << 5; + inits->v_init.integer = dc_fixpt_floor(v_init); + inits->v_init.fraction = dc_fixpt_u0d19(v_init) << 5; } static void program_scl_ratios_inits( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index ae500421edb6..a92fb0aa2ff3 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -509,19 +509,19 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; - arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), - dal_fixed31_32_from_int(region_start)); - arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), - dal_fixed31_32_from_int(region_end)); + arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), + dc_fixpt_from_int(region_start)); + arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2), + dc_fixpt_from_int(region_end)); y_r = rgb_resulted[0].red; y_g = rgb_resulted[0].green; y_b = rgb_resulted[0].blue; - y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b)); + y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); arr_points[0].y = y1_min; - arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, + arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); y_r = rgb_resulted[hw_points - 1].red; @@ -531,21 +531,21 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, /* see comment above, m_arrPoints[1].y should be the Y value for the * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) */ - y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b)); + y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); arr_points[1].y = y3_max; - arr_points[1].slope = dal_fixed31_32_zero; + arr_points[1].slope = dc_fixpt_zero; if (output_tf->tf == TRANSFER_FUNCTION_PQ) { /* for PQ, we want to have a straight line from last HW X point, * and the slope to be such that we hit 1.0 at 10000 nits. 
*/ - const struct fixed31_32 end_value = dal_fixed31_32_from_int(125); + const struct fixed31_32 end_value = dc_fixpt_from_int(125); - arr_points[1].slope = dal_fixed31_32_div( - dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y), - dal_fixed31_32_sub(end_value, arr_points[1].x)); + arr_points[1].slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), + dc_fixpt_sub(end_value, arr_points[1].x)); } regamma_params->hw_points_num = hw_points; @@ -569,16 +569,16 @@ dce110_translate_regamma_to_hw_format(const struct dc_transfer_func *output_tf, i = 1; while (i != hw_points + 1) { - if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red)) + if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) rgb_plus_1->red = rgb->red; - if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green)) + if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) rgb_plus_1->green = rgb->green; - if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue)) + if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) rgb_plus_1->blue = rgb->blue; - rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red); - rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green); - rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue); + rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); + rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); + rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); ++rgb_plus_1; ++rgb; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c index 8ba3c12fc608..a7dce060204f 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c @@ -373,13 +373,13 @@ static void calculate_inits( struct rect *chroma_viewport) { inits->h_int_scale_ratio_luma = - dal_fixed31_32_u2d19(data->ratios.horz) << 5; + dc_fixpt_u2d19(data->ratios.horz) << 5; inits->v_int_scale_ratio_luma = - dal_fixed31_32_u2d19(data->ratios.vert) << 5; + dc_fixpt_u2d19(data->ratios.vert) << 5; inits->h_int_scale_ratio_chroma = - dal_fixed31_32_u2d19(data->ratios.horz_c) << 5; + dc_fixpt_u2d19(data->ratios.horz_c) << 5; inits->v_int_scale_ratio_chroma = - dal_fixed31_32_u2d19(data->ratios.vert_c) << 5; + dc_fixpt_u2d19(data->ratios.vert_c) << 5; inits->h_init_luma.integer = 1; inits->v_init_luma.integer = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c index 96d5878e9ccd..5d95a997fd9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c @@ -169,7 +169,7 @@ bool cm_helper_convert_to_custom_float( } if (fixpoint == true) - arr_points[1].custom_float_y = dal_fixed31_32_clamp_u0d14(arr_points[1].y); + arr_points[1].custom_float_y = dc_fixpt_clamp_u0d14(arr_points[1].y); else if (!convert_to_custom_float_format(arr_points[1].y, &fmt, &arr_points[1].custom_float_y)) { BREAK_TO_DEBUGGER(); @@ -327,19 +327,19 @@ bool cm_helper_translate_curve_to_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; - arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), - dal_fixed31_32_from_int(region_start)); - arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), - dal_fixed31_32_from_int(region_end)); + arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), + dc_fixpt_from_int(region_start)); + arr_points[1].x = 
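The PQ end-point handling above is a straight segment from the last hardware point to (125, 1.0); the end value of 125 appears to correspond to 10000 nits when luminance is normalized to an 80 nit SDR white (10000 / 80 = 125), matching the 80-based scaling used elsewhere in this patch. A throwaway double-precision sketch of that slope computation (the helper name is invented):

/* Sketch: slope of the straight segment appended for PQ so that the curve
 * reaches 1.0 at x = end_value. */
static double pq_tail_slope(double x_end, double y_end)
{
	const double end_value = 125.0;   /* matches dc_fixpt_from_int(125) above */

	return (1.0 - y_end) / (end_value - x_end);
}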
dc_fixpt_pow(dc_fixpt_from_int(2), + dc_fixpt_from_int(region_end)); y_r = rgb_resulted[0].red; y_g = rgb_resulted[0].green; y_b = rgb_resulted[0].blue; - y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b)); + y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); arr_points[0].y = y1_min; - arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x); + arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); y_r = rgb_resulted[hw_points - 1].red; y_g = rgb_resulted[hw_points - 1].green; y_b = rgb_resulted[hw_points - 1].blue; @@ -347,22 +347,22 @@ bool cm_helper_translate_curve_to_hw_format( /* see comment above, m_arrPoints[1].y should be the Y value for the * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) */ - y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b)); + y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); arr_points[1].y = y3_max; - arr_points[1].slope = dal_fixed31_32_zero; + arr_points[1].slope = dc_fixpt_zero; if (output_tf->tf == TRANSFER_FUNCTION_PQ) { /* for PQ, we want to have a straight line from last HW X point, * and the slope to be such that we hit 1.0 at 10000 nits. */ const struct fixed31_32 end_value = - dal_fixed31_32_from_int(125); + dc_fixpt_from_int(125); - arr_points[1].slope = dal_fixed31_32_div( - dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y), - dal_fixed31_32_sub(end_value, arr_points[1].x)); + arr_points[1].slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), + dc_fixpt_sub(end_value, arr_points[1].x)); } lut_params->hw_points_num = hw_points; @@ -386,24 +386,24 @@ bool cm_helper_translate_curve_to_hw_format( i = 1; while (i != hw_points + 1) { - if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red)) + if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) rgb_plus_1->red = rgb->red; - if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green)) + if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) rgb_plus_1->green = rgb->green; - if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue)) + if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) rgb_plus_1->blue = rgb->blue; - rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red); - rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green); - rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue); + rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); + rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); + rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); if (fixpoint == true) { - rgb->delta_red_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_red); - rgb->delta_green_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_green); - rgb->delta_blue_reg = dal_fixed31_32_clamp_u0d10(rgb->delta_blue); - rgb->red_reg = dal_fixed31_32_clamp_u0d14(rgb->red); - rgb->green_reg = dal_fixed31_32_clamp_u0d14(rgb->green); - rgb->blue_reg = dal_fixed31_32_clamp_u0d14(rgb->blue); + rgb->delta_red_reg = dc_fixpt_clamp_u0d10(rgb->delta_red); + rgb->delta_green_reg = dc_fixpt_clamp_u0d10(rgb->delta_green); + rgb->delta_blue_reg = dc_fixpt_clamp_u0d10(rgb->delta_blue); + rgb->red_reg = dc_fixpt_clamp_u0d14(rgb->red); + rgb->green_reg = dc_fixpt_clamp_u0d14(rgb->green); + rgb->blue_reg = dc_fixpt_clamp_u0d14(rgb->blue); } ++rgb_plus_1; @@ -489,19 +489,19 @@ bool cm_helper_translate_curve_to_degamma_hw_format( rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index]; rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index]; - arr_points[0].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), - 
dal_fixed31_32_from_int(region_start)); - arr_points[1].x = dal_fixed31_32_pow(dal_fixed31_32_from_int(2), - dal_fixed31_32_from_int(region_end)); + arr_points[0].x = dc_fixpt_pow(dc_fixpt_from_int(2), + dc_fixpt_from_int(region_start)); + arr_points[1].x = dc_fixpt_pow(dc_fixpt_from_int(2), + dc_fixpt_from_int(region_end)); y_r = rgb_resulted[0].red; y_g = rgb_resulted[0].green; y_b = rgb_resulted[0].blue; - y1_min = dal_fixed31_32_min(y_r, dal_fixed31_32_min(y_g, y_b)); + y1_min = dc_fixpt_min(y_r, dc_fixpt_min(y_g, y_b)); arr_points[0].y = y1_min; - arr_points[0].slope = dal_fixed31_32_div(arr_points[0].y, arr_points[0].x); + arr_points[0].slope = dc_fixpt_div(arr_points[0].y, arr_points[0].x); y_r = rgb_resulted[hw_points - 1].red; y_g = rgb_resulted[hw_points - 1].green; y_b = rgb_resulted[hw_points - 1].blue; @@ -509,22 +509,22 @@ bool cm_helper_translate_curve_to_degamma_hw_format( /* see comment above, m_arrPoints[1].y should be the Y value for the * region end (m_numOfHwPoints), not last HW point(m_numOfHwPoints - 1) */ - y3_max = dal_fixed31_32_max(y_r, dal_fixed31_32_max(y_g, y_b)); + y3_max = dc_fixpt_max(y_r, dc_fixpt_max(y_g, y_b)); arr_points[1].y = y3_max; - arr_points[1].slope = dal_fixed31_32_zero; + arr_points[1].slope = dc_fixpt_zero; if (output_tf->tf == TRANSFER_FUNCTION_PQ) { /* for PQ, we want to have a straight line from last HW X point, * and the slope to be such that we hit 1.0 at 10000 nits. */ const struct fixed31_32 end_value = - dal_fixed31_32_from_int(125); + dc_fixpt_from_int(125); - arr_points[1].slope = dal_fixed31_32_div( - dal_fixed31_32_sub(dal_fixed31_32_one, arr_points[1].y), - dal_fixed31_32_sub(end_value, arr_points[1].x)); + arr_points[1].slope = dc_fixpt_div( + dc_fixpt_sub(dc_fixpt_one, arr_points[1].y), + dc_fixpt_sub(end_value, arr_points[1].x)); } lut_params->hw_points_num = hw_points; @@ -548,16 +548,16 @@ bool cm_helper_translate_curve_to_degamma_hw_format( i = 1; while (i != hw_points + 1) { - if (dal_fixed31_32_lt(rgb_plus_1->red, rgb->red)) + if (dc_fixpt_lt(rgb_plus_1->red, rgb->red)) rgb_plus_1->red = rgb->red; - if (dal_fixed31_32_lt(rgb_plus_1->green, rgb->green)) + if (dc_fixpt_lt(rgb_plus_1->green, rgb->green)) rgb_plus_1->green = rgb->green; - if (dal_fixed31_32_lt(rgb_plus_1->blue, rgb->blue)) + if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue)) rgb_plus_1->blue = rgb->blue; - rgb->delta_red = dal_fixed31_32_sub(rgb_plus_1->red, rgb->red); - rgb->delta_green = dal_fixed31_32_sub(rgb_plus_1->green, rgb->green); - rgb->delta_blue = dal_fixed31_32_sub(rgb_plus_1->blue, rgb->blue); + rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red); + rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green); + rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue); ++rgb_plus_1; ++rgb; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 8c4d9e523331..20796da36de4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -130,7 +130,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) /* Gamut remap in bypass */ } -#define IDENTITY_RATIO(ratio) (dal_fixed31_32_u2d19(ratio) == (1 << 19)) +#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) bool dpp_get_optimal_number_of_taps( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c index 4f373c97804f..116977eb24e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c 
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c @@ -811,13 +811,13 @@ void dpp1_program_input_lut( REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0); for (i = 0; i < gamma->num_entries; i++) { REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dal_fixed31_32_round( + dc_fixpt_round( gamma->entries.red[i])); REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dal_fixed31_32_round( + dc_fixpt_round( gamma->entries.green[i])); REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR, - dal_fixed31_32_round( + dc_fixpt_round( gamma->entries.blue[i])); } // Power off LUT memory diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 3eb824debf43..4ddd6273d5a5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -169,7 +169,7 @@ static enum dscl_mode_sel dpp1_dscl_get_dscl_mode( const struct scaler_data *data, bool dbg_always_scale) { - const long long one = dal_fixed31_32_one.value; + const long long one = dc_fixpt_one.value; if (dpp_base->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT) { /* DSCL is processing data in fixed format */ @@ -464,8 +464,8 @@ static enum lb_memory_config dpp1_dscl_find_lb_memory_config(struct dcn10_dpp *d int num_part_y, num_part_c; int vtaps = scl_data->taps.v_taps; int vtaps_c = scl_data->taps.v_taps_c; - int ceil_vratio = dal_fixed31_32_ceil(scl_data->ratios.vert); - int ceil_vratio_c = dal_fixed31_32_ceil(scl_data->ratios.vert_c); + int ceil_vratio = dc_fixpt_ceil(scl_data->ratios.vert); + int ceil_vratio_c = dc_fixpt_ceil(scl_data->ratios.vert_c); enum lb_memory_config mem_cfg = LB_MEMORY_CONFIG_0; if (dpp->base.ctx->dc->debug.use_max_lb) @@ -565,52 +565,52 @@ static void dpp1_dscl_set_manual_ratio_init( uint32_t init_int = 0; REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, - SCL_H_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.horz) << 5); + SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5); REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, - SCL_V_SCALE_RATIO, dal_fixed31_32_u2d19(data->ratios.vert) << 5); + SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5); REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0, - SCL_H_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.horz_c) << 5); + SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5); REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0, - SCL_V_SCALE_RATIO_C, dal_fixed31_32_u2d19(data->ratios.vert_c) << 5); + SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5); /* * 0.24 format for fraction, first five bits zeroed */ - init_frac = dal_fixed31_32_u0d19(data->inits.h) << 5; - init_int = dal_fixed31_32_floor(data->inits.h); + init_frac = dc_fixpt_u0d19(data->inits.h) << 5; + init_int = dc_fixpt_floor(data->inits.h); REG_SET_2(SCL_HORZ_FILTER_INIT, 0, SCL_H_INIT_FRAC, init_frac, SCL_H_INIT_INT, init_int); - init_frac = dal_fixed31_32_u0d19(data->inits.h_c) << 5; - init_int = dal_fixed31_32_floor(data->inits.h_c); + init_frac = dc_fixpt_u0d19(data->inits.h_c) << 5; + init_int = dc_fixpt_floor(data->inits.h_c); REG_SET_2(SCL_HORZ_FILTER_INIT_C, 0, SCL_H_INIT_FRAC_C, init_frac, SCL_H_INIT_INT_C, init_int); - init_frac = dal_fixed31_32_u0d19(data->inits.v) << 5; - init_int = dal_fixed31_32_floor(data->inits.v); + init_frac = dc_fixpt_u0d19(data->inits.v) << 5; + init_int = dc_fixpt_floor(data->inits.v); REG_SET_2(SCL_VERT_FILTER_INIT, 0, SCL_V_INIT_FRAC, init_frac, SCL_V_INIT_INT, init_int); - init_frac = 
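The u2d19()/u0d19() helpers renamed in the scaler hunks produce 19-bit fractional encodings that are then shifted left by five to fill 0.24-format register fields (the comment notes the first five bits are zeroed). A standalone approximation of that packing, with doubles standing in for struct fixed31_32 and invented helper names; the real conversions may truncate rather than round:

#include <math.h>
#include <stdint.h>

/* Approximate the u2d19/u0d19 + "<< 5" packing used for the scaler fields. */
static uint32_t scale_ratio_field(double ratio)       /* ~ dc_fixpt_u2d19(r) << 5 */
{
	return (uint32_t)llround(ratio * (1 << 19)) << 5;
}

static void init_fields(double init, uint32_t *int_part, uint32_t *frac_part)
{
	*int_part  = (uint32_t)floor(init);                      /* SCL_*_INIT_INT  */
	*frac_part = (uint32_t)llround((init - floor(init)) *    /* SCL_*_INIT_FRAC */
				       (1 << 19)) << 5;
}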
dal_fixed31_32_u0d19(data->inits.v_bot) << 5; - init_int = dal_fixed31_32_floor(data->inits.v_bot); + init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5; + init_int = dc_fixpt_floor(data->inits.v_bot); REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0, SCL_V_INIT_FRAC_BOT, init_frac, SCL_V_INIT_INT_BOT, init_int); - init_frac = dal_fixed31_32_u0d19(data->inits.v_c) << 5; - init_int = dal_fixed31_32_floor(data->inits.v_c); + init_frac = dc_fixpt_u0d19(data->inits.v_c) << 5; + init_int = dc_fixpt_floor(data->inits.v_c); REG_SET_2(SCL_VERT_FILTER_INIT_C, 0, SCL_V_INIT_FRAC_C, init_frac, SCL_V_INIT_INT_C, init_int); - init_frac = dal_fixed31_32_u0d19(data->inits.v_c_bot) << 5; - init_int = dal_fixed31_32_floor(data->inits.v_c_bot); + init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5; + init_int = dc_fixpt_floor(data->inits.v_c_bot); REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0, SCL_V_INIT_FRAC_BOT_C, init_frac, SCL_V_INIT_INT_BOT_C, init_int); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 0cbc83edd37f..185f93bda41b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -1054,8 +1054,8 @@ void hubp1_cursor_set_position( ASSERT(param->h_scale_ratio.value); if (param->h_scale_ratio.value) - dst_x_offset = dal_fixed31_32_floor(dal_fixed31_32_div( - dal_fixed31_32_from_int(dst_x_offset), + dst_x_offset = dc_fixpt_floor(dc_fixpt_div( + dc_fixpt_from_int(dst_x_offset), param->h_scale_ratio)); if (src_x_offset >= (int)param->viewport_width) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 8adb8dc44af5..50bd7548e230 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1685,22 +1685,22 @@ static uint16_t fixed_point_to_int_frac( uint16_t result; - uint16_t d = (uint16_t)dal_fixed31_32_floor( - dal_fixed31_32_abs( + uint16_t d = (uint16_t)dc_fixpt_floor( + dc_fixpt_abs( arg)); if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor)) - numerator = (uint16_t)dal_fixed31_32_floor( - dal_fixed31_32_mul_int( + numerator = (uint16_t)dc_fixpt_floor( + dc_fixpt_mul_int( arg, divisor)); else { - numerator = dal_fixed31_32_floor( - dal_fixed31_32_sub( - dal_fixed31_32_from_int( + numerator = dc_fixpt_floor( + dc_fixpt_sub( + dc_fixpt_from_int( 1LL << integer_bits), - dal_fixed31_32_recip( - dal_fixed31_32_from_int( + dc_fixpt_recip( + dc_fixpt_from_int( divisor)))); } @@ -1710,8 +1710,8 @@ static uint16_t fixed_point_to_int_frac( result = (uint16_t)( (1 << (integer_bits + fractional_bits + 1)) + numerator); - if ((result != 0) && dal_fixed31_32_lt( - arg, dal_fixed31_32_zero)) + if ((result != 0) && dc_fixpt_lt( + arg, dc_fixpt_zero)) result |= 1 << (integer_bits + fractional_bits); return result; @@ -1725,8 +1725,8 @@ void build_prescale_params(struct dc_bias_and_scale *bias_and_scale, && plane_state->input_csc_color_matrix.enable_adjustment && plane_state->coeff_reduction_factor.value != 0) { bias_and_scale->scale_blue = fixed_point_to_int_frac( - dal_fixed31_32_mul(plane_state->coeff_reduction_factor, - dal_fixed31_32_from_fraction(256, 255)), + dc_fixpt_mul(plane_state->coeff_reduction_factor, + dc_fixpt_from_fraction(256, 255)), 2, 13); bias_and_scale->scale_red = bias_and_scale->scale_blue; @@ -1995,7 +1995,7 @@ static void dcn10_blank_pixel_data( static void set_hdr_multiplier(struct pipe_ctx 
*pipe_ctx) { - struct fixed31_32 multiplier = dal_fixed31_32_from_fraction( + struct fixed31_32 multiplier = dc_fixpt_from_fraction( pipe_ctx->plane_state->sdr_white_level, 80); uint32_t hw_mult = 0x1f000; // 1.0 default multiplier struct custom_float_format fmt; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index befd8639ad55..653b7b2efe2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -603,11 +603,11 @@ void enc1_stream_encoder_set_mst_bandwidth( struct fixed31_32 avg_time_slots_per_mtp) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - uint32_t x = dal_fixed31_32_floor( + uint32_t x = dc_fixpt_floor( avg_time_slots_per_mtp); - uint32_t y = dal_fixed31_32_ceil( - dal_fixed31_32_shl( - dal_fixed31_32_sub_int( + uint32_t y = dc_fixpt_ceil( + dc_fixpt_shl( + dc_fixpt_sub_int( avg_time_slots_per_mtp, x), 26)); diff --git a/drivers/gpu/drm/amd/display/dc/irq_types.h b/drivers/gpu/drm/amd/display/dc/irq_types.h index cc3b1bc6cedd..0b5f3a278c22 100644 --- a/drivers/gpu/drm/amd/display/dc/irq_types.h +++ b/drivers/gpu/drm/amd/display/dc/irq_types.h @@ -26,6 +26,8 @@ #ifndef __DAL_IRQ_TYPES_H__ #define __DAL_IRQ_TYPES_H__ +#include "os_types.h" + struct dc_context; typedef void (*interrupt_handler)(void *); diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 16cbdb43d856..b5b8d7dea373 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -50,16 +50,16 @@ struct fixed31_32 { * Useful constants */ -static const struct fixed31_32 dal_fixed31_32_zero = { 0 }; -static const struct fixed31_32 dal_fixed31_32_epsilon = { 1LL }; -static const struct fixed31_32 dal_fixed31_32_half = { 0x80000000LL }; -static const struct fixed31_32 dal_fixed31_32_one = { 0x100000000LL }; +static const struct fixed31_32 dc_fixpt_zero = { 0 }; +static const struct fixed31_32 dc_fixpt_epsilon = { 1LL }; +static const struct fixed31_32 dc_fixpt_half = { 0x80000000LL }; +static const struct fixed31_32 dc_fixpt_one = { 0x100000000LL }; -static const struct fixed31_32 dal_fixed31_32_pi = { 13493037705LL }; -static const struct fixed31_32 dal_fixed31_32_two_pi = { 26986075409LL }; -static const struct fixed31_32 dal_fixed31_32_e = { 11674931555LL }; -static const struct fixed31_32 dal_fixed31_32_ln2 = { 2977044471LL }; -static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL }; +static const struct fixed31_32 dc_fixpt_pi = { 13493037705LL }; +static const struct fixed31_32 dc_fixpt_two_pi = { 26986075409LL }; +static const struct fixed31_32 dc_fixpt_e = { 11674931555LL }; +static const struct fixed31_32 dc_fixpt_ln2 = { 2977044471LL }; +static const struct fixed31_32 dc_fixpt_ln2_div_2 = { 1488522236LL }; /* * @brief @@ -70,7 +70,7 @@ static const struct fixed31_32 dal_fixed31_32_ln2_div_2 = { 1488522236LL }; * @brief * result = numerator / denominator */ -struct fixed31_32 dal_fixed31_32_from_fraction( +struct fixed31_32 dc_fixpt_from_fraction( long long numerator, long long denominator); @@ -78,8 +78,8 @@ struct fixed31_32 dal_fixed31_32_from_fraction( * @brief * result = arg */ -struct fixed31_32 dal_fixed31_32_from_int_nonconst(long long arg); -static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg) +struct fixed31_32 dc_fixpt_from_int_nonconst(long long arg); 
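Both stream-encoder hunks split avg_time_slots_per_mtp the same way: X is the whole number of MST time slots and Y is the remaining fraction expressed in 1/2^26 units. A throwaway double-based equivalent, with an invented function name:

#include <math.h>
#include <stdint.h>

/* Mirror of the X/Y split in *_set_mst_bandwidth(), with doubles standing
 * in for struct fixed31_32. */
static void split_time_slots(double avg_time_slots_per_mtp,
			     uint32_t *x, uint32_t *y)
{
	*x = (uint32_t)floor(avg_time_slots_per_mtp);           /* whole slots          */
	*y = (uint32_t)ceil((avg_time_slots_per_mtp - *x) *     /* fraction, 0.26 bits  */
			    (double)(1 << 26));
}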
+static inline struct fixed31_32 dc_fixpt_from_int(long long arg) { if (__builtin_constant_p(arg)) { struct fixed31_32 res; @@ -87,7 +87,7 @@ static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg) res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART; return res; } else - return dal_fixed31_32_from_int_nonconst(arg); + return dc_fixpt_from_int_nonconst(arg); } /* @@ -99,7 +99,7 @@ static inline struct fixed31_32 dal_fixed31_32_from_int(long long arg) * @brief * result = -arg */ -static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg) +static inline struct fixed31_32 dc_fixpt_neg(struct fixed31_32 arg) { struct fixed31_32 res; @@ -112,10 +112,10 @@ static inline struct fixed31_32 dal_fixed31_32_neg(struct fixed31_32 arg) * @brief * result = abs(arg) := (arg >= 0) ? arg : -arg */ -static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg) +static inline struct fixed31_32 dc_fixpt_abs(struct fixed31_32 arg) { if (arg.value < 0) - return dal_fixed31_32_neg(arg); + return dc_fixpt_neg(arg); else return arg; } @@ -129,7 +129,7 @@ static inline struct fixed31_32 dal_fixed31_32_abs(struct fixed31_32 arg) * @brief * result = arg1 < arg2 */ -static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1, +static inline bool dc_fixpt_lt(struct fixed31_32 arg1, struct fixed31_32 arg2) { return arg1.value < arg2.value; @@ -139,7 +139,7 @@ static inline bool dal_fixed31_32_lt(struct fixed31_32 arg1, * @brief * result = arg1 <= arg2 */ -static inline bool dal_fixed31_32_le(struct fixed31_32 arg1, +static inline bool dc_fixpt_le(struct fixed31_32 arg1, struct fixed31_32 arg2) { return arg1.value <= arg2.value; @@ -149,7 +149,7 @@ static inline bool dal_fixed31_32_le(struct fixed31_32 arg1, * @brief * result = arg1 == arg2 */ -static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1, +static inline bool dc_fixpt_eq(struct fixed31_32 arg1, struct fixed31_32 arg2) { return arg1.value == arg2.value; @@ -159,7 +159,7 @@ static inline bool dal_fixed31_32_eq(struct fixed31_32 arg1, * @brief * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2 */ -static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1, +static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1, struct fixed31_32 arg2) { if (arg1.value <= arg2.value) @@ -172,7 +172,7 @@ static inline struct fixed31_32 dal_fixed31_32_min(struct fixed31_32 arg1, * @brief * result = max(arg1, arg2) := (arg1 <= arg2) ? 
arg2 : arg1 */ -static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1, +static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1, struct fixed31_32 arg2) { if (arg1.value <= arg2.value) @@ -187,14 +187,14 @@ static inline struct fixed31_32 dal_fixed31_32_max(struct fixed31_32 arg1, * result = | arg, when min_value < arg < max_value * | max_value, when arg >= max_value */ -static inline struct fixed31_32 dal_fixed31_32_clamp( +static inline struct fixed31_32 dc_fixpt_clamp( struct fixed31_32 arg, struct fixed31_32 min_value, struct fixed31_32 max_value) { - if (dal_fixed31_32_le(arg, min_value)) + if (dc_fixpt_le(arg, min_value)) return min_value; - else if (dal_fixed31_32_le(max_value, arg)) + else if (dc_fixpt_le(max_value, arg)) return max_value; else return arg; @@ -209,7 +209,7 @@ static inline struct fixed31_32 dal_fixed31_32_clamp( * @brief * result = arg << shift */ -struct fixed31_32 dal_fixed31_32_shl( +struct fixed31_32 dc_fixpt_shl( struct fixed31_32 arg, unsigned char shift); @@ -217,7 +217,7 @@ struct fixed31_32 dal_fixed31_32_shl( * @brief * result = arg >> shift */ -static inline struct fixed31_32 dal_fixed31_32_shr( +static inline struct fixed31_32 dc_fixpt_shr( struct fixed31_32 arg, unsigned char shift) { @@ -235,7 +235,7 @@ static inline struct fixed31_32 dal_fixed31_32_shr( * @brief * result = arg1 + arg2 */ -struct fixed31_32 dal_fixed31_32_add( +struct fixed31_32 dc_fixpt_add( struct fixed31_32 arg1, struct fixed31_32 arg2); @@ -243,18 +243,18 @@ struct fixed31_32 dal_fixed31_32_add( * @brief * result = arg1 + arg2 */ -static inline struct fixed31_32 dal_fixed31_32_add_int(struct fixed31_32 arg1, +static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1, int arg2) { - return dal_fixed31_32_add(arg1, - dal_fixed31_32_from_int(arg2)); + return dc_fixpt_add(arg1, + dc_fixpt_from_int(arg2)); } /* * @brief * result = arg1 - arg2 */ -struct fixed31_32 dal_fixed31_32_sub( +struct fixed31_32 dc_fixpt_sub( struct fixed31_32 arg1, struct fixed31_32 arg2); @@ -262,11 +262,11 @@ struct fixed31_32 dal_fixed31_32_sub( * @brief * result = arg1 - arg2 */ -static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1, +static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1, int arg2) { - return dal_fixed31_32_sub(arg1, - dal_fixed31_32_from_int(arg2)); + return dc_fixpt_sub(arg1, + dc_fixpt_from_int(arg2)); } @@ -279,7 +279,7 @@ static inline struct fixed31_32 dal_fixed31_32_sub_int(struct fixed31_32 arg1, * @brief * result = arg1 * arg2 */ -struct fixed31_32 dal_fixed31_32_mul( +struct fixed31_32 dc_fixpt_mul( struct fixed31_32 arg1, struct fixed31_32 arg2); @@ -288,39 +288,39 @@ struct fixed31_32 dal_fixed31_32_mul( * @brief * result = arg1 * arg2 */ -static inline struct fixed31_32 dal_fixed31_32_mul_int(struct fixed31_32 arg1, +static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1, int arg2) { - return dal_fixed31_32_mul(arg1, - dal_fixed31_32_from_int(arg2)); + return dc_fixpt_mul(arg1, + dc_fixpt_from_int(arg2)); } /* * @brief * result = square(arg) := arg * arg */ -struct fixed31_32 dal_fixed31_32_sqr( +struct fixed31_32 dc_fixpt_sqr( struct fixed31_32 arg); /* * @brief * result = arg1 / arg2 */ -static inline struct fixed31_32 dal_fixed31_32_div_int(struct fixed31_32 arg1, +static inline struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1, long long arg2) { - return dal_fixed31_32_from_fraction(arg1.value, - dal_fixed31_32_from_int(arg2).value); + return 
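All of the dc_fixpt_* names introduced by this rename operate on the same signed Q31.32 layout: the low 32 bits are the fraction, so 1.0 is 0x100000000 and 0.5 is 0x80000000 (the retired fixed32_32 type was an unsigned uint64_t variant of the same idea). A self-contained toy illustration of that encoding; the q_* helpers are simplified stand-ins, not the driver implementations:

#include <stdint.h>
#include <stdio.h>

/* Toy Q31.32 helpers mirroring the fixed31_32 layout: value = real * 2^32. */
#define Q31_32_ONE  0x100000000LL
#define Q31_32_HALF 0x080000000LL

static int64_t q_from_fraction(int64_t num, int64_t den)
{
	/* Good enough for small operands; the real dc_fixpt_from_fraction()
	 * adds rounding and overflow handling. */
	return (num * Q31_32_ONE) / den;
}

static long long q_floor(int64_t v) { return v >> 32; }
static long long q_round(int64_t v) { return (v + Q31_32_HALF) >> 32; }

int main(void)
{
	int64_t v = q_from_fraction(5, 3);   /* ~1.666... */

	printf("floor=%lld round=%lld raw=0x%llx\n",
	       q_floor(v), q_round(v), (unsigned long long)v);
	return 0;
}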
dc_fixpt_from_fraction(arg1.value, + dc_fixpt_from_int(arg2).value); } /* * @brief * result = arg1 / arg2 */ -static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1, +static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1, struct fixed31_32 arg2) { - return dal_fixed31_32_from_fraction(arg1.value, + return dc_fixpt_from_fraction(arg1.value, arg2.value); } @@ -336,7 +336,7 @@ static inline struct fixed31_32 dal_fixed31_32_div(struct fixed31_32 arg1, * @note * No special actions taken in case argument is zero. */ -struct fixed31_32 dal_fixed31_32_recip( +struct fixed31_32 dc_fixpt_recip( struct fixed31_32 arg); /* @@ -352,7 +352,7 @@ struct fixed31_32 dal_fixed31_32_recip( * Argument specified in radians, * internally it's normalized to [-2pi...2pi] range. */ -struct fixed31_32 dal_fixed31_32_sinc( +struct fixed31_32 dc_fixpt_sinc( struct fixed31_32 arg); /* @@ -363,7 +363,7 @@ struct fixed31_32 dal_fixed31_32_sinc( * Argument specified in radians, * internally it's normalized to [-2pi...2pi] range. */ -struct fixed31_32 dal_fixed31_32_sin( +struct fixed31_32 dc_fixpt_sin( struct fixed31_32 arg); /* @@ -376,7 +376,7 @@ struct fixed31_32 dal_fixed31_32_sin( * passing arguments outside that range * will cause incorrect result! */ -struct fixed31_32 dal_fixed31_32_cos( +struct fixed31_32 dc_fixpt_cos( struct fixed31_32 arg); /* @@ -391,7 +391,7 @@ struct fixed31_32 dal_fixed31_32_cos( * @note * Currently, function is verified for abs(arg) <= 1. */ -struct fixed31_32 dal_fixed31_32_exp( +struct fixed31_32 dc_fixpt_exp( struct fixed31_32 arg); /* @@ -404,7 +404,7 @@ struct fixed31_32 dal_fixed31_32_exp( * Currently, no special actions taken * in case of invalid argument(s). Take care! */ -struct fixed31_32 dal_fixed31_32_log( +struct fixed31_32 dc_fixpt_log( struct fixed31_32 arg); /* @@ -419,7 +419,7 @@ struct fixed31_32 dal_fixed31_32_log( * @note * Currently, abs(arg1) should be less than 1. Take care! */ -struct fixed31_32 dal_fixed31_32_pow( +struct fixed31_32 dc_fixpt_pow( struct fixed31_32 arg1, struct fixed31_32 arg2); @@ -432,21 +432,21 @@ struct fixed31_32 dal_fixed31_32_pow( * @brief * result = floor(arg) := greatest integer lower than or equal to arg */ -int dal_fixed31_32_floor( +int dc_fixpt_floor( struct fixed31_32 arg); /* * @brief * result = round(arg) := integer nearest to arg */ -int dal_fixed31_32_round( +int dc_fixpt_round( struct fixed31_32 arg); /* * @brief * result = ceil(arg) := lowest integer greater than or equal to arg */ -int dal_fixed31_32_ceil( +int dc_fixpt_ceil( struct fixed31_32 arg); /* the following two function are used in scaler hw programming to convert fixed @@ -455,20 +455,20 @@ int dal_fixed31_32_ceil( * fractional */ -unsigned int dal_fixed31_32_u2d19( +unsigned int dc_fixpt_u2d19( struct fixed31_32 arg); -unsigned int dal_fixed31_32_u0d19( +unsigned int dc_fixpt_u0d19( struct fixed31_32 arg); -unsigned int dal_fixed31_32_clamp_u0d14( +unsigned int dc_fixpt_clamp_u0d14( struct fixed31_32 arg); -unsigned int dal_fixed31_32_clamp_u0d10( +unsigned int dc_fixpt_clamp_u0d10( struct fixed31_32 arg); -int dal_fixed31_32_s4d19( +int dc_fixpt_s4d19( struct fixed31_32 arg); #endif diff --git a/drivers/gpu/drm/amd/display/include/fixed32_32.h b/drivers/gpu/drm/amd/display/include/fixed32_32.h deleted file mode 100644 index 9c70341fe026..000000000000 --- a/drivers/gpu/drm/amd/display/include/fixed32_32.h +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2012-15 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - - -#ifndef __DAL_FIXED32_32_H__ -#define __DAL_FIXED32_32_H__ - -#include "os_types.h" - -struct fixed32_32 { - uint64_t value; -}; - -static const struct fixed32_32 dal_fixed32_32_zero = { 0 }; -static const struct fixed32_32 dal_fixed32_32_one = { 0x100000000LL }; -static const struct fixed32_32 dal_fixed32_32_half = { 0x80000000LL }; - -struct fixed32_32 dal_fixed32_32_from_fraction(uint32_t n, uint32_t d); -static inline struct fixed32_32 dal_fixed32_32_from_int(uint32_t value) -{ - struct fixed32_32 fx; - - fx.value = (uint64_t)value<<32; - return fx; -} - -struct fixed32_32 dal_fixed32_32_add( - struct fixed32_32 lhs, - struct fixed32_32 rhs); -struct fixed32_32 dal_fixed32_32_add_int( - struct fixed32_32 lhs, - uint32_t rhs); -struct fixed32_32 dal_fixed32_32_sub( - struct fixed32_32 lhs, - struct fixed32_32 rhs); -struct fixed32_32 dal_fixed32_32_sub_int( - struct fixed32_32 lhs, - uint32_t rhs); -struct fixed32_32 dal_fixed32_32_mul( - struct fixed32_32 lhs, - struct fixed32_32 rhs); -struct fixed32_32 dal_fixed32_32_mul_int( - struct fixed32_32 lhs, - uint32_t rhs); -struct fixed32_32 dal_fixed32_32_div( - struct fixed32_32 lhs, - struct fixed32_32 rhs); -struct fixed32_32 dal_fixed32_32_div_int( - struct fixed32_32 lhs, - uint32_t rhs); - -static inline struct fixed32_32 dal_fixed32_32_min(struct fixed32_32 lhs, - struct fixed32_32 rhs) -{ - return (lhs.value < rhs.value) ? lhs : rhs; -} - -static inline struct fixed32_32 dal_fixed32_32_max(struct fixed32_32 lhs, - struct fixed32_32 rhs) -{ - return (lhs.value > rhs.value) ? 
lhs : rhs; -} - -static inline bool dal_fixed32_32_gt(struct fixed32_32 lhs, struct fixed32_32 rhs) -{ - return lhs.value > rhs.value; -} - -static inline bool dal_fixed32_32_gt_int(struct fixed32_32 lhs, uint32_t rhs) -{ - return lhs.value > ((uint64_t)rhs<<32); -} - -static inline bool dal_fixed32_32_lt(struct fixed32_32 lhs, struct fixed32_32 rhs) -{ - return lhs.value < rhs.value; -} - -static inline bool dal_fixed32_32_lt_int(struct fixed32_32 lhs, uint32_t rhs) -{ - return lhs.value < ((uint64_t)rhs<<32); -} - -static inline bool dal_fixed32_32_le(struct fixed32_32 lhs, struct fixed32_32 rhs) -{ - return lhs.value <= rhs.value; -} - -static inline bool dal_fixed32_32_le_int(struct fixed32_32 lhs, uint32_t rhs) -{ - return lhs.value <= ((uint64_t)rhs<<32); -} - -static inline bool dal_fixed32_32_eq(struct fixed32_32 lhs, struct fixed32_32 rhs) -{ - return lhs.value == rhs.value; -} - -uint32_t dal_fixed32_32_ceil(struct fixed32_32 value); -static inline uint32_t dal_fixed32_32_floor(struct fixed32_32 value) -{ - return value.value>>32; -} - -uint32_t dal_fixed32_32_round(struct fixed32_32 value); - -#endif diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 15e5b72e6e00..29d2ec82b924 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -43,7 +43,7 @@ static bool de_pq_initialized; /* = false; */ /* one-time setup of X points */ void setup_x_points_distribution(void) { - struct fixed31_32 region_size = dal_fixed31_32_from_int(128); + struct fixed31_32 region_size = dc_fixpt_from_int(128); int32_t segment; uint32_t seg_offset; uint32_t index; @@ -53,8 +53,8 @@ void setup_x_points_distribution(void) coordinates_x[MAX_HW_POINTS + 1].x = region_size; for (segment = 6; segment > (6 - NUM_REGIONS); segment--) { - region_size = dal_fixed31_32_div_int(region_size, 2); - increment = dal_fixed31_32_div_int(region_size, + region_size = dc_fixpt_div_int(region_size, 2); + increment = dc_fixpt_div_int(region_size, NUM_PTS_IN_REGION); seg_offset = (segment + (NUM_REGIONS - 7)) * NUM_PTS_IN_REGION; coordinates_x[seg_offset].x = region_size; @@ -62,7 +62,7 @@ void setup_x_points_distribution(void) for (index = seg_offset + 1; index < seg_offset + NUM_PTS_IN_REGION; index++) { - coordinates_x[index].x = dal_fixed31_32_add + coordinates_x[index].x = dc_fixpt_add (coordinates_x[index-1].x, increment); } } @@ -72,63 +72,63 @@ static void compute_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y) { /* consts for PQ gamma formula. 
*/ const struct fixed31_32 m1 = - dal_fixed31_32_from_fraction(159301758, 1000000000); + dc_fixpt_from_fraction(159301758, 1000000000); const struct fixed31_32 m2 = - dal_fixed31_32_from_fraction(7884375, 100000); + dc_fixpt_from_fraction(7884375, 100000); const struct fixed31_32 c1 = - dal_fixed31_32_from_fraction(8359375, 10000000); + dc_fixpt_from_fraction(8359375, 10000000); const struct fixed31_32 c2 = - dal_fixed31_32_from_fraction(188515625, 10000000); + dc_fixpt_from_fraction(188515625, 10000000); const struct fixed31_32 c3 = - dal_fixed31_32_from_fraction(186875, 10000); + dc_fixpt_from_fraction(186875, 10000); struct fixed31_32 l_pow_m1; struct fixed31_32 base; - if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero)) - in_x = dal_fixed31_32_zero; + if (dc_fixpt_lt(in_x, dc_fixpt_zero)) + in_x = dc_fixpt_zero; - l_pow_m1 = dal_fixed31_32_pow(in_x, m1); - base = dal_fixed31_32_div( - dal_fixed31_32_add(c1, - (dal_fixed31_32_mul(c2, l_pow_m1))), - dal_fixed31_32_add(dal_fixed31_32_one, - (dal_fixed31_32_mul(c3, l_pow_m1)))); - *out_y = dal_fixed31_32_pow(base, m2); + l_pow_m1 = dc_fixpt_pow(in_x, m1); + base = dc_fixpt_div( + dc_fixpt_add(c1, + (dc_fixpt_mul(c2, l_pow_m1))), + dc_fixpt_add(dc_fixpt_one, + (dc_fixpt_mul(c3, l_pow_m1)))); + *out_y = dc_fixpt_pow(base, m2); } static void compute_de_pq(struct fixed31_32 in_x, struct fixed31_32 *out_y) { /* consts for dePQ gamma formula. */ const struct fixed31_32 m1 = - dal_fixed31_32_from_fraction(159301758, 1000000000); + dc_fixpt_from_fraction(159301758, 1000000000); const struct fixed31_32 m2 = - dal_fixed31_32_from_fraction(7884375, 100000); + dc_fixpt_from_fraction(7884375, 100000); const struct fixed31_32 c1 = - dal_fixed31_32_from_fraction(8359375, 10000000); + dc_fixpt_from_fraction(8359375, 10000000); const struct fixed31_32 c2 = - dal_fixed31_32_from_fraction(188515625, 10000000); + dc_fixpt_from_fraction(188515625, 10000000); const struct fixed31_32 c3 = - dal_fixed31_32_from_fraction(186875, 10000); + dc_fixpt_from_fraction(186875, 10000); struct fixed31_32 l_pow_m1; struct fixed31_32 base, div; - if (dal_fixed31_32_lt(in_x, dal_fixed31_32_zero)) - in_x = dal_fixed31_32_zero; + if (dc_fixpt_lt(in_x, dc_fixpt_zero)) + in_x = dc_fixpt_zero; - l_pow_m1 = dal_fixed31_32_pow(in_x, - dal_fixed31_32_div(dal_fixed31_32_one, m2)); - base = dal_fixed31_32_sub(l_pow_m1, c1); + l_pow_m1 = dc_fixpt_pow(in_x, + dc_fixpt_div(dc_fixpt_one, m2)); + base = dc_fixpt_sub(l_pow_m1, c1); - if (dal_fixed31_32_lt(base, dal_fixed31_32_zero)) - base = dal_fixed31_32_zero; + if (dc_fixpt_lt(base, dc_fixpt_zero)) + base = dc_fixpt_zero; - div = dal_fixed31_32_sub(c2, dal_fixed31_32_mul(c3, l_pow_m1)); + div = dc_fixpt_sub(c2, dc_fixpt_mul(c3, l_pow_m1)); - *out_y = dal_fixed31_32_pow(dal_fixed31_32_div(base, div), - dal_fixed31_32_div(dal_fixed31_32_one, m1)); + *out_y = dc_fixpt_pow(dc_fixpt_div(base, div), + dc_fixpt_div(dc_fixpt_one, m1)); } /* one-time pre-compute PQ values - only for sdr_white_level 80 */ @@ -138,14 +138,14 @@ void precompute_pq(void) struct fixed31_32 x; const struct hw_x_point *coord_x = coordinates_x + 32; struct fixed31_32 scaling_factor = - dal_fixed31_32_from_fraction(80, 10000); + dc_fixpt_from_fraction(80, 10000); /* pow function has problems with arguments too small */ for (i = 0; i < 32; i++) - pq_table[i] = dal_fixed31_32_zero; + pq_table[i] = dc_fixpt_zero; for (i = 32; i <= MAX_HW_POINTS; i++) { - x = dal_fixed31_32_mul(coord_x->x, scaling_factor); + x = dc_fixpt_mul(coord_x->x, scaling_factor); compute_pq(x, 
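compute_pq() above evaluates the PQ (SMPTE ST 2084) encoding with fixed-point pow; the fractions being renamed are its standard constants (m1 ~ 0.1593, m2 = 78.84375, c1 = 0.8359375, c2 = 18.8515625, c3 = 18.6875). A floating-point rendering of the same formula, for reference only:

#include <math.h>

/* PQ encoding with the same constants used by compute_pq(), in double
 * precision; x is normalized linear light clamped to [0, 1]. */
static double pq_encode(double x)
{
	const double m1 = 159301758.0 / 1000000000.0;
	const double m2 = 7884375.0 / 100000.0;
	const double c1 = 8359375.0 / 10000000.0;
	const double c2 = 188515625.0 / 10000000.0;
	const double c3 = 186875.0 / 10000.0;
	double xp;

	if (x < 0.0)
		x = 0.0;
	xp = pow(x, m1);

	return pow((c1 + c2 * xp) / (1.0 + c3 * xp), m2);
}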
&pq_table[i]); ++coord_x; } @@ -158,7 +158,7 @@ void precompute_de_pq(void) struct fixed31_32 y; uint32_t begin_index, end_index; - struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125); + struct fixed31_32 scaling_factor = dc_fixpt_from_int(125); /* X points is 2^-25 to 2^7 * De-gamma X is 2^-12 to 2^0 – we are skipping first -12-(-25) = 13 regions @@ -167,11 +167,11 @@ void precompute_de_pq(void) end_index = begin_index + 12 * NUM_PTS_IN_REGION; for (i = 0; i <= begin_index; i++) - de_pq_table[i] = dal_fixed31_32_zero; + de_pq_table[i] = dc_fixpt_zero; for (; i <= end_index; i++) { compute_de_pq(coordinates_x[i].x, &y); - de_pq_table[i] = dal_fixed31_32_mul(y, scaling_factor); + de_pq_table[i] = dc_fixpt_mul(y, scaling_factor); } for (; i <= MAX_HW_POINTS; i++) @@ -195,15 +195,15 @@ static void build_coefficients(struct gamma_coefficients *coefficients, bool is_ uint32_t index = is_2_4 == true ? 0:1; do { - coefficients->a0[i] = dal_fixed31_32_from_fraction( + coefficients->a0[i] = dc_fixpt_from_fraction( numerator01[index], 10000000); - coefficients->a1[i] = dal_fixed31_32_from_fraction( + coefficients->a1[i] = dc_fixpt_from_fraction( numerator02[index], 1000); - coefficients->a2[i] = dal_fixed31_32_from_fraction( + coefficients->a2[i] = dc_fixpt_from_fraction( numerator03[index], 1000); - coefficients->a3[i] = dal_fixed31_32_from_fraction( + coefficients->a3[i] = dc_fixpt_from_fraction( numerator04[index], 1000); - coefficients->user_gamma[i] = dal_fixed31_32_from_fraction( + coefficients->user_gamma[i] = dc_fixpt_from_fraction( numerator05[index], 1000); ++i; @@ -218,33 +218,33 @@ static struct fixed31_32 translate_from_linear_space( struct fixed31_32 a3, struct fixed31_32 gamma) { - const struct fixed31_32 one = dal_fixed31_32_from_int(1); + const struct fixed31_32 one = dc_fixpt_from_int(1); - if (dal_fixed31_32_lt(one, arg)) + if (dc_fixpt_lt(one, arg)) return one; - if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0))) - return dal_fixed31_32_sub( + if (dc_fixpt_le(arg, dc_fixpt_neg(a0))) + return dc_fixpt_sub( a2, - dal_fixed31_32_mul( - dal_fixed31_32_add( + dc_fixpt_mul( + dc_fixpt_add( one, a3), - dal_fixed31_32_pow( - dal_fixed31_32_neg(arg), - dal_fixed31_32_recip(gamma)))); - else if (dal_fixed31_32_le(a0, arg)) - return dal_fixed31_32_sub( - dal_fixed31_32_mul( - dal_fixed31_32_add( + dc_fixpt_pow( + dc_fixpt_neg(arg), + dc_fixpt_recip(gamma)))); + else if (dc_fixpt_le(a0, arg)) + return dc_fixpt_sub( + dc_fixpt_mul( + dc_fixpt_add( one, a3), - dal_fixed31_32_pow( + dc_fixpt_pow( arg, - dal_fixed31_32_recip(gamma))), + dc_fixpt_recip(gamma))), a2); else - return dal_fixed31_32_mul( + return dc_fixpt_mul( arg, a1); } @@ -259,25 +259,25 @@ static struct fixed31_32 translate_to_linear_space( { struct fixed31_32 linear; - a0 = dal_fixed31_32_mul(a0, a1); - if (dal_fixed31_32_le(arg, dal_fixed31_32_neg(a0))) + a0 = dc_fixpt_mul(a0, a1); + if (dc_fixpt_le(arg, dc_fixpt_neg(a0))) - linear = dal_fixed31_32_neg( - dal_fixed31_32_pow( - dal_fixed31_32_div( - dal_fixed31_32_sub(a2, arg), - dal_fixed31_32_add( - dal_fixed31_32_one, a3)), gamma)); + linear = dc_fixpt_neg( + dc_fixpt_pow( + dc_fixpt_div( + dc_fixpt_sub(a2, arg), + dc_fixpt_add( + dc_fixpt_one, a3)), gamma)); - else if (dal_fixed31_32_le(dal_fixed31_32_neg(a0), arg) && - dal_fixed31_32_le(arg, a0)) - linear = dal_fixed31_32_div(arg, a1); + else if (dc_fixpt_le(dc_fixpt_neg(a0), arg) && + dc_fixpt_le(arg, a0)) + linear = dc_fixpt_div(arg, a1); else - linear = dal_fixed31_32_pow( - dal_fixed31_32_div( - 
dal_fixed31_32_add(a2, arg), - dal_fixed31_32_add( - dal_fixed31_32_one, a3)), gamma); + linear = dc_fixpt_pow( + dc_fixpt_div( + dc_fixpt_add(a2, arg), + dc_fixpt_add( + dc_fixpt_one, a3)), gamma); return linear; } @@ -352,8 +352,8 @@ static bool find_software_points( right = axis_x[max_number - 1].b; } - if (dal_fixed31_32_le(left, hw_point) && - dal_fixed31_32_le(hw_point, right)) { + if (dc_fixpt_le(left, hw_point) && + dc_fixpt_le(hw_point, right)) { *index_to_start = i; *index_left = i; @@ -366,7 +366,7 @@ static bool find_software_points( return true; } else if ((i == *index_to_start) && - dal_fixed31_32_le(hw_point, left)) { + dc_fixpt_le(hw_point, left)) { *index_to_start = i; *index_left = i; *index_right = i; @@ -375,7 +375,7 @@ static bool find_software_points( return true; } else if ((i == max_number - 1) && - dal_fixed31_32_le(right, hw_point)) { + dc_fixpt_le(right, hw_point)) { *index_to_start = i; *index_left = i; *index_right = i; @@ -457,17 +457,17 @@ static bool build_custom_gamma_mapping_coefficients_worker( } if (hw_pos == HW_POINT_POSITION_MIDDLE) - point->coeff = dal_fixed31_32_div( - dal_fixed31_32_sub( + point->coeff = dc_fixpt_div( + dc_fixpt_sub( coord_x, left_pos), - dal_fixed31_32_sub( + dc_fixpt_sub( right_pos, left_pos)); else if (hw_pos == HW_POINT_POSITION_LEFT) - point->coeff = dal_fixed31_32_zero; + point->coeff = dc_fixpt_zero; else if (hw_pos == HW_POINT_POSITION_RIGHT) - point->coeff = dal_fixed31_32_from_int(2); + point->coeff = dc_fixpt_from_int(2); else { BREAK_TO_DEBUGGER(); return false; @@ -502,45 +502,45 @@ static struct fixed31_32 calculate_mapped_value( if ((point->left_index < 0) || (point->left_index > max_index)) { BREAK_TO_DEBUGGER(); - return dal_fixed31_32_zero; + return dc_fixpt_zero; } if ((point->right_index < 0) || (point->right_index > max_index)) { BREAK_TO_DEBUGGER(); - return dal_fixed31_32_zero; + return dc_fixpt_zero; } if (point->pos == HW_POINT_POSITION_MIDDLE) if (channel == CHANNEL_NAME_RED) - result = dal_fixed31_32_add( - dal_fixed31_32_mul( + result = dc_fixpt_add( + dc_fixpt_mul( point->coeff, - dal_fixed31_32_sub( + dc_fixpt_sub( rgb[point->right_index].r, rgb[point->left_index].r)), rgb[point->left_index].r); else if (channel == CHANNEL_NAME_GREEN) - result = dal_fixed31_32_add( - dal_fixed31_32_mul( + result = dc_fixpt_add( + dc_fixpt_mul( point->coeff, - dal_fixed31_32_sub( + dc_fixpt_sub( rgb[point->right_index].g, rgb[point->left_index].g)), rgb[point->left_index].g); else - result = dal_fixed31_32_add( - dal_fixed31_32_mul( + result = dc_fixpt_add( + dc_fixpt_mul( point->coeff, - dal_fixed31_32_sub( + dc_fixpt_sub( rgb[point->right_index].b, rgb[point->left_index].b)), rgb[point->left_index].b); else if (point->pos == HW_POINT_POSITION_LEFT) { BREAK_TO_DEBUGGER(); - result = dal_fixed31_32_zero; + result = dc_fixpt_zero; } else { BREAK_TO_DEBUGGER(); - result = dal_fixed31_32_one; + result = dc_fixpt_one; } return result; @@ -558,7 +558,7 @@ static void build_pq(struct pwl_float_data_ex *rgb_regamma, struct fixed31_32 x; struct fixed31_32 output; struct fixed31_32 scaling_factor = - dal_fixed31_32_from_fraction(sdr_white_level, 10000); + dc_fixpt_from_fraction(sdr_white_level, 10000); if (!pq_initialized && sdr_white_level == 80) { precompute_pq(); @@ -579,15 +579,15 @@ static void build_pq(struct pwl_float_data_ex *rgb_regamma, if (sdr_white_level == 80) { output = pq_table[i]; } else { - x = dal_fixed31_32_mul(coord_x->x, scaling_factor); + x = dc_fixpt_mul(coord_x->x, scaling_factor); compute_pq(x, &output); 
} /* should really not happen? */ - if (dal_fixed31_32_lt(output, dal_fixed31_32_zero)) - output = dal_fixed31_32_zero; - else if (dal_fixed31_32_lt(dal_fixed31_32_one, output)) - output = dal_fixed31_32_one; + if (dc_fixpt_lt(output, dc_fixpt_zero)) + output = dc_fixpt_zero; + else if (dc_fixpt_lt(dc_fixpt_one, output)) + output = dc_fixpt_one; rgb->r = output; rgb->g = output; @@ -605,7 +605,7 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq, uint32_t i; struct fixed31_32 output; - struct fixed31_32 scaling_factor = dal_fixed31_32_from_int(125); + struct fixed31_32 scaling_factor = dc_fixpt_from_int(125); if (!de_pq_initialized) { precompute_de_pq(); @@ -616,9 +616,9 @@ static void build_de_pq(struct pwl_float_data_ex *de_pq, for (i = 0; i <= hw_points_num; i++) { output = de_pq_table[i]; /* should really not happen? */ - if (dal_fixed31_32_lt(output, dal_fixed31_32_zero)) - output = dal_fixed31_32_zero; - else if (dal_fixed31_32_lt(scaling_factor, output)) + if (dc_fixpt_lt(output, dc_fixpt_zero)) + output = dc_fixpt_zero; + else if (dc_fixpt_lt(scaling_factor, output)) output = scaling_factor; de_pq[i].r = output; de_pq[i].g = output; @@ -670,9 +670,9 @@ static void build_degamma(struct pwl_float_data_ex *curve, end_index = begin_index + 12 * NUM_PTS_IN_REGION; while (i != begin_index) { - curve[i].r = dal_fixed31_32_zero; - curve[i].g = dal_fixed31_32_zero; - curve[i].b = dal_fixed31_32_zero; + curve[i].r = dc_fixpt_zero; + curve[i].g = dc_fixpt_zero; + curve[i].b = dc_fixpt_zero; i++; } @@ -684,9 +684,9 @@ static void build_degamma(struct pwl_float_data_ex *curve, i++; } while (i != hw_points_num + 1) { - curve[i].r = dal_fixed31_32_one; - curve[i].g = dal_fixed31_32_one; - curve[i].b = dal_fixed31_32_one; + curve[i].r = dc_fixpt_one; + curve[i].g = dc_fixpt_one; + curve[i].b = dc_fixpt_one; i++; } } @@ -695,8 +695,8 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb, const struct dc_gamma *ramp, struct dividers dividers) { - const struct fixed31_32 max_driver = dal_fixed31_32_from_int(0xFFFF); - const struct fixed31_32 max_os = dal_fixed31_32_from_int(0xFF00); + const struct fixed31_32 max_driver = dc_fixpt_from_int(0xFFFF); + const struct fixed31_32 max_os = dc_fixpt_from_int(0xFF00); struct fixed31_32 scaler = max_os; uint32_t i; struct pwl_float_data *rgb = pwl_rgb; @@ -705,9 +705,9 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb, i = 0; do { - if (dal_fixed31_32_lt(max_os, ramp->entries.red[i]) || - dal_fixed31_32_lt(max_os, ramp->entries.green[i]) || - dal_fixed31_32_lt(max_os, ramp->entries.blue[i])) { + if (dc_fixpt_lt(max_os, ramp->entries.red[i]) || + dc_fixpt_lt(max_os, ramp->entries.green[i]) || + dc_fixpt_lt(max_os, ramp->entries.blue[i])) { scaler = max_driver; break; } @@ -717,40 +717,40 @@ static void scale_gamma(struct pwl_float_data *pwl_rgb, i = 0; do { - rgb->r = dal_fixed31_32_div( + rgb->r = dc_fixpt_div( ramp->entries.red[i], scaler); - rgb->g = dal_fixed31_32_div( + rgb->g = dc_fixpt_div( ramp->entries.green[i], scaler); - rgb->b = dal_fixed31_32_div( + rgb->b = dc_fixpt_div( ramp->entries.blue[i], scaler); ++rgb; ++i; } while (i != ramp->num_entries); - rgb->r = dal_fixed31_32_mul(rgb_last->r, + rgb->r = dc_fixpt_mul(rgb_last->r, dividers.divider1); - rgb->g = dal_fixed31_32_mul(rgb_last->g, + rgb->g = dc_fixpt_mul(rgb_last->g, dividers.divider1); - rgb->b = dal_fixed31_32_mul(rgb_last->b, + rgb->b = dc_fixpt_mul(rgb_last->b, dividers.divider1); ++rgb; - rgb->r = dal_fixed31_32_mul(rgb_last->r, + rgb->r = dc_fixpt_mul(rgb_last->r, 
dividers.divider2); - rgb->g = dal_fixed31_32_mul(rgb_last->g, + rgb->g = dc_fixpt_mul(rgb_last->g, dividers.divider2); - rgb->b = dal_fixed31_32_mul(rgb_last->b, + rgb->b = dc_fixpt_mul(rgb_last->b, dividers.divider2); ++rgb; - rgb->r = dal_fixed31_32_mul(rgb_last->r, + rgb->r = dc_fixpt_mul(rgb_last->r, dividers.divider3); - rgb->g = dal_fixed31_32_mul(rgb_last->g, + rgb->g = dc_fixpt_mul(rgb_last->g, dividers.divider3); - rgb->b = dal_fixed31_32_mul(rgb_last->b, + rgb->b = dc_fixpt_mul(rgb_last->b, dividers.divider3); } @@ -759,62 +759,62 @@ static void scale_gamma_dx(struct pwl_float_data *pwl_rgb, struct dividers dividers) { uint32_t i; - struct fixed31_32 min = dal_fixed31_32_zero; - struct fixed31_32 max = dal_fixed31_32_one; + struct fixed31_32 min = dc_fixpt_zero; + struct fixed31_32 max = dc_fixpt_one; - struct fixed31_32 delta = dal_fixed31_32_zero; - struct fixed31_32 offset = dal_fixed31_32_zero; + struct fixed31_32 delta = dc_fixpt_zero; + struct fixed31_32 offset = dc_fixpt_zero; for (i = 0 ; i < ramp->num_entries; i++) { - if (dal_fixed31_32_lt(ramp->entries.red[i], min)) + if (dc_fixpt_lt(ramp->entries.red[i], min)) min = ramp->entries.red[i]; - if (dal_fixed31_32_lt(ramp->entries.green[i], min)) + if (dc_fixpt_lt(ramp->entries.green[i], min)) min = ramp->entries.green[i]; - if (dal_fixed31_32_lt(ramp->entries.blue[i], min)) + if (dc_fixpt_lt(ramp->entries.blue[i], min)) min = ramp->entries.blue[i]; - if (dal_fixed31_32_lt(max, ramp->entries.red[i])) + if (dc_fixpt_lt(max, ramp->entries.red[i])) max = ramp->entries.red[i]; - if (dal_fixed31_32_lt(max, ramp->entries.green[i])) + if (dc_fixpt_lt(max, ramp->entries.green[i])) max = ramp->entries.green[i]; - if (dal_fixed31_32_lt(max, ramp->entries.blue[i])) + if (dc_fixpt_lt(max, ramp->entries.blue[i])) max = ramp->entries.blue[i]; } - if (dal_fixed31_32_lt(min, dal_fixed31_32_zero)) - delta = dal_fixed31_32_neg(min); + if (dc_fixpt_lt(min, dc_fixpt_zero)) + delta = dc_fixpt_neg(min); - offset = dal_fixed31_32_add(min, max); + offset = dc_fixpt_add(min, max); for (i = 0 ; i < ramp->num_entries; i++) { - pwl_rgb[i].r = dal_fixed31_32_div( - dal_fixed31_32_add( + pwl_rgb[i].r = dc_fixpt_div( + dc_fixpt_add( ramp->entries.red[i], delta), offset); - pwl_rgb[i].g = dal_fixed31_32_div( - dal_fixed31_32_add( + pwl_rgb[i].g = dc_fixpt_div( + dc_fixpt_add( ramp->entries.green[i], delta), offset); - pwl_rgb[i].b = dal_fixed31_32_div( - dal_fixed31_32_add( + pwl_rgb[i].b = dc_fixpt_div( + dc_fixpt_add( ramp->entries.blue[i], delta), offset); } - pwl_rgb[i].r = dal_fixed31_32_sub(dal_fixed31_32_mul_int( + pwl_rgb[i].r = dc_fixpt_sub(dc_fixpt_mul_int( pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r); - pwl_rgb[i].g = dal_fixed31_32_sub(dal_fixed31_32_mul_int( + pwl_rgb[i].g = dc_fixpt_sub(dc_fixpt_mul_int( pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g); - pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int( + pwl_rgb[i].b = dc_fixpt_sub(dc_fixpt_mul_int( pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b); ++i; - pwl_rgb[i].r = dal_fixed31_32_sub(dal_fixed31_32_mul_int( + pwl_rgb[i].r = dc_fixpt_sub(dc_fixpt_mul_int( pwl_rgb[i-1].r, 2), pwl_rgb[i-2].r); - pwl_rgb[i].g = dal_fixed31_32_sub(dal_fixed31_32_mul_int( + pwl_rgb[i].g = dc_fixpt_sub(dc_fixpt_mul_int( pwl_rgb[i-1].g, 2), pwl_rgb[i-2].g); - pwl_rgb[i].b = dal_fixed31_32_sub(dal_fixed31_32_mul_int( + pwl_rgb[i].b = dc_fixpt_sub(dc_fixpt_mul_int( pwl_rgb[i-1].b, 2), pwl_rgb[i-2].b); } @@ -846,40 +846,40 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb, i = 0; do { - rgb->r = 
dal_fixed31_32_from_fraction( + rgb->r = dc_fixpt_from_fraction( ramp->gamma[i], scaler); - rgb->g = dal_fixed31_32_from_fraction( + rgb->g = dc_fixpt_from_fraction( ramp->gamma[i + 256], scaler); - rgb->b = dal_fixed31_32_from_fraction( + rgb->b = dc_fixpt_from_fraction( ramp->gamma[i + 512], scaler); ++rgb; ++i; } while (i != GAMMA_RGB_256_ENTRIES); - rgb->r = dal_fixed31_32_mul(rgb_last->r, + rgb->r = dc_fixpt_mul(rgb_last->r, dividers.divider1); - rgb->g = dal_fixed31_32_mul(rgb_last->g, + rgb->g = dc_fixpt_mul(rgb_last->g, dividers.divider1); - rgb->b = dal_fixed31_32_mul(rgb_last->b, + rgb->b = dc_fixpt_mul(rgb_last->b, dividers.divider1); ++rgb; - rgb->r = dal_fixed31_32_mul(rgb_last->r, + rgb->r = dc_fixpt_mul(rgb_last->r, dividers.divider2); - rgb->g = dal_fixed31_32_mul(rgb_last->g, + rgb->g = dc_fixpt_mul(rgb_last->g, dividers.divider2); - rgb->b = dal_fixed31_32_mul(rgb_last->b, + rgb->b = dc_fixpt_mul(rgb_last->b, dividers.divider2); ++rgb; - rgb->r = dal_fixed31_32_mul(rgb_last->r, + rgb->r = dc_fixpt_mul(rgb_last->r, dividers.divider3); - rgb->g = dal_fixed31_32_mul(rgb_last->g, + rgb->g = dc_fixpt_mul(rgb_last->g, dividers.divider3); - rgb->b = dal_fixed31_32_mul(rgb_last->b, + rgb->b = dc_fixpt_mul(rgb_last->b, dividers.divider3); } @@ -913,7 +913,7 @@ static void apply_lut_1d( struct fixed31_32 lut2; const int max_lut_index = 4095; const struct fixed31_32 max_lut_index_f = - dal_fixed31_32_from_int_nonconst(max_lut_index); + dc_fixpt_from_int_nonconst(max_lut_index); int32_t index = 0, index_next = 0; struct fixed31_32 index_f; struct fixed31_32 delta_lut; @@ -931,10 +931,10 @@ static void apply_lut_1d( else regamma_y = &tf_pts->blue[i]; - norm_y = dal_fixed31_32_mul(max_lut_index_f, + norm_y = dc_fixpt_mul(max_lut_index_f, *regamma_y); - index = dal_fixed31_32_floor(norm_y); - index_f = dal_fixed31_32_from_int_nonconst(index); + index = dc_fixpt_floor(norm_y); + index_f = dc_fixpt_from_int_nonconst(index); if (index < 0 || index > max_lut_index) continue; @@ -953,11 +953,11 @@ static void apply_lut_1d( } // we have everything now, so interpolate - delta_lut = dal_fixed31_32_sub(lut2, lut1); - delta_index = dal_fixed31_32_sub(norm_y, index_f); + delta_lut = dc_fixpt_sub(lut2, lut1); + delta_index = dc_fixpt_sub(norm_y, index_f); - *regamma_y = dal_fixed31_32_add(lut1, - dal_fixed31_32_mul(delta_index, delta_lut)); + *regamma_y = dc_fixpt_add(lut1, + dc_fixpt_mul(delta_index, delta_lut)); } } } @@ -973,7 +973,7 @@ static void build_evenly_distributed_points( uint32_t i = 0; do { - struct fixed31_32 value = dal_fixed31_32_from_fraction(i, + struct fixed31_32 value = dc_fixpt_from_fraction(i, numberof_points - 1); p->r = value; @@ -984,21 +984,21 @@ static void build_evenly_distributed_points( ++i; } while (i != numberof_points); - p->r = dal_fixed31_32_div(p_last->r, dividers.divider1); - p->g = dal_fixed31_32_div(p_last->g, dividers.divider1); - p->b = dal_fixed31_32_div(p_last->b, dividers.divider1); + p->r = dc_fixpt_div(p_last->r, dividers.divider1); + p->g = dc_fixpt_div(p_last->g, dividers.divider1); + p->b = dc_fixpt_div(p_last->b, dividers.divider1); ++p; - p->r = dal_fixed31_32_div(p_last->r, dividers.divider2); - p->g = dal_fixed31_32_div(p_last->g, dividers.divider2); - p->b = dal_fixed31_32_div(p_last->b, dividers.divider2); + p->r = dc_fixpt_div(p_last->r, dividers.divider2); + p->g = dc_fixpt_div(p_last->g, dividers.divider2); + p->b = dc_fixpt_div(p_last->b, dividers.divider2); ++p; - p->r = dal_fixed31_32_div(p_last->r, dividers.divider3); - p->g = 
dal_fixed31_32_div(p_last->g, dividers.divider3); - p->b = dal_fixed31_32_div(p_last->b, dividers.divider3); + p->r = dc_fixpt_div(p_last->r, dividers.divider3); + p->g = dc_fixpt_div(p_last->g, dividers.divider3); + p->b = dc_fixpt_div(p_last->b, dividers.divider3); } static inline void copy_rgb_regamma_to_coordinates_x( @@ -1094,7 +1094,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, struct fixed31_32 *tf_point; struct fixed31_32 hw_x; struct fixed31_32 norm_factor = - dal_fixed31_32_from_int_nonconst(255); + dc_fixpt_from_int_nonconst(255); struct fixed31_32 norm_x; struct fixed31_32 index_f; struct fixed31_32 lut1; @@ -1105,9 +1105,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num, i = 0; /* fixed_pt library has problems handling too small values */ while (i != 32) { - tf_pts->red[i] = dal_fixed31_32_zero; - tf_pts->green[i] = dal_fixed31_32_zero; - tf_pts->blue[i] = dal_fixed31_32_zero; + tf_pts->red[i] = dc_fixpt_zero; + tf_pts->green[i] = dc_fixpt_zero; + tf_pts->blue[i] = dc_fixpt_zero; ++i; } while (i <= hw_points_num + 1) { @@ -1129,12 +1129,12 @@ static void interpolate_user_regamma(uint32_t hw_points_num, } else hw_x = coordinates_x[i].x; - norm_x = dal_fixed31_32_mul(norm_factor, hw_x); - index = dal_fixed31_32_floor(norm_x); + norm_x = dc_fixpt_mul(norm_factor, hw_x); + index = dc_fixpt_floor(norm_x); if (index < 0 || index > 255) continue; - index_f = dal_fixed31_32_from_int_nonconst(index); + index_f = dc_fixpt_from_int_nonconst(index); index_next = (index == 255) ? index : index + 1; if (color == 0) { @@ -1149,11 +1149,11 @@ static void interpolate_user_regamma(uint32_t hw_points_num, } // we have everything now, so interpolate - delta_lut = dal_fixed31_32_sub(lut2, lut1); - delta_index = dal_fixed31_32_sub(norm_x, index_f); + delta_lut = dc_fixpt_sub(lut2, lut1); + delta_index = dc_fixpt_sub(norm_x, index_f); - *tf_point = dal_fixed31_32_add(lut1, - dal_fixed31_32_mul(delta_index, delta_lut)); + *tf_point = dc_fixpt_add(lut1, + dc_fixpt_mul(delta_index, delta_lut)); } ++i; } @@ -1168,15 +1168,15 @@ static void build_new_custom_resulted_curve( i = 0; while (i != hw_points_num + 1) { - tf_pts->red[i] = dal_fixed31_32_clamp( - tf_pts->red[i], dal_fixed31_32_zero, - dal_fixed31_32_one); - tf_pts->green[i] = dal_fixed31_32_clamp( - tf_pts->green[i], dal_fixed31_32_zero, - dal_fixed31_32_one); - tf_pts->blue[i] = dal_fixed31_32_clamp( - tf_pts->blue[i], dal_fixed31_32_zero, - dal_fixed31_32_one); + tf_pts->red[i] = dc_fixpt_clamp( + tf_pts->red[i], dc_fixpt_zero, + dc_fixpt_one); + tf_pts->green[i] = dc_fixpt_clamp( + tf_pts->green[i], dc_fixpt_zero, + dc_fixpt_one); + tf_pts->blue[i] = dc_fixpt_clamp( + tf_pts->blue[i], dc_fixpt_zero, + dc_fixpt_one); ++i; } @@ -1290,9 +1290,9 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, if (!coeff) goto coeff_alloc_fail; - dividers.divider1 = dal_fixed31_32_from_fraction(3, 2); - dividers.divider2 = dal_fixed31_32_from_int(2); - dividers.divider3 = dal_fixed31_32_from_fraction(5, 2); + dividers.divider1 = dc_fixpt_from_fraction(3, 2); + dividers.divider2 = dc_fixpt_from_int(2); + dividers.divider3 = dc_fixpt_from_fraction(5, 2); tf = output_tf->tf; @@ -1357,15 +1357,15 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, uint32_t i = 0; do { - coeff.a0[i] = dal_fixed31_32_from_fraction( + coeff.a0[i] = dc_fixpt_from_fraction( regamma->coeff.A0[i], 10000000); - coeff.a1[i] = dal_fixed31_32_from_fraction( + coeff.a1[i] = dc_fixpt_from_fraction( 
regamma->coeff.A1[i], 1000); - coeff.a2[i] = dal_fixed31_32_from_fraction( + coeff.a2[i] = dc_fixpt_from_fraction( regamma->coeff.A2[i], 1000); - coeff.a3[i] = dal_fixed31_32_from_fraction( + coeff.a3[i] = dc_fixpt_from_fraction( regamma->coeff.A3[i], 1000); - coeff.user_gamma[i] = dal_fixed31_32_from_fraction( + coeff.user_gamma[i] = dc_fixpt_from_fraction( regamma->coeff.gamma[i], 1000); ++i; @@ -1374,9 +1374,9 @@ bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, i = 0; /* fixed_pt library has problems handling too small values */ while (i != 32) { - output_tf->tf_pts.red[i] = dal_fixed31_32_zero; - output_tf->tf_pts.green[i] = dal_fixed31_32_zero; - output_tf->tf_pts.blue[i] = dal_fixed31_32_zero; + output_tf->tf_pts.red[i] = dc_fixpt_zero; + output_tf->tf_pts.green[i] = dc_fixpt_zero; + output_tf->tf_pts.blue[i] = dc_fixpt_zero; ++coord_x; ++i; } @@ -1423,9 +1423,9 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf, if (!rgb_regamma) goto rgb_regamma_alloc_fail; - dividers.divider1 = dal_fixed31_32_from_fraction(3, 2); - dividers.divider2 = dal_fixed31_32_from_int(2); - dividers.divider3 = dal_fixed31_32_from_fraction(5, 2); + dividers.divider1 = dc_fixpt_from_fraction(3, 2); + dividers.divider2 = dc_fixpt_from_int(2); + dividers.divider3 = dc_fixpt_from_fraction(5, 2); scale_user_regamma_ramp(rgb_user, ®amma->ramp, dividers); @@ -1496,9 +1496,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf, if (!coeff) goto coeff_alloc_fail; - dividers.divider1 = dal_fixed31_32_from_fraction(3, 2); - dividers.divider2 = dal_fixed31_32_from_int(2); - dividers.divider3 = dal_fixed31_32_from_fraction(5, 2); + dividers.divider1 = dc_fixpt_from_fraction(3, 2); + dividers.divider2 = dc_fixpt_from_int(2); + dividers.divider3 = dc_fixpt_from_fraction(5, 2); tf = input_tf->tf; -- cgit v1.2.1 From f3ba7a2fd1ebffe7fc6a9c524754db05dcd0c0e4 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 18 Apr 2018 13:54:24 -0400 Subject: drm/amd/display: inline more of fixed point code Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 156 ++-------------- drivers/gpu/drm/amd/display/include/fixed31_32.h | 207 ++++++++++++--------- .../drm/amd/display/modules/color/color_gamma.c | 8 +- 3 files changed, 135 insertions(+), 236 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c index e398ecdf742c..e61dd97d0928 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c @@ -64,9 +64,7 @@ static inline unsigned long long complete_integer_division_u64( #define GET_FRACTIONAL_PART(x) \ (FRACTIONAL_PART_MASK & (x)) -struct fixed31_32 dc_fixpt_from_fraction( - long long numerator, - long long denominator) +struct fixed31_32 dc_fixpt_from_fraction(long long numerator, long long denominator) { struct fixed31_32 res; @@ -118,63 +116,7 @@ struct fixed31_32 dc_fixpt_from_fraction( return res; } -struct fixed31_32 dc_fixpt_from_int_nonconst( - long long arg) -{ - struct fixed31_32 res; - - ASSERT((LONG_MIN <= arg) && (arg <= LONG_MAX)); - - res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART; - - return res; -} - -struct fixed31_32 dc_fixpt_shl( - struct fixed31_32 arg, - unsigned char shift) -{ - struct fixed31_32 res; - - ASSERT(((arg.value >= 0) && (arg.value <= 
LLONG_MAX >> shift)) || - ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift))); - - res.value = arg.value << shift; - - return res; -} - -struct fixed31_32 dc_fixpt_add( - struct fixed31_32 arg1, - struct fixed31_32 arg2) -{ - struct fixed31_32 res; - - ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || - ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value))); - - res.value = arg1.value + arg2.value; - - return res; -} - -struct fixed31_32 dc_fixpt_sub( - struct fixed31_32 arg1, - struct fixed31_32 arg2) -{ - struct fixed31_32 res; - - ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || - ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value))); - - res.value = arg1.value - arg2.value; - - return res; -} - -struct fixed31_32 dc_fixpt_mul( - struct fixed31_32 arg1, - struct fixed31_32 arg2) +struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2) { struct fixed31_32 res; @@ -225,8 +167,7 @@ struct fixed31_32 dc_fixpt_mul( return res; } -struct fixed31_32 dc_fixpt_sqr( - struct fixed31_32 arg) +struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg) { struct fixed31_32 res; @@ -266,8 +207,7 @@ struct fixed31_32 dc_fixpt_sqr( return res; } -struct fixed31_32 dc_fixpt_recip( - struct fixed31_32 arg) +struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg) { /* * @note @@ -281,8 +221,7 @@ struct fixed31_32 dc_fixpt_recip( arg.value); } -struct fixed31_32 dc_fixpt_sinc( - struct fixed31_32 arg) +struct fixed31_32 dc_fixpt_sinc(struct fixed31_32 arg) { struct fixed31_32 square; @@ -326,16 +265,14 @@ struct fixed31_32 dc_fixpt_sinc( return res; } -struct fixed31_32 dc_fixpt_sin( - struct fixed31_32 arg) +struct fixed31_32 dc_fixpt_sin(struct fixed31_32 arg) { return dc_fixpt_mul( arg, dc_fixpt_sinc(arg)); } -struct fixed31_32 dc_fixpt_cos( - struct fixed31_32 arg) +struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg) { /* TODO implement argument normalization */ @@ -367,8 +304,7 @@ struct fixed31_32 dc_fixpt_cos( * * Calculated as Taylor series. 
*/ -static struct fixed31_32 fixed31_32_exp_from_taylor_series( - struct fixed31_32 arg) +static struct fixed31_32 fixed31_32_exp_from_taylor_series(struct fixed31_32 arg) { unsigned int n = 9; @@ -396,8 +332,7 @@ static struct fixed31_32 fixed31_32_exp_from_taylor_series( res)); } -struct fixed31_32 dc_fixpt_exp( - struct fixed31_32 arg) +struct fixed31_32 dc_fixpt_exp(struct fixed31_32 arg) { /* * @brief @@ -440,8 +375,7 @@ struct fixed31_32 dc_fixpt_exp( return dc_fixpt_one; } -struct fixed31_32 dc_fixpt_log( - struct fixed31_32 arg) +struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg) { struct fixed31_32 res = dc_fixpt_neg(dc_fixpt_one); /* TODO improve 1st estimation */ @@ -472,61 +406,6 @@ struct fixed31_32 dc_fixpt_log( return res; } -struct fixed31_32 dc_fixpt_pow( - struct fixed31_32 arg1, - struct fixed31_32 arg2) -{ - return dc_fixpt_exp( - dc_fixpt_mul( - dc_fixpt_log(arg1), - arg2)); -} - -int dc_fixpt_floor( - struct fixed31_32 arg) -{ - unsigned long long arg_value = abs_i64(arg.value); - - if (arg.value >= 0) - return (int)GET_INTEGER_PART(arg_value); - else - return -(int)GET_INTEGER_PART(arg_value); -} - -int dc_fixpt_round( - struct fixed31_32 arg) -{ - unsigned long long arg_value = abs_i64(arg.value); - - const long long summand = dc_fixpt_half.value; - - ASSERT(LLONG_MAX - (long long)arg_value >= summand); - - arg_value += summand; - - if (arg.value >= 0) - return (int)GET_INTEGER_PART(arg_value); - else - return -(int)GET_INTEGER_PART(arg_value); -} - -int dc_fixpt_ceil( - struct fixed31_32 arg) -{ - unsigned long long arg_value = abs_i64(arg.value); - - const long long summand = dc_fixpt_one.value - - dc_fixpt_epsilon.value; - - ASSERT(LLONG_MAX - (long long)arg_value >= summand); - - arg_value += summand; - - if (arg.value >= 0) - return (int)GET_INTEGER_PART(arg_value); - else - return -(int)GET_INTEGER_PART(arg_value); -} /* this function is a generic helper to translate fixed point value to * specified integer format that will consist of integer_bits integer part and @@ -570,32 +449,27 @@ static inline unsigned int clamp_ux_dy( return min_clamp; } -unsigned int dc_fixpt_u2d19( - struct fixed31_32 arg) +unsigned int dc_fixpt_u2d19(struct fixed31_32 arg) { return ux_dy(arg.value, 2, 19); } -unsigned int dc_fixpt_u0d19( - struct fixed31_32 arg) +unsigned int dc_fixpt_u0d19(struct fixed31_32 arg) { return ux_dy(arg.value, 0, 19); } -unsigned int dc_fixpt_clamp_u0d14( - struct fixed31_32 arg) +unsigned int dc_fixpt_clamp_u0d14(struct fixed31_32 arg) { return clamp_ux_dy(arg.value, 0, 14, 1); } -unsigned int dc_fixpt_clamp_u0d10( - struct fixed31_32 arg) +unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg) { return clamp_ux_dy(arg.value, 0, 10, 1); } -int dc_fixpt_s4d19( - struct fixed31_32 arg) +int dc_fixpt_s4d19(struct fixed31_32 arg) { if (arg.value < 0) return -(int)ux_dy(dc_fixpt_abs(arg).value, 4, 19); diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index b5b8d7dea373..ebfd33e91ee8 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -70,24 +70,19 @@ static const struct fixed31_32 dc_fixpt_ln2_div_2 = { 1488522236LL }; * @brief * result = numerator / denominator */ -struct fixed31_32 dc_fixpt_from_fraction( - long long numerator, - long long denominator); +struct fixed31_32 dc_fixpt_from_fraction(long long numerator, long long denominator); /* * @brief * result = arg */ -struct fixed31_32 dc_fixpt_from_int_nonconst(long long 
arg); -static inline struct fixed31_32 dc_fixpt_from_int(long long arg) +static inline struct fixed31_32 dc_fixpt_from_int(int arg) { - if (__builtin_constant_p(arg)) { - struct fixed31_32 res; - BUILD_BUG_ON((LONG_MIN > arg) || (arg > LONG_MAX)); - res.value = arg << FIXED31_32_BITS_PER_FRACTIONAL_PART; - return res; - } else - return dc_fixpt_from_int_nonconst(arg); + struct fixed31_32 res; + + res.value = (long long) arg << FIXED31_32_BITS_PER_FRACTIONAL_PART; + + return res; } /* @@ -129,8 +124,7 @@ static inline struct fixed31_32 dc_fixpt_abs(struct fixed31_32 arg) * @brief * result = arg1 < arg2 */ -static inline bool dc_fixpt_lt(struct fixed31_32 arg1, - struct fixed31_32 arg2) +static inline bool dc_fixpt_lt(struct fixed31_32 arg1, struct fixed31_32 arg2) { return arg1.value < arg2.value; } @@ -139,8 +133,7 @@ static inline bool dc_fixpt_lt(struct fixed31_32 arg1, * @brief * result = arg1 <= arg2 */ -static inline bool dc_fixpt_le(struct fixed31_32 arg1, - struct fixed31_32 arg2) +static inline bool dc_fixpt_le(struct fixed31_32 arg1, struct fixed31_32 arg2) { return arg1.value <= arg2.value; } @@ -149,8 +142,7 @@ static inline bool dc_fixpt_le(struct fixed31_32 arg1, * @brief * result = arg1 == arg2 */ -static inline bool dc_fixpt_eq(struct fixed31_32 arg1, - struct fixed31_32 arg2) +static inline bool dc_fixpt_eq(struct fixed31_32 arg1, struct fixed31_32 arg2) { return arg1.value == arg2.value; } @@ -159,8 +151,7 @@ static inline bool dc_fixpt_eq(struct fixed31_32 arg1, * @brief * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2 */ -static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1, - struct fixed31_32 arg2) +static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1, struct fixed31_32 arg2) { if (arg1.value <= arg2.value) return arg1; @@ -172,8 +163,7 @@ static inline struct fixed31_32 dc_fixpt_min(struct fixed31_32 arg1, * @brief * result = max(arg1, arg2) := (arg1 <= arg2) ? 
arg2 : arg1 */ -static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1, - struct fixed31_32 arg2) +static inline struct fixed31_32 dc_fixpt_max(struct fixed31_32 arg1, struct fixed31_32 arg2) { if (arg1.value <= arg2.value) return arg2; @@ -209,17 +199,23 @@ static inline struct fixed31_32 dc_fixpt_clamp( * @brief * result = arg << shift */ -struct fixed31_32 dc_fixpt_shl( - struct fixed31_32 arg, - unsigned char shift); +static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift) +{ + struct fixed31_32 res; + + ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || + ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift))); + + res.value = arg.value << shift; + + return res; +} /* * @brief * result = arg >> shift */ -static inline struct fixed31_32 dc_fixpt_shr( - struct fixed31_32 arg, - unsigned char shift) +static inline struct fixed31_32 dc_fixpt_shr(struct fixed31_32 arg, unsigned char shift) { struct fixed31_32 res; res.value = arg.value >> shift; @@ -235,38 +231,50 @@ static inline struct fixed31_32 dc_fixpt_shr( * @brief * result = arg1 + arg2 */ -struct fixed31_32 dc_fixpt_add( - struct fixed31_32 arg1, - struct fixed31_32 arg2); +static inline struct fixed31_32 dc_fixpt_add(struct fixed31_32 arg1, struct fixed31_32 arg2) +{ + struct fixed31_32 res; + + ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) || + ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value))); + + res.value = arg1.value + arg2.value; + + return res; +} /* * @brief * result = arg1 + arg2 */ -static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1, - int arg2) +static inline struct fixed31_32 dc_fixpt_add_int(struct fixed31_32 arg1, int arg2) { - return dc_fixpt_add(arg1, - dc_fixpt_from_int(arg2)); + return dc_fixpt_add(arg1, dc_fixpt_from_int(arg2)); } /* * @brief * result = arg1 - arg2 */ -struct fixed31_32 dc_fixpt_sub( - struct fixed31_32 arg1, - struct fixed31_32 arg2); +static inline struct fixed31_32 dc_fixpt_sub(struct fixed31_32 arg1, struct fixed31_32 arg2) +{ + struct fixed31_32 res; + + ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) || + ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value))); + + res.value = arg1.value - arg2.value; + + return res; +} /* * @brief * result = arg1 - arg2 */ -static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1, - int arg2) +static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1, int arg2) { - return dc_fixpt_sub(arg1, - dc_fixpt_from_int(arg2)); + return dc_fixpt_sub(arg1, dc_fixpt_from_int(arg2)); } @@ -279,49 +287,40 @@ static inline struct fixed31_32 dc_fixpt_sub_int(struct fixed31_32 arg1, * @brief * result = arg1 * arg2 */ -struct fixed31_32 dc_fixpt_mul( - struct fixed31_32 arg1, - struct fixed31_32 arg2); +struct fixed31_32 dc_fixpt_mul(struct fixed31_32 arg1, struct fixed31_32 arg2); /* * @brief * result = arg1 * arg2 */ -static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1, - int arg2) +static inline struct fixed31_32 dc_fixpt_mul_int(struct fixed31_32 arg1, int arg2) { - return dc_fixpt_mul(arg1, - dc_fixpt_from_int(arg2)); + return dc_fixpt_mul(arg1, dc_fixpt_from_int(arg2)); } /* * @brief * result = square(arg) := arg * arg */ -struct fixed31_32 dc_fixpt_sqr( - struct fixed31_32 arg); +struct fixed31_32 dc_fixpt_sqr(struct fixed31_32 arg); /* * @brief * result = arg1 / arg2 */ -static inline struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1, - long long arg2) +static inline 
struct fixed31_32 dc_fixpt_div_int(struct fixed31_32 arg1, long long arg2) { - return dc_fixpt_from_fraction(arg1.value, - dc_fixpt_from_int(arg2).value); + return dc_fixpt_from_fraction(arg1.value, dc_fixpt_from_int(arg2).value); } /* * @brief * result = arg1 / arg2 */ -static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1, - struct fixed31_32 arg2) +static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1, struct fixed31_32 arg2) { - return dc_fixpt_from_fraction(arg1.value, - arg2.value); + return dc_fixpt_from_fraction(arg1.value, arg2.value); } /* @@ -336,8 +335,7 @@ static inline struct fixed31_32 dc_fixpt_div(struct fixed31_32 arg1, * @note * No special actions taken in case argument is zero. */ -struct fixed31_32 dc_fixpt_recip( - struct fixed31_32 arg); +struct fixed31_32 dc_fixpt_recip(struct fixed31_32 arg); /* * @brief @@ -352,8 +350,7 @@ struct fixed31_32 dc_fixpt_recip( * Argument specified in radians, * internally it's normalized to [-2pi...2pi] range. */ -struct fixed31_32 dc_fixpt_sinc( - struct fixed31_32 arg); +struct fixed31_32 dc_fixpt_sinc(struct fixed31_32 arg); /* * @brief @@ -363,8 +360,7 @@ struct fixed31_32 dc_fixpt_sinc( * Argument specified in radians, * internally it's normalized to [-2pi...2pi] range. */ -struct fixed31_32 dc_fixpt_sin( - struct fixed31_32 arg); +struct fixed31_32 dc_fixpt_sin(struct fixed31_32 arg); /* * @brief @@ -376,8 +372,7 @@ struct fixed31_32 dc_fixpt_sin( * passing arguments outside that range * will cause incorrect result! */ -struct fixed31_32 dc_fixpt_cos( - struct fixed31_32 arg); +struct fixed31_32 dc_fixpt_cos(struct fixed31_32 arg); /* * @brief @@ -391,8 +386,7 @@ struct fixed31_32 dc_fixpt_cos( * @note * Currently, function is verified for abs(arg) <= 1. */ -struct fixed31_32 dc_fixpt_exp( - struct fixed31_32 arg); +struct fixed31_32 dc_fixpt_exp(struct fixed31_32 arg); /* * @brief @@ -404,8 +398,7 @@ struct fixed31_32 dc_fixpt_exp( * Currently, no special actions taken * in case of invalid argument(s). Take care! */ -struct fixed31_32 dc_fixpt_log( - struct fixed31_32 arg); +struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg); /* * @brief @@ -419,9 +412,13 @@ struct fixed31_32 dc_fixpt_log( * @note * Currently, abs(arg1) should be less than 1. Take care! */ -struct fixed31_32 dc_fixpt_pow( - struct fixed31_32 arg1, - struct fixed31_32 arg2); +static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2) +{ + return dc_fixpt_exp( + dc_fixpt_mul( + dc_fixpt_log(arg1), + arg2)); +} /* * @brief @@ -432,22 +429,56 @@ struct fixed31_32 dc_fixpt_pow( * @brief * result = floor(arg) := greatest integer lower than or equal to arg */ -int dc_fixpt_floor( - struct fixed31_32 arg); +static inline int dc_fixpt_floor(struct fixed31_32 arg) +{ + unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value; + + if (arg.value >= 0) + return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); + else + return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); +} /* * @brief * result = round(arg) := integer nearest to arg */ -int dc_fixpt_round( - struct fixed31_32 arg); +static inline int dc_fixpt_round(struct fixed31_32 arg) +{ + unsigned long long arg_value = arg.value > 0 ? 
arg.value : -arg.value; + + const long long summand = dc_fixpt_half.value; + + ASSERT(LLONG_MAX - (long long)arg_value >= summand); + + arg_value += summand; + + if (arg.value >= 0) + return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); + else + return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); +} /* * @brief * result = ceil(arg) := lowest integer greater than or equal to arg */ -int dc_fixpt_ceil( - struct fixed31_32 arg); +static inline int dc_fixpt_ceil(struct fixed31_32 arg) +{ + unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value; + + const long long summand = dc_fixpt_one.value - + dc_fixpt_epsilon.value; + + ASSERT(LLONG_MAX - (long long)arg_value >= summand); + + arg_value += summand; + + if (arg.value >= 0) + return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); + else + return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART); +} /* the following two function are used in scaler hw programming to convert fixed * point value to format 2 bits from integer part and 19 bits from fractional @@ -455,20 +486,14 @@ int dc_fixpt_ceil( * fractional */ -unsigned int dc_fixpt_u2d19( - struct fixed31_32 arg); - -unsigned int dc_fixpt_u0d19( - struct fixed31_32 arg); +unsigned int dc_fixpt_u2d19(struct fixed31_32 arg); +unsigned int dc_fixpt_u0d19(struct fixed31_32 arg); -unsigned int dc_fixpt_clamp_u0d14( - struct fixed31_32 arg); +unsigned int dc_fixpt_clamp_u0d14(struct fixed31_32 arg); -unsigned int dc_fixpt_clamp_u0d10( - struct fixed31_32 arg); +unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg); -int dc_fixpt_s4d19( - struct fixed31_32 arg); +int dc_fixpt_s4d19(struct fixed31_32 arg); #endif diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 29d2ec82b924..e803b375e835 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -913,7 +913,7 @@ static void apply_lut_1d( struct fixed31_32 lut2; const int max_lut_index = 4095; const struct fixed31_32 max_lut_index_f = - dc_fixpt_from_int_nonconst(max_lut_index); + dc_fixpt_from_int(max_lut_index); int32_t index = 0, index_next = 0; struct fixed31_32 index_f; struct fixed31_32 delta_lut; @@ -934,7 +934,7 @@ static void apply_lut_1d( norm_y = dc_fixpt_mul(max_lut_index_f, *regamma_y); index = dc_fixpt_floor(norm_y); - index_f = dc_fixpt_from_int_nonconst(index); + index_f = dc_fixpt_from_int(index); if (index < 0 || index > max_lut_index) continue; @@ -1094,7 +1094,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, struct fixed31_32 *tf_point; struct fixed31_32 hw_x; struct fixed31_32 norm_factor = - dc_fixpt_from_int_nonconst(255); + dc_fixpt_from_int(255); struct fixed31_32 norm_x; struct fixed31_32 index_f; struct fixed31_32 lut1; @@ -1134,7 +1134,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num, if (index < 0 || index > 255) continue; - index_f = dc_fixpt_from_int_nonconst(index); + index_f = dc_fixpt_from_int(index); index_next = (index == 255) ? 
index : index + 1; if (color == 0) { -- cgit v1.2.1 From e8838df1cb987fe690dfd069824ff08107327607 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 19 Apr 2018 10:05:22 -0400 Subject: drm/amd/display: Make DisplayStats work with just DC DisplayStats minor Remove dependency on the old FREESYNC_SW_STATS log mask used by DAL2 Also rename from profiling to displaystats Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/basics/logger.c | 2 +- drivers/gpu/drm/amd/display/include/logger_types.h | 2 +- drivers/gpu/drm/amd/display/modules/stats/stats.c | 81 ++++++++++++---------- 3 files changed, 46 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c index 31bee054f43a..0001a3c5b862 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/logger.c +++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c @@ -61,7 +61,7 @@ static const struct dc_log_type_info log_type_info_tbl[] = { {LOG_EVENT_UNDERFLOW, "Underflow"}, {LOG_IF_TRACE, "InterfaceTrace"}, {LOG_DTN, "DTN"}, - {LOG_PROFILING, "Profiling"} + {LOG_DISPLAYSTATS, "DisplayStats"} }; diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index b608a0830801..0a540b9897a6 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -98,7 +98,7 @@ enum dc_log_type { LOG_EVENT_UNDERFLOW, LOG_IF_TRACE, LOG_PERF_TRACE, - LOG_PROFILING, + LOG_DISPLAYSTATS, LOG_SECTION_TOTAL_COUNT }; diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index 48e02197919f..d16aac7b30b3 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -177,44 +177,51 @@ void mod_stats_dump(struct mod_stats *mod_stats) logger = dc->ctx->logger; time = core_stats->time; - //LogEntry* pLog = GetLog()->Open(LogMajor_ISR, LogMinor_ISR_FreeSyncSW); - - //if (!pLog->IsDummyEntry()) - { - dm_logger_write(logger, LOG_PROFILING, "==Display Caps==\n"); - dm_logger_write(logger, LOG_PROFILING, "\n"); - dm_logger_write(logger, LOG_PROFILING, "\n"); - - dm_logger_write(logger, LOG_PROFILING, "==Stats==\n"); - dm_logger_write(logger, LOG_PROFILING, - "render avgRender minWindow midPoint maxWindow vsyncToFlip flipToVsync #vsyncBetweenFlip #frame insertDuration vTotalMin vTotalMax eventTrigs vSyncTime1 vSyncTime2 vSyncTime3 vSyncTime4 vSyncTime5 flags\n"); - - for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) { - dm_logger_write(logger, LOG_PROFILING, - "%u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u %u", - time[i].render_time_in_us, - time[i].avg_render_time_in_us_last_ten, - time[i].min_window, - time[i].lfc_mid_point_in_us, - time[i].max_window, - time[i].vsync_to_flip_time_in_us, - time[i].flip_to_vsync_time_in_us, - time[i].num_vsync_between_flips, - time[i].num_frames_inserted, - time[i].inserted_duration_in_us, - time[i].v_total_min, - time[i].v_total_max, - time[i].event_triggers, - time[i].v_sync_time_in_us[0], - time[i].v_sync_time_in_us[1], - time[i].v_sync_time_in_us[2], - time[i].v_sync_time_in_us[3], - time[i].v_sync_time_in_us[4], - time[i].flags); - } + dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Caps=="); + dm_logger_write(logger, LOG_DISPLAYSTATS, " "); + + dm_logger_write(logger, 
LOG_DISPLAYSTATS, "==Display Stats=="); + dm_logger_write(logger, LOG_DISPLAYSTATS, " "); + + dm_logger_write(logger, LOG_DISPLAYSTATS, + "%10s %10s %10s %10s %10s" + " %11s %11s %17s %10s %14s" + " %10s %10s %10s %10s %10s" + " %10s %10s %10s %10s", + "render", "avgRender", + "minWindow", "midPoint", "maxWindow", + "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip", + "numFrame", "insertDuration", + "vTotalMin", "vTotalMax", "eventTrigs", + "vSyncTime1", "vSyncTime2", "vSyncTime3", + "vSyncTime4", "vSyncTime5", "flags"); + + for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) { + dm_logger_write(logger, LOG_DISPLAYSTATS, + "%10u %10u %10u %10u %10u" + " %11u %11u %17u %10u %14u" + " %10u %10u %10u %10u %10u" + " %10u %10u %10u %10u", + time[i].render_time_in_us, + time[i].avg_render_time_in_us_last_ten, + time[i].min_window, + time[i].lfc_mid_point_in_us, + time[i].max_window, + time[i].vsync_to_flip_time_in_us, + time[i].flip_to_vsync_time_in_us, + time[i].num_vsync_between_flips, + time[i].num_frames_inserted, + time[i].inserted_duration_in_us, + time[i].v_total_min, + time[i].v_total_max, + time[i].event_triggers, + time[i].v_sync_time_in_us[0], + time[i].v_sync_time_in_us[1], + time[i].v_sync_time_in_us[2], + time[i].v_sync_time_in_us[3], + time[i].v_sync_time_in_us[4], + time[i].flags); } - //GetLog()->Close(pLog); - //GetLog()->UnSetLogMask(LogMajor_ISR, LogMinor_ISR_FreeSyncSW); } void mod_stats_reset_data(struct mod_stats *mod_stats) -- cgit v1.2.1 From ab9c2062d960df84d41c03efc49cb01071b398c6 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 18 Apr 2018 14:11:43 -0400 Subject: drm/amd/display: add fixed point fractional bit truncation function Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/fixed31_32.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index ebfd33e91ee8..61f11e23bf70 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -496,4 +496,21 @@ unsigned int dc_fixpt_clamp_u0d10(struct fixed31_32 arg); int dc_fixpt_s4d19(struct fixed31_32 arg); +static inline struct fixed31_32 dc_fixpt_truncate(struct fixed31_32 arg, unsigned int frac_bits) +{ + bool negative = arg.value < 0; + + if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) { + ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART); + return arg; + } + + if (negative) + arg.value = -arg.value; + arg.value &= (~0LL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits); + if (negative) + arg.value = -arg.value; + return arg; +} + #endif -- cgit v1.2.1 From 0002d3ac8aadcb2850475557de32234b447ba502 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Wed, 18 Apr 2018 14:19:23 -0400 Subject: drm/amd/display: truncate scaling ratios and inits to 19 bit precision Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 25 +++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 082458f2097c..751f3ac9d921 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ 
b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -652,6 +652,14 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.ratios.horz_c.value /= 2; pipe_ctx->plane_res.scl_data.ratios.vert_c.value /= 2; } + pipe_ctx->plane_res.scl_data.ratios.horz = dc_fixpt_truncate( + pipe_ctx->plane_res.scl_data.ratios.horz, 19); + pipe_ctx->plane_res.scl_data.ratios.vert = dc_fixpt_truncate( + pipe_ctx->plane_res.scl_data.ratios.vert, 19); + pipe_ctx->plane_res.scl_data.ratios.horz_c = dc_fixpt_truncate( + pipe_ctx->plane_res.scl_data.ratios.horz_c, 19); + pipe_ctx->plane_res.scl_data.ratios.vert_c = dc_fixpt_truncate( + pipe_ctx->plane_res.scl_data.ratios.vert_c, 19); } static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *recout_skip) @@ -688,17 +696,18 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx, struct view *r * init_bot = init + scaling_ratio * init_c = init + truncated_vp_c_offset(from calculate viewport) */ - data->inits.h = dc_fixpt_div_int( - dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2); + data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19); - data->inits.h_c = dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int( - dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)); + data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19); - data->inits.v = dc_fixpt_div_int( - dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2); + data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19); + + data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int( + dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19); - data->inits.v_c = dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int( - dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)); /* Adjust for viewport end clip-off */ -- cgit v1.2.1 From 3ba43a59927fbde07414393dfc2b6753cb233e00 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Wed, 18 Apr 2018 14:31:41 -0400 Subject: drm/amd/display: underflow/blankscreen recovery [Description] for any reason, if driver detects HUBP underflow, if a debug option enabled to enable recovery. it will kick in a sequence of recovery. 
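The register-level sequence is spelled out in the DCHUBP_CNTL/DCHUBBUB_SOFT_RESET
comment list inside dcn10_hw_wa_force_recovery() below. As a minimal sketch only,
built from the hooks this patch adds and following that comment list (it is not the
driver's actual code: the per-pipe iteration, NULL checks and surrounding state
handling of the real function are omitted, and it assumes the dcn10 hubp/hubbub
headers), recovering a single pipe would look roughly like:

static void recovery_sequence_sketch(struct dc *dc, struct hubp *hubp)
{
	/* blank the pipe: DCHUBP_CNTL:HUBP_BLANK_EN = 1 */
	hubp->funcs->set_hubp_blank_en(hubp, true);
	/* DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET = 1 */
	hubbub1_soft_reset(dc->res_pool->hubbub, true);
	/* DCHUBP_CNTL:HUBP_DISABLE = 1, then 0 */
	hubp->funcs->hubp_disable_control(hubp, true);
	hubp->funcs->hubp_disable_control(hubp, false);
	/* DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET = 0 */
	hubbub1_soft_reset(dc->res_pool->hubbub, false);
	/* the full sequence also re-programs DCSURF_PRIMARY_SURFACE_ADDRESS here */
	/* unblank: DCHUBP_CNTL:HUBP_BLANK_EN = 0 */
	hubp->funcs->set_hubp_blank_en(hubp, false);
}

The name recovery_sequence_sketch is hypothetical; the patch implements this as
dcn10_hw_wa_force_recovery(), gated on the new dc->debug.recovery_enabled option.
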
Signed-off-by: Charlene Liu Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 + .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 8 ++ .../gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 7 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 24 ++++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 3 + .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 90 +++++++++++++++++++++- .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 + drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h | 2 + 8 files changed, 135 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index cd4f4341cb53..1c39c9996a04 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -239,6 +239,8 @@ struct dc_debug { bool az_endpoint_mute_only; bool always_use_regamma; bool p010_mpo_support; + bool recovery_enabled; + }; struct dc_state; struct resource_pool; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c index b9fb14a3224b..943143efbb82 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c @@ -476,6 +476,14 @@ void hubbub1_toggle_watermark_change_req(struct hubbub *hubbub) DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, watermark_change_req); } +void hubbub1_soft_reset(struct hubbub *hubbub, bool reset) +{ + uint32_t reset_en = reset ? 1 : 0; + + REG_UPDATE(DCHUBBUB_SOFT_RESET, + DCHUBBUB_GLOBAL_SOFT_RESET, reset_en); +} + static bool hubbub1_dcc_support_swizzle( enum swizzle_mode_values swizzle, unsigned int bytes_per_element, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h index f479f54e5bb2..6315a0e6b0d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h @@ -48,7 +48,8 @@ SR(DCHUBBUB_ARB_DF_REQ_OUTSTAND),\ SR(DCHUBBUB_GLOBAL_TIMER_CNTL), \ SR(DCHUBBUB_TEST_DEBUG_INDEX), \ - SR(DCHUBBUB_TEST_DEBUG_DATA) + SR(DCHUBBUB_TEST_DEBUG_DATA),\ + SR(DCHUBBUB_SOFT_RESET) #define HUBBUB_SR_WATERMARK_REG_LIST()\ SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\ @@ -105,6 +106,7 @@ struct dcn_hubbub_registers { uint32_t DCHUBBUB_SDPIF_AGP_BOT; uint32_t DCHUBBUB_SDPIF_AGP_TOP; uint32_t DCHUBBUB_CRC_CTRL; + uint32_t DCHUBBUB_SOFT_RESET; }; /* set field name */ @@ -114,6 +116,7 @@ struct dcn_hubbub_registers { #define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \ + HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, mask_sh), \ HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE, mask_sh), \ @@ -143,6 +146,7 @@ struct dcn_hubbub_registers { type DCHUBBUB_ARB_SAT_LEVEL;\ type DCHUBBUB_ARB_MIN_REQ_OUTSTAND;\ type DCHUBBUB_GLOBAL_TIMER_REFDIV;\ + type DCHUBBUB_GLOBAL_SOFT_RESET; \ type SDPIF_FB_TOP;\ type SDPIF_FB_BASE;\ type SDPIF_FB_OFFSET;\ @@ -201,6 +205,7 @@ void hubbub1_toggle_watermark_change_req( void hubbub1_wm_read_state(struct hubbub *hubbub, struct dcn_hubbub_wm *wm); +void hubbub1_soft_reset(struct hubbub *hubbub, bool reset); void 
hubbub1_construct(struct hubbub *hubbub, struct dc_context *ctx, const struct dcn_hubbub_registers *hubbub_regs, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 185f93bda41b..d2ab78b35a7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -78,6 +78,27 @@ static void hubp1_disconnect(struct hubp *hubp) CURSOR_ENABLE, 0); } +static void hubp1_disable_control(struct hubp *hubp, bool disable_hubp) +{ + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + uint32_t disable = disable_hubp ? 1 : 0; + + REG_UPDATE(DCHUBP_CNTL, + HUBP_DISABLE, disable); +} + +static unsigned int hubp1_get_underflow_status(struct hubp *hubp) +{ + uint32_t hubp_underflow = 0; + struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); + + REG_GET(DCHUBP_CNTL, + HUBP_UNDERFLOW_STATUS, + &hubp_underflow); + + return hubp_underflow; +} + static void hubp1_set_hubp_blank_en(struct hubp *hubp, bool blank) { struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); @@ -1117,6 +1138,9 @@ static struct hubp_funcs dcn10_hubp_funcs = { .hubp_clk_cntl = hubp1_clk_cntl, .hubp_vtg_sel = hubp1_vtg_sel, .hubp_read_state = hubp1_read_state, + .hubp_disable_control = hubp1_disable_control, + .hubp_get_underflow_status = hubp1_get_underflow_status, + }; /*****************************************/ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index fe9b8c4a91ca..af384034398f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -253,6 +253,7 @@ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\ + HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\ HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\ @@ -421,6 +422,7 @@ #define DCN_HUBP_REG_FIELD_LIST(type) \ type HUBP_BLANK_EN;\ + type HUBP_DISABLE;\ type HUBP_TTU_DISABLE;\ type HUBP_NO_OUTSTANDING_REQ;\ type HUBP_VTG_SEL;\ @@ -723,4 +725,5 @@ void hubp1_read_state(struct hubp *hubp); enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch); + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 50bd7548e230..be8820d8a2e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -747,6 +747,90 @@ static void reset_back_end_for_pipe( pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst); } +static bool dcn10_hw_wa_force_recovery(struct dc *dc) +{ + struct hubp *hubp ; + unsigned int i; + bool need_recover = true; + + if (!dc->debug.recovery_enabled) + return false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = + &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + if (hubp != NULL) { + if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) { + /* one pipe underflow, we will reset all the pipes*/ + need_recover = true; + } + } + } + } + if (!need_recover) + return false; + /* + DCHUBP_CNTL:HUBP_BLANK_EN=1 + DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1 + DCHUBP_CNTL:HUBP_DISABLE=1 + DCHUBP_CNTL:HUBP_DISABLE=0 + 
DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0 + DCSURF_PRIMARY_SURFACE_ADDRESS + DCHUBP_CNTL:HUBP_BLANK_EN=0 + */ + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = + &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/ + if (hubp != NULL) + hubp->funcs->set_hubp_blank_en(hubp, true); + } + } + /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/ + hubbub1_soft_reset(dc->res_pool->hubbub, true); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = + &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + /*DCHUBP_CNTL:HUBP_DISABLE=1*/ + if (hubp != NULL) + hubp->funcs->hubp_disable_control(hubp, true); + } + } + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = + &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + /*DCHUBP_CNTL:HUBP_DISABLE=0*/ + if (hubp != NULL) + hubp->funcs->hubp_disable_control(hubp, true); + } + } + /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/ + hubbub1_soft_reset(dc->res_pool->hubbub, false); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = + &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx != NULL) { + hubp = pipe_ctx->plane_res.hubp; + /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/ + if (hubp != NULL) + hubp->funcs->set_hubp_blank_en(hubp, true); + } + } + return true; + +} + + static void dcn10_verify_allow_pstate_change_high(struct dc *dc) { static bool should_log_hw_state; /* prevent hw state log by default */ @@ -755,8 +839,12 @@ static void dcn10_verify_allow_pstate_change_high(struct dc *dc) if (should_log_hw_state) { dcn10_log_hw_state(dc); } - BREAK_TO_DEBUGGER(); + if (dcn10_hw_wa_force_recovery(dc)) { + /*check again*/ + if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) + BREAK_TO_DEBUGGER(); + } } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 2c0a3150bf2d..16c84e9ee33b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -446,6 +446,7 @@ static const struct dc_debug debug_defaults_drv = { .vsr_support = true, .performance_trace = false, .az_endpoint_mute_only = true, + .recovery_enabled = false, /*enable this by default after testing.*/ }; static const struct dc_debug debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index 331f8ff57ed7..97df82cddf82 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -121,6 +121,8 @@ struct hubp_funcs { void (*hubp_clk_cntl)(struct hubp *hubp, bool enable); void (*hubp_vtg_sel)(struct hubp *hubp, uint32_t otg_inst); void (*hubp_read_state)(struct hubp *hubp); + void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp); + unsigned int (*hubp_get_underflow_status)(struct hubp *hubp); }; -- cgit v1.2.1 From 6b8e1eb7c6e059d8bb52f24b13081205242fded9 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Tue, 17 Apr 2018 16:50:28 -0400 Subject: drm/amd/display: Update HW sequencer initialization Signed-off-by: Eric Bernstein Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 +++--- 
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 2 ++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 10 +++++----- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 10 ++++++++++ 4 files changed, 20 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index be8820d8a2e6..24bcc5e58720 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -849,7 +849,7 @@ static void dcn10_verify_allow_pstate_change_high(struct dc *dc) } /* trigger HW to start disconnect plane from stream on the next vsync */ -static void plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) +void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; int dpp_id = pipe_ctx->plane_res.dpp->inst; @@ -1032,7 +1032,7 @@ static void dcn10_init_hw(struct dc *dc) dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true; pipe_ctx->stream_res.opp = dc->res_pool->opps[i]; - plane_atomic_disconnect(dc, pipe_ctx); + hwss1_plane_atomic_disconnect(dc, pipe_ctx); } for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2267,7 +2267,7 @@ static void dcn10_apply_ctx_for_surface( old_pipe_ctx->plane_state && old_pipe_ctx->stream_res.tg == tg) { - plane_atomic_disconnect(dc, old_pipe_ctx); + hwss1_plane_atomic_disconnect(dc, old_pipe_ctx); removed_pipe[i] = true; DC_LOG_DC( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h index 6c526b5095d9..44f734b73f9e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h @@ -37,4 +37,6 @@ extern void fill_display_configs( bool is_rgb_cspace(enum dc_color_space output_color_space); +void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx); + #endif /* __DC_HWSS_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index c734b7fa5835..f2fbce0e3fc5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -360,7 +360,7 @@ void optc1_program_timing( } -static void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable) +void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -1257,20 +1257,20 @@ void optc1_read_otg_state(struct optc *optc1, OPTC_UNDERFLOW_OCCURRED_STATUS, &s->underflow_occurred_status); } -static void optc1_clear_optc_underflow(struct timing_generator *optc) +void optc1_clear_optc_underflow(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1); } -static void optc1_tg_init(struct timing_generator *optc) +void optc1_tg_init(struct timing_generator *optc) { optc1_set_blank_data_double_buffer(optc, true); optc1_clear_optc_underflow(optc); } -static bool optc1_is_tg_enabled(struct timing_generator *optc) +bool optc1_is_tg_enabled(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); uint32_t otg_enabled = 0; @@ -1281,7 +1281,7 @@ static bool optc1_is_tg_enabled(struct timing_generator *optc) } -static bool 
optc1_is_optc_underflow_occurred(struct timing_generator *optc) +bool optc1_is_optc_underflow_occurred(struct timing_generator *optc) { struct optc *optc1 = DCN10TG_FROM_TG(optc); uint32_t underflow_occurred = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 89e09e5327a2..c62052f46460 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -497,4 +497,14 @@ void optc1_program_stereo(struct timing_generator *optc, bool optc1_is_stereo_left_eye(struct timing_generator *optc); +void optc1_clear_optc_underflow(struct timing_generator *optc); + +void optc1_tg_init(struct timing_generator *optc); + +bool optc1_is_tg_enabled(struct timing_generator *optc); + +bool optc1_is_optc_underflow_occurred(struct timing_generator *optc); + +void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable); + #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */ -- cgit v1.2.1 From a21ddec61c5ed30b58eea3268ad3e0c69452ebfe Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 23 Apr 2018 12:41:34 -0400 Subject: drm/amd/display: fix 31_32_fixpt shift functions Signed-off-by: Dmytro Laktyushkin Reviewed-by: Eric Yang Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/fixed31_32.h | 26 ++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 61f11e23bf70..bd8a30462258 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -27,6 +27,12 @@ #define __DAL_FIXED31_32_H__ #define FIXED31_32_BITS_PER_FRACTIONAL_PART 32 +#ifndef LLONG_MIN +#define LLONG_MIN (1LL<<63) +#endif +#ifndef LLONG_MAX +#define LLONG_MAX (-1LL>>1) +#endif /* * @brief @@ -45,6 +51,7 @@ struct fixed31_32 { long long value; }; + /* * @brief * Useful constants @@ -201,14 +208,12 @@ static inline struct fixed31_32 dc_fixpt_clamp( */ static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift) { - struct fixed31_32 res; - ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || - ((arg.value < 0) && (arg.value >= LLONG_MIN >> shift))); + ((arg.value < 0) && (arg.value >= (LLONG_MIN / (1 << shift))))); - res.value = arg.value << shift; + arg.value = arg.value << shift; - return res; + return arg; } /* @@ -217,9 +222,14 @@ static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned cha */ static inline struct fixed31_32 dc_fixpt_shr(struct fixed31_32 arg, unsigned char shift) { - struct fixed31_32 res; - res.value = arg.value >> shift; - return res; + bool negative = arg.value < 0; + + if (negative) + arg.value = -arg.value; + arg.value = arg.value >> shift; + if (negative) + arg.value = -arg.value; + return arg; } /* -- cgit v1.2.1 From 7ea034ce8188eaf61ce2b7d4e747e1f6e3bb8aa3 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 23 Apr 2018 14:39:23 -0400 Subject: drm/amd/display: fix a 32 bit shift meant to be 64 warning Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h 
b/drivers/gpu/drm/amd/display/include/fixed31_32.h index bd8a30462258..76f64e910422 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -209,7 +209,7 @@ static inline struct fixed31_32 dc_fixpt_clamp( static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift) { ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || - ((arg.value < 0) && (arg.value >= (LLONG_MIN / (1 << shift))))); + ((arg.value < 0) && (arg.value >= (LLONG_MIN / (1LL << shift))))); arg.value = arg.value << shift; -- cgit v1.2.1 From 3f460907be1b53441526e644019bcf150c433f59 Mon Sep 17 00:00:00 2001 From: Xingyue Tao Date: Thu, 19 Apr 2018 16:23:12 -0400 Subject: drm/amd/display: Add dc cap to restrict VSR downscaling src size - Adds int max_downscale_src_width in dc struct - Checks and does not support if downscale size is more than 4k (width > 3840) Signed-off-by: Xingyue Tao Reviewed-by: Charlene Liu Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 16 +++++++++++----- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 + 3 files changed, 13 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1c39c9996a04..08b29a742921 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -203,6 +203,7 @@ struct dc_debug { bool clock_trace; bool validation_trace; bool bandwidth_calcs_trace; + int max_downscale_src_width; /* stutter efficiency related */ bool disable_stutter; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 20796da36de4..2da138904312 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -145,12 +145,18 @@ bool dpp_get_optimal_number_of_taps( else pixel_width = scl_data->viewport.width; - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ if (scl_data->viewport.width != scl_data->h_active && - scl_data->viewport.height != scl_data->v_active && - dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->format == PIXEL_FORMAT_FP16) - return false; + scl_data->viewport.height != scl_data->v_active) { + + /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ + if (dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && + scl_data->format == PIXEL_FORMAT_FP16) + return false; + + if (dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; + } /* TODO: add lb check */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 16c84e9ee33b..f69f3a54f001 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -447,6 +447,7 @@ static const struct dc_debug debug_defaults_drv = { .performance_trace = false, .az_endpoint_mute_only = true, .recovery_enabled = false, /*enable this by default after testing.*/ + .max_downscale_src_width = 3840, }; static const struct dc_debug debug_defaults_diags = { -- cgit v1.2.1 From 07049507fd1b5813f667bb34e6903369487f9e34 Mon Sep 17 00:00:00 2001 From: Yue Hin Lau Date: Wed, 18 Apr 2018 
16:07:04 -0400 Subject: drm/amd/display: disable mpo if brightness adjusted Signed-off-by: Yue Hin Lau Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 08b29a742921..7a9f600662ce 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -75,6 +75,7 @@ struct dc_caps { bool dynamic_audio; bool is_apu; bool dual_link_dvi; + bool post_blend_color_processing; }; struct dc_dcc_surface_param { diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index f69f3a54f001..ace2e03dced4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -1023,6 +1023,7 @@ static bool construct( dc->caps.max_cursor_size = 256; dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; + dc->caps.post_blend_color_processing = false; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; -- cgit v1.2.1 From a3cb1c1c8e5e494f7630349fbebb79b1787128a1 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Mon, 23 Apr 2018 15:55:36 -0400 Subject: drm/amd/display: Log DTN only after the atomic commit in Diag Also print HUBP info only if pipe enabled. This fixes having different DTN logs for different test sequences. Signed-off-by: Nikola Cornij Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 119 +++++++++++---------- 1 file changed, 62 insertions(+), 57 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 24bcc5e58720..c452972bf1c3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -127,24 +127,26 @@ static void dcn10_log_hubp_states(struct dc *dc) hubp->funcs->hubp_read_state(hubp); - DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh" - " %6d %8d %7d %8xh", - hubp->inst, - s->pixel_format, - s->inuse_addr_hi, - s->viewport_width, - s->viewport_height, - s->rotation_angle, - s->h_mirror_en, - s->sw_mode, - s->dcc_en, - s->blank_en, - s->ttu_disable, - s->underflow_status); - DTN_INFO_MICRO_SEC(s->min_ttu_vblank); - DTN_INFO_MICRO_SEC(s->qos_level_low_wm); - DTN_INFO_MICRO_SEC(s->qos_level_high_wm); - DTN_INFO("\n"); + if (!s->blank_en) { + DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh" + " %6d %8d %7d %8xh", + hubp->inst, + s->pixel_format, + s->inuse_addr_hi, + s->viewport_width, + s->viewport_height, + s->rotation_angle, + s->h_mirror_en, + s->sw_mode, + s->dcc_en, + s->blank_en, + s->ttu_disable, + s->underflow_status); + DTN_INFO_MICRO_SEC(s->min_ttu_vblank); + DTN_INFO_MICRO_SEC(s->qos_level_low_wm); + DTN_INFO_MICRO_SEC(s->qos_level_high_wm); + DTN_INFO("\n"); + } } DTN_INFO("\n=========RQ========\n"); @@ -155,16 +157,17 @@ static void dcn10_log_hubp_states(struct dc *dc) struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs; - DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", - i, 
rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode, - rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size, - rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size, - rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size, - rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height, - rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size, - rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size, - rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size, - rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear); + if (!s->blank_en) + DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", + pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode, + rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size, + rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size, + rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size, + rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height, + rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size, + rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size, + rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size, + rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear); } DTN_INFO("========DLG========\n"); @@ -179,27 +182,28 @@ static void dcn10_log_hubp_states(struct dc *dc) struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr; - DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" - "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" - " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", - i, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, - dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler, - dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank, - dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq, - dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l, - dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l, - dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l, - dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l, - dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l, - dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l, - dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l, - dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l, - dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l, - dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l, - dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1, - dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit, - 
dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay, - dlg_regs->xfc_reg_remote_surface_flip_latency); + if (!s->blank_en) + DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" + "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh" + " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", + pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start, + dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler, + dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank, + dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq, + dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l, + dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l, + dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l, + dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l, + dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l, + dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l, + dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l, + dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l, + dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l, + dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l, + dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1, + dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit, + dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay, + dlg_regs->xfc_reg_remote_surface_flip_latency); } DTN_INFO("========TTU========\n"); @@ -210,14 +214,15 @@ static void dcn10_log_hubp_states(struct dc *dc) struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr; - DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", - i, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank, - ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l, - ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0, - ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1, - ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l, - ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0, - ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1); + if (!s->blank_en) + DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n", + pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank, + ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l, + ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0, + ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1, + 
ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l, + ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0, + ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1); } DTN_INFO("\n"); } -- cgit v1.2.1 From cba5e8708ee6123af14ab1f1196353dcda3eb533 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Tue, 20 Mar 2018 08:25:16 -0400 Subject: drm/amd/display: update dml to allow sync with DV Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dml/display_mode_enums.h | 13 + .../drm/amd/display/dc/dml/display_mode_structs.h | 962 +++++++++++---------- .../gpu/drm/amd/display/dc/dml/dml_inline_defs.h | 10 + 3 files changed, 515 insertions(+), 470 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h index b1ad3553f900..47c19f8fe7d1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h @@ -108,4 +108,17 @@ enum output_standard { dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt }; +enum mpc_combine_affinity { + dm_mpc_always_when_possible, + dm_mpc_reduce_voltage, + dm_mpc_reduce_voltage_and_clocks +}; + +enum self_refresh_affinity { + dm_try_to_allow_self_refresh_and_mclk_switch, + dm_allow_self_refresh_and_mclk_switch, + dm_allow_self_refresh, + dm_neither_self_refresh_nor_mclk_switch +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index ce750edc1e5f..7fa0375939ae 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -25,39 +25,39 @@ #ifndef __DISPLAY_MODE_STRUCTS_H__ #define __DISPLAY_MODE_STRUCTS_H__ -typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st; -typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st; -typedef struct _vcs_dpi_ip_params_st ip_params_st; -typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st; -typedef struct _vcs_dpi_display_output_params_st display_output_params_st; -typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st; -typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st; -typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st; -typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st; -typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st; -typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st; -typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st; -typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st; -typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st; -typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st; -typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st; -typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st; -typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st; -typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st; -typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st; -typedef struct _vcs_dpi_display_cur_rq_dlg_params_st 
display_cur_rq_dlg_params_st; -typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st; -typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st; -typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st; -typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st; -typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st; -typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st; -typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st; -typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st; -typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st; -typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st; -typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st; -typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st; +typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st; +typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st; +typedef struct _vcs_dpi_ip_params_st ip_params_st; +typedef struct _vcs_dpi_display_pipe_source_params_st display_pipe_source_params_st; +typedef struct _vcs_dpi_display_output_params_st display_output_params_st; +typedef struct _vcs_dpi_display_bandwidth_st display_bandwidth_st; +typedef struct _vcs_dpi_scaler_ratio_depth_st scaler_ratio_depth_st; +typedef struct _vcs_dpi_scaler_taps_st scaler_taps_st; +typedef struct _vcs_dpi_display_pipe_dest_params_st display_pipe_dest_params_st; +typedef struct _vcs_dpi_display_pipe_params_st display_pipe_params_st; +typedef struct _vcs_dpi_display_clocks_and_cfg_st display_clocks_and_cfg_st; +typedef struct _vcs_dpi_display_e2e_pipe_params_st display_e2e_pipe_params_st; +typedef struct _vcs_dpi_dchub_buffer_sizing_st dchub_buffer_sizing_st; +typedef struct _vcs_dpi_watermarks_perf_st watermarks_perf_st; +typedef struct _vcs_dpi_cstate_pstate_watermarks_st cstate_pstate_watermarks_st; +typedef struct _vcs_dpi_wm_calc_pipe_params_st wm_calc_pipe_params_st; +typedef struct _vcs_dpi_vratio_pre_st vratio_pre_st; +typedef struct _vcs_dpi_display_data_rq_misc_params_st display_data_rq_misc_params_st; +typedef struct _vcs_dpi_display_data_rq_sizing_params_st display_data_rq_sizing_params_st; +typedef struct _vcs_dpi_display_data_rq_dlg_params_st display_data_rq_dlg_params_st; +typedef struct _vcs_dpi_display_cur_rq_dlg_params_st display_cur_rq_dlg_params_st; +typedef struct _vcs_dpi_display_rq_dlg_params_st display_rq_dlg_params_st; +typedef struct _vcs_dpi_display_rq_sizing_params_st display_rq_sizing_params_st; +typedef struct _vcs_dpi_display_rq_misc_params_st display_rq_misc_params_st; +typedef struct _vcs_dpi_display_rq_params_st display_rq_params_st; +typedef struct _vcs_dpi_display_dlg_regs_st display_dlg_regs_st; +typedef struct _vcs_dpi_display_ttu_regs_st display_ttu_regs_st; +typedef struct _vcs_dpi_display_data_rq_regs_st display_data_rq_regs_st; +typedef struct _vcs_dpi_display_rq_regs_st display_rq_regs_st; +typedef struct _vcs_dpi_display_dlg_sys_params_st display_dlg_sys_params_st; +typedef struct _vcs_dpi_display_dlg_prefetch_param_st display_dlg_prefetch_param_st; +typedef struct _vcs_dpi_display_pipe_clock_st display_pipe_clock_st; +typedef struct _vcs_dpi_display_arb_params_st display_arb_params_st; struct _vcs_dpi_voltage_scaling_st { int state; @@ -72,89 +72,107 @@ struct _vcs_dpi_voltage_scaling_st { double dppclk_mhz; }; -struct _vcs_dpi_soc_bounding_box_st { - double sr_exit_time_us; - double 
sr_enter_plus_exit_time_us; - double urgent_latency_us; - double writeback_latency_us; - double ideal_dram_bw_after_urgent_percent; - unsigned int max_request_size_bytes; - double downspread_percent; - double dram_page_open_time_ns; - double dram_rw_turnaround_time_ns; - double dram_return_buffer_per_channel_bytes; - double dram_channel_width_bytes; +struct _vcs_dpi_soc_bounding_box_st { + double sr_exit_time_us; + double sr_enter_plus_exit_time_us; + double urgent_latency_us; + double urgent_latency_pixel_data_only_us; + double urgent_latency_pixel_mixed_with_vm_data_us; + double urgent_latency_vm_data_only_us; + double writeback_latency_us; + double ideal_dram_bw_after_urgent_percent; + double pct_ideal_dram_sdp_bw_after_urgent_pixel_only; // PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelDataOnly + double pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm; + double pct_ideal_dram_sdp_bw_after_urgent_vm_only; + double max_avg_sdp_bw_use_normal_percent; + double max_avg_dram_bw_use_normal_percent; + unsigned int max_request_size_bytes; + double downspread_percent; + double dram_page_open_time_ns; + double dram_rw_turnaround_time_ns; + double dram_return_buffer_per_channel_bytes; + double dram_channel_width_bytes; double fabric_datapath_to_dcn_data_return_bytes; double dcn_downspread_percent; double dispclk_dppclk_vco_speed_mhz; double dfs_vco_period_ps; - unsigned int round_trip_ping_latency_dcfclk_cycles; - unsigned int urgent_out_of_order_return_per_channel_bytes; - unsigned int channel_interleave_bytes; - unsigned int num_banks; - unsigned int num_chans; - unsigned int vmm_page_size_bytes; - double dram_clock_change_latency_us; - double writeback_dram_clock_change_latency_us; - unsigned int return_bus_width_bytes; - unsigned int voltage_override; - double xfc_bus_transport_time_us; - double xfc_xbuf_latency_tolerance_us; + unsigned int urgent_out_of_order_return_per_channel_pixel_only_bytes; + unsigned int urgent_out_of_order_return_per_channel_pixel_and_vm_bytes; + unsigned int urgent_out_of_order_return_per_channel_vm_only_bytes; + unsigned int round_trip_ping_latency_dcfclk_cycles; + unsigned int urgent_out_of_order_return_per_channel_bytes; + unsigned int channel_interleave_bytes; + unsigned int num_banks; + unsigned int num_chans; + unsigned int vmm_page_size_bytes; + double dram_clock_change_latency_us; + double writeback_dram_clock_change_latency_us; + unsigned int return_bus_width_bytes; + unsigned int voltage_override; + double xfc_bus_transport_time_us; + double xfc_xbuf_latency_tolerance_us; + int use_urgent_burst_bw; struct _vcs_dpi_voltage_scaling_st clock_limits[7]; }; -struct _vcs_dpi_ip_params_st { - unsigned int max_inter_dcn_tile_repeaters; - unsigned int num_dsc; - unsigned int odm_capable; - unsigned int rob_buffer_size_kbytes; - unsigned int det_buffer_size_kbytes; - unsigned int dpte_buffer_size_in_pte_reqs; - unsigned int pde_proc_buffer_size_64k_reqs; - unsigned int dpp_output_buffer_pixels; - unsigned int opp_output_buffer_lines; - unsigned int pixel_chunk_size_kbytes; - unsigned char pte_enable; - unsigned int pte_chunk_size_kbytes; - unsigned int meta_chunk_size_kbytes; - unsigned int writeback_chunk_size_kbytes; - unsigned int line_buffer_size_bits; - unsigned int max_line_buffer_lines; - unsigned int writeback_luma_buffer_size_kbytes; - unsigned int writeback_chroma_buffer_size_kbytes; - unsigned int writeback_chroma_line_buffer_width_pixels; - unsigned int max_page_table_levels; - unsigned int max_num_dpp; - unsigned int max_num_otg; - 
unsigned int cursor_chunk_size; - unsigned int cursor_buffer_size; - unsigned int max_num_wb; - unsigned int max_dchub_pscl_bw_pix_per_clk; - unsigned int max_pscl_lb_bw_pix_per_clk; - unsigned int max_lb_vscl_bw_pix_per_clk; - unsigned int max_vscl_hscl_bw_pix_per_clk; - double max_hscl_ratio; - double max_vscl_ratio; - unsigned int hscl_mults; - unsigned int vscl_mults; - unsigned int max_hscl_taps; - unsigned int max_vscl_taps; - unsigned int xfc_supported; - unsigned int xfc_fill_constant_bytes; - double dispclk_ramp_margin_percent; - double xfc_fill_bw_overhead_percent; - double underscan_factor; - unsigned int min_vblank_lines; - unsigned int dppclk_delay_subtotal; - unsigned int dispclk_delay_subtotal; - unsigned int dcfclk_cstate_latency; - unsigned int dppclk_delay_scl; - unsigned int dppclk_delay_scl_lb_only; - unsigned int dppclk_delay_cnvc_formatter; - unsigned int dppclk_delay_cnvc_cursor; - unsigned int is_line_buffer_bpp_fixed; - unsigned int line_buffer_fixed_bpp; - unsigned int dcc_supported; +struct _vcs_dpi_ip_params_st { + bool gpuvm_enable; + bool hostvm_enable; + unsigned int gpuvm_max_page_table_levels; + unsigned int hostvm_max_page_table_levels; + unsigned int hostvm_cached_page_table_levels; + unsigned int pte_group_size_bytes; + unsigned int max_inter_dcn_tile_repeaters; + unsigned int num_dsc; + unsigned int odm_capable; + unsigned int rob_buffer_size_kbytes; + unsigned int det_buffer_size_kbytes; + unsigned int dpte_buffer_size_in_pte_reqs; + unsigned int pde_proc_buffer_size_64k_reqs; + unsigned int dpp_output_buffer_pixels; + unsigned int opp_output_buffer_lines; + unsigned int pixel_chunk_size_kbytes; + unsigned char pte_enable; + unsigned int pte_chunk_size_kbytes; + unsigned int meta_chunk_size_kbytes; + unsigned int writeback_chunk_size_kbytes; + unsigned int line_buffer_size_bits; + unsigned int max_line_buffer_lines; + unsigned int writeback_luma_buffer_size_kbytes; + unsigned int writeback_chroma_buffer_size_kbytes; + unsigned int writeback_chroma_line_buffer_width_pixels; + unsigned int max_page_table_levels; + unsigned int max_num_dpp; + unsigned int max_num_otg; + unsigned int cursor_chunk_size; + unsigned int cursor_buffer_size; + unsigned int max_num_wb; + unsigned int max_dchub_pscl_bw_pix_per_clk; + unsigned int max_pscl_lb_bw_pix_per_clk; + unsigned int max_lb_vscl_bw_pix_per_clk; + unsigned int max_vscl_hscl_bw_pix_per_clk; + double max_hscl_ratio; + double max_vscl_ratio; + unsigned int hscl_mults; + unsigned int vscl_mults; + unsigned int max_hscl_taps; + unsigned int max_vscl_taps; + unsigned int xfc_supported; + unsigned int xfc_fill_constant_bytes; + double dispclk_ramp_margin_percent; + double xfc_fill_bw_overhead_percent; + double underscan_factor; + unsigned int min_vblank_lines; + unsigned int dppclk_delay_subtotal; + unsigned int dispclk_delay_subtotal; + unsigned int dcfclk_cstate_latency; + unsigned int dppclk_delay_scl; + unsigned int dppclk_delay_scl_lb_only; + unsigned int dppclk_delay_cnvc_formatter; + unsigned int dppclk_delay_cnvc_cursor; + unsigned int is_line_buffer_bpp_fixed; + unsigned int line_buffer_fixed_bpp; + unsigned int dcc_supported; unsigned int IsLineBufferBppFixed; unsigned int LineBufferFixedBpp; @@ -169,41 +187,45 @@ struct _vcs_dpi_display_xfc_params_st { int xfc_slv_chunk_size_bytes; }; -struct _vcs_dpi_display_pipe_source_params_st { - int source_format; - unsigned char dcc; - unsigned int dcc_override; - unsigned int dcc_rate; - unsigned char dcc_use_global; - unsigned char vm; - unsigned char 
vm_levels_force_en; - unsigned int vm_levels_force; - int source_scan; - int sw_mode; - int macro_tile_size; - unsigned char is_display_sw; - unsigned int viewport_width; - unsigned int viewport_height; - unsigned int viewport_y_y; - unsigned int viewport_y_c; - unsigned int viewport_width_c; - unsigned int viewport_height_c; - unsigned int data_pitch; - unsigned int data_pitch_c; - unsigned int meta_pitch; - unsigned int meta_pitch_c; - unsigned int cur0_src_width; - int cur0_bpp; - unsigned int cur1_src_width; - int cur1_bpp; - int num_cursors; - unsigned char is_hsplit; - unsigned char dynamic_metadata_enable; - unsigned int dynamic_metadata_lines_before_active; - unsigned int dynamic_metadata_xmit_bytes; - unsigned int hsplit_grp; - unsigned char xfc_enable; - unsigned char xfc_slave; +struct _vcs_dpi_display_pipe_source_params_st { + int source_format; + unsigned char dcc; + unsigned int dcc_override; + unsigned int dcc_rate; + unsigned char dcc_use_global; + unsigned char vm; + bool gpuvm; // gpuvm enabled + bool hostvm; // hostvm enabled + bool gpuvm_levels_force_en; + unsigned int gpuvm_levels_force; + bool hostvm_levels_force_en; + unsigned int hostvm_levels_force; + int source_scan; + int sw_mode; + int macro_tile_size; + unsigned char is_display_sw; + unsigned int viewport_width; + unsigned int viewport_height; + unsigned int viewport_y_y; + unsigned int viewport_y_c; + unsigned int viewport_width_c; + unsigned int viewport_height_c; + unsigned int data_pitch; + unsigned int data_pitch_c; + unsigned int meta_pitch; + unsigned int meta_pitch_c; + unsigned int cur0_src_width; + int cur0_bpp; + unsigned int cur1_src_width; + int cur1_bpp; + int num_cursors; + unsigned char is_hsplit; + unsigned char dynamic_metadata_enable; + unsigned int dynamic_metadata_lines_before_active; + unsigned int dynamic_metadata_xmit_bytes; + unsigned int hsplit_grp; + unsigned char xfc_enable; + unsigned char xfc_slave; struct _vcs_dpi_display_xfc_params_st xfc_params; }; struct writeback_st { @@ -219,335 +241,335 @@ struct writeback_st { double wb_vratio; }; -struct _vcs_dpi_display_output_params_st { - int dp_lanes; - int output_bpp; - int dsc_enable; - int wb_enable; - int num_active_wb; - int opp_input_bpc; - int output_type; - int output_format; - int output_standard; - int dsc_slices; +struct _vcs_dpi_display_output_params_st { + int dp_lanes; + int output_bpp; + int dsc_enable; + int wb_enable; + int num_active_wb; + int output_bpc; + int output_type; + int output_format; + int output_standard; + int dsc_slices; struct writeback_st wb; }; -struct _vcs_dpi_display_bandwidth_st { - double total_bw_consumed_gbps; - double guaranteed_urgent_return_bw_gbps; -}; - -struct _vcs_dpi_scaler_ratio_depth_st { - double hscl_ratio; - double vscl_ratio; - double hscl_ratio_c; - double vscl_ratio_c; - double vinit; - double vinit_c; - double vinit_bot; - double vinit_bot_c; - int lb_depth; - int scl_enable; -}; - -struct _vcs_dpi_scaler_taps_st { - unsigned int htaps; - unsigned int vtaps; - unsigned int htaps_c; - unsigned int vtaps_c; -}; - -struct _vcs_dpi_display_pipe_dest_params_st { - unsigned int recout_width; - unsigned int recout_height; - unsigned int full_recout_width; - unsigned int full_recout_height; - unsigned int hblank_start; - unsigned int hblank_end; - unsigned int vblank_start; - unsigned int vblank_end; - unsigned int htotal; - unsigned int vtotal; - unsigned int vactive; - unsigned int hactive; - unsigned int vstartup_start; - unsigned int vupdate_offset; - unsigned int vupdate_width; - 
unsigned int vready_offset; - unsigned char interlaced; - unsigned char underscan; - double pixel_rate_mhz; - unsigned char synchronized_vblank_all_planes; - unsigned char otg_inst; - unsigned char odm_split_cnt; - unsigned char odm_combine; -}; - -struct _vcs_dpi_display_pipe_params_st { - display_pipe_source_params_st src; - display_pipe_dest_params_st dest; - scaler_ratio_depth_st scale_ratio_depth; - scaler_taps_st scale_taps; -}; - -struct _vcs_dpi_display_clocks_and_cfg_st { - int voltage; - double dppclk_mhz; - double refclk_mhz; - double dispclk_mhz; - double dcfclk_mhz; - double socclk_mhz; -}; - -struct _vcs_dpi_display_e2e_pipe_params_st { - display_pipe_params_st pipe; - display_output_params_st dout; - display_clocks_and_cfg_st clks_cfg; -}; - -struct _vcs_dpi_dchub_buffer_sizing_st { - unsigned int swath_width_y; - unsigned int swath_height_y; - unsigned int swath_height_c; - unsigned int detail_buffer_size_y; -}; - -struct _vcs_dpi_watermarks_perf_st { - double stutter_eff_in_active_region_percent; - double urgent_latency_supported_us; - double non_urgent_latency_supported_us; - double dram_clock_change_margin_us; - double dram_access_eff_percent; -}; - -struct _vcs_dpi_cstate_pstate_watermarks_st { - double cstate_exit_us; - double cstate_enter_plus_exit_us; - double pstate_change_us; -}; - -struct _vcs_dpi_wm_calc_pipe_params_st { - unsigned int num_dpp; - int voltage; - int output_type; - double dcfclk_mhz; - double socclk_mhz; - double dppclk_mhz; - double pixclk_mhz; - unsigned char interlace_en; - unsigned char pte_enable; - unsigned char dcc_enable; - double dcc_rate; - double bytes_per_pixel_c; - double bytes_per_pixel_y; - unsigned int swath_width_y; - unsigned int swath_height_y; - unsigned int swath_height_c; - unsigned int det_buffer_size_y; - double h_ratio; - double v_ratio; - unsigned int h_taps; - unsigned int h_total; - unsigned int v_total; - unsigned int v_active; - unsigned int e2e_index; - double display_pipe_line_delivery_time; - double read_bw; - unsigned int lines_in_det_y; - unsigned int lines_in_det_y_rounded_down_to_swath; - double full_det_buffering_time; - double dcfclk_deepsleep_mhz_per_plane; -}; - -struct _vcs_dpi_vratio_pre_st { - double vratio_pre_l; - double vratio_pre_c; -}; - -struct _vcs_dpi_display_data_rq_misc_params_st { - unsigned int full_swath_bytes; - unsigned int stored_swath_bytes; - unsigned int blk256_height; - unsigned int blk256_width; - unsigned int req_height; - unsigned int req_width; -}; - -struct _vcs_dpi_display_data_rq_sizing_params_st { - unsigned int chunk_bytes; - unsigned int min_chunk_bytes; - unsigned int meta_chunk_bytes; - unsigned int min_meta_chunk_bytes; - unsigned int mpte_group_bytes; - unsigned int dpte_group_bytes; -}; - -struct _vcs_dpi_display_data_rq_dlg_params_st { - unsigned int swath_width_ub; - unsigned int swath_height; - unsigned int req_per_swath_ub; - unsigned int meta_pte_bytes_per_frame_ub; - unsigned int dpte_req_per_row_ub; - unsigned int dpte_groups_per_row_ub; - unsigned int dpte_row_height; - unsigned int dpte_bytes_per_row_ub; - unsigned int meta_chunks_per_row_ub; - unsigned int meta_req_per_row_ub; - unsigned int meta_row_height; - unsigned int meta_bytes_per_row_ub; -}; - -struct _vcs_dpi_display_cur_rq_dlg_params_st { - unsigned char enable; - unsigned int swath_height; - unsigned int req_per_line; -}; - -struct _vcs_dpi_display_rq_dlg_params_st { - display_data_rq_dlg_params_st rq_l; - display_data_rq_dlg_params_st rq_c; - display_cur_rq_dlg_params_st rq_cur0; -}; - -struct 
_vcs_dpi_display_rq_sizing_params_st { - display_data_rq_sizing_params_st rq_l; - display_data_rq_sizing_params_st rq_c; -}; - -struct _vcs_dpi_display_rq_misc_params_st { - display_data_rq_misc_params_st rq_l; - display_data_rq_misc_params_st rq_c; -}; - -struct _vcs_dpi_display_rq_params_st { - unsigned char yuv420; - unsigned char yuv420_10bpc; - display_rq_misc_params_st misc; - display_rq_sizing_params_st sizing; - display_rq_dlg_params_st dlg; -}; - -struct _vcs_dpi_display_dlg_regs_st { - unsigned int refcyc_h_blank_end; - unsigned int dlg_vblank_end; - unsigned int min_dst_y_next_start; - unsigned int refcyc_per_htotal; - unsigned int refcyc_x_after_scaler; - unsigned int dst_y_after_scaler; - unsigned int dst_y_prefetch; - unsigned int dst_y_per_vm_vblank; - unsigned int dst_y_per_row_vblank; - unsigned int dst_y_per_vm_flip; - unsigned int dst_y_per_row_flip; - unsigned int ref_freq_to_pix_freq; - unsigned int vratio_prefetch; - unsigned int vratio_prefetch_c; - unsigned int refcyc_per_pte_group_vblank_l; - unsigned int refcyc_per_pte_group_vblank_c; - unsigned int refcyc_per_meta_chunk_vblank_l; - unsigned int refcyc_per_meta_chunk_vblank_c; - unsigned int refcyc_per_pte_group_flip_l; - unsigned int refcyc_per_pte_group_flip_c; - unsigned int refcyc_per_meta_chunk_flip_l; - unsigned int refcyc_per_meta_chunk_flip_c; - unsigned int dst_y_per_pte_row_nom_l; - unsigned int dst_y_per_pte_row_nom_c; - unsigned int refcyc_per_pte_group_nom_l; - unsigned int refcyc_per_pte_group_nom_c; - unsigned int dst_y_per_meta_row_nom_l; - unsigned int dst_y_per_meta_row_nom_c; - unsigned int refcyc_per_meta_chunk_nom_l; - unsigned int refcyc_per_meta_chunk_nom_c; - unsigned int refcyc_per_line_delivery_pre_l; - unsigned int refcyc_per_line_delivery_pre_c; - unsigned int refcyc_per_line_delivery_l; - unsigned int refcyc_per_line_delivery_c; - unsigned int chunk_hdl_adjust_cur0; - unsigned int chunk_hdl_adjust_cur1; - unsigned int vready_after_vcount0; - unsigned int dst_y_offset_cur0; - unsigned int dst_y_offset_cur1; - unsigned int xfc_reg_transfer_delay; - unsigned int xfc_reg_precharge_delay; - unsigned int xfc_reg_remote_surface_flip_latency; - unsigned int xfc_reg_prefetch_margin; - unsigned int dst_y_delta_drq_limit; -}; - -struct _vcs_dpi_display_ttu_regs_st { - unsigned int qos_level_low_wm; - unsigned int qos_level_high_wm; - unsigned int min_ttu_vblank; - unsigned int qos_level_flip; - unsigned int refcyc_per_req_delivery_l; - unsigned int refcyc_per_req_delivery_c; - unsigned int refcyc_per_req_delivery_cur0; - unsigned int refcyc_per_req_delivery_cur1; - unsigned int refcyc_per_req_delivery_pre_l; - unsigned int refcyc_per_req_delivery_pre_c; - unsigned int refcyc_per_req_delivery_pre_cur0; - unsigned int refcyc_per_req_delivery_pre_cur1; - unsigned int qos_level_fixed_l; - unsigned int qos_level_fixed_c; - unsigned int qos_level_fixed_cur0; - unsigned int qos_level_fixed_cur1; - unsigned int qos_ramp_disable_l; - unsigned int qos_ramp_disable_c; - unsigned int qos_ramp_disable_cur0; - unsigned int qos_ramp_disable_cur1; -}; - -struct _vcs_dpi_display_data_rq_regs_st { - unsigned int chunk_size; - unsigned int min_chunk_size; - unsigned int meta_chunk_size; - unsigned int min_meta_chunk_size; - unsigned int dpte_group_size; - unsigned int mpte_group_size; - unsigned int swath_height; - unsigned int pte_row_height_linear; -}; - -struct _vcs_dpi_display_rq_regs_st { - display_data_rq_regs_st rq_regs_l; - display_data_rq_regs_st rq_regs_c; - unsigned int drq_expansion_mode; - unsigned int 
prq_expansion_mode; - unsigned int mrq_expansion_mode; - unsigned int crq_expansion_mode; - unsigned int plane1_base_address; -}; - -struct _vcs_dpi_display_dlg_sys_params_st { - double t_mclk_wm_us; - double t_urg_wm_us; - double t_sr_wm_us; - double t_extra_us; - double mem_trip_us; - double t_srx_delay_us; - double deepsleep_dcfclk_mhz; - double total_flip_bw; - unsigned int total_flip_bytes; -}; - -struct _vcs_dpi_display_dlg_prefetch_param_st { - double prefetch_bw; - unsigned int flip_bytes; -}; - -struct _vcs_dpi_display_pipe_clock_st { - double dcfclk_mhz; - double dispclk_mhz; - double socclk_mhz; - double dscclk_mhz[6]; - double dppclk_mhz[6]; -}; - -struct _vcs_dpi_display_arb_params_st { - int max_req_outstanding; - int min_req_outstanding; - int sat_level_us; +struct _vcs_dpi_display_bandwidth_st { + double total_bw_consumed_gbps; + double guaranteed_urgent_return_bw_gbps; +}; + +struct _vcs_dpi_scaler_ratio_depth_st { + double hscl_ratio; + double vscl_ratio; + double hscl_ratio_c; + double vscl_ratio_c; + double vinit; + double vinit_c; + double vinit_bot; + double vinit_bot_c; + int lb_depth; + int scl_enable; +}; + +struct _vcs_dpi_scaler_taps_st { + unsigned int htaps; + unsigned int vtaps; + unsigned int htaps_c; + unsigned int vtaps_c; +}; + +struct _vcs_dpi_display_pipe_dest_params_st { + unsigned int recout_width; + unsigned int recout_height; + unsigned int full_recout_width; + unsigned int full_recout_height; + unsigned int hblank_start; + unsigned int hblank_end; + unsigned int vblank_start; + unsigned int vblank_end; + unsigned int htotal; + unsigned int vtotal; + unsigned int vactive; + unsigned int hactive; + unsigned int vstartup_start; + unsigned int vupdate_offset; + unsigned int vupdate_width; + unsigned int vready_offset; + unsigned char interlaced; + unsigned char underscan; + double pixel_rate_mhz; + unsigned char synchronized_vblank_all_planes; + unsigned char otg_inst; + unsigned char odm_split_cnt; + unsigned char odm_combine; +}; + +struct _vcs_dpi_display_pipe_params_st { + display_pipe_source_params_st src; + display_pipe_dest_params_st dest; + scaler_ratio_depth_st scale_ratio_depth; + scaler_taps_st scale_taps; +}; + +struct _vcs_dpi_display_clocks_and_cfg_st { + int voltage; + double dppclk_mhz; + double refclk_mhz; + double dispclk_mhz; + double dcfclk_mhz; + double socclk_mhz; +}; + +struct _vcs_dpi_display_e2e_pipe_params_st { + display_pipe_params_st pipe; + display_output_params_st dout; + display_clocks_and_cfg_st clks_cfg; +}; + +struct _vcs_dpi_dchub_buffer_sizing_st { + unsigned int swath_width_y; + unsigned int swath_height_y; + unsigned int swath_height_c; + unsigned int detail_buffer_size_y; +}; + +struct _vcs_dpi_watermarks_perf_st { + double stutter_eff_in_active_region_percent; + double urgent_latency_supported_us; + double non_urgent_latency_supported_us; + double dram_clock_change_margin_us; + double dram_access_eff_percent; +}; + +struct _vcs_dpi_cstate_pstate_watermarks_st { + double cstate_exit_us; + double cstate_enter_plus_exit_us; + double pstate_change_us; +}; + +struct _vcs_dpi_wm_calc_pipe_params_st { + unsigned int num_dpp; + int voltage; + int output_type; + double dcfclk_mhz; + double socclk_mhz; + double dppclk_mhz; + double pixclk_mhz; + unsigned char interlace_en; + unsigned char pte_enable; + unsigned char dcc_enable; + double dcc_rate; + double bytes_per_pixel_c; + double bytes_per_pixel_y; + unsigned int swath_width_y; + unsigned int swath_height_y; + unsigned int swath_height_c; + unsigned int 
det_buffer_size_y; + double h_ratio; + double v_ratio; + unsigned int h_taps; + unsigned int h_total; + unsigned int v_total; + unsigned int v_active; + unsigned int e2e_index; + double display_pipe_line_delivery_time; + double read_bw; + unsigned int lines_in_det_y; + unsigned int lines_in_det_y_rounded_down_to_swath; + double full_det_buffering_time; + double dcfclk_deepsleep_mhz_per_plane; +}; + +struct _vcs_dpi_vratio_pre_st { + double vratio_pre_l; + double vratio_pre_c; +}; + +struct _vcs_dpi_display_data_rq_misc_params_st { + unsigned int full_swath_bytes; + unsigned int stored_swath_bytes; + unsigned int blk256_height; + unsigned int blk256_width; + unsigned int req_height; + unsigned int req_width; +}; + +struct _vcs_dpi_display_data_rq_sizing_params_st { + unsigned int chunk_bytes; + unsigned int min_chunk_bytes; + unsigned int meta_chunk_bytes; + unsigned int min_meta_chunk_bytes; + unsigned int mpte_group_bytes; + unsigned int dpte_group_bytes; +}; + +struct _vcs_dpi_display_data_rq_dlg_params_st { + unsigned int swath_width_ub; + unsigned int swath_height; + unsigned int req_per_swath_ub; + unsigned int meta_pte_bytes_per_frame_ub; + unsigned int dpte_req_per_row_ub; + unsigned int dpte_groups_per_row_ub; + unsigned int dpte_row_height; + unsigned int dpte_bytes_per_row_ub; + unsigned int meta_chunks_per_row_ub; + unsigned int meta_req_per_row_ub; + unsigned int meta_row_height; + unsigned int meta_bytes_per_row_ub; +}; + +struct _vcs_dpi_display_cur_rq_dlg_params_st { + unsigned char enable; + unsigned int swath_height; + unsigned int req_per_line; +}; + +struct _vcs_dpi_display_rq_dlg_params_st { + display_data_rq_dlg_params_st rq_l; + display_data_rq_dlg_params_st rq_c; + display_cur_rq_dlg_params_st rq_cur0; +}; + +struct _vcs_dpi_display_rq_sizing_params_st { + display_data_rq_sizing_params_st rq_l; + display_data_rq_sizing_params_st rq_c; +}; + +struct _vcs_dpi_display_rq_misc_params_st { + display_data_rq_misc_params_st rq_l; + display_data_rq_misc_params_st rq_c; +}; + +struct _vcs_dpi_display_rq_params_st { + unsigned char yuv420; + unsigned char yuv420_10bpc; + display_rq_misc_params_st misc; + display_rq_sizing_params_st sizing; + display_rq_dlg_params_st dlg; +}; + +struct _vcs_dpi_display_dlg_regs_st { + unsigned int refcyc_h_blank_end; + unsigned int dlg_vblank_end; + unsigned int min_dst_y_next_start; + unsigned int refcyc_per_htotal; + unsigned int refcyc_x_after_scaler; + unsigned int dst_y_after_scaler; + unsigned int dst_y_prefetch; + unsigned int dst_y_per_vm_vblank; + unsigned int dst_y_per_row_vblank; + unsigned int dst_y_per_vm_flip; + unsigned int dst_y_per_row_flip; + unsigned int ref_freq_to_pix_freq; + unsigned int vratio_prefetch; + unsigned int vratio_prefetch_c; + unsigned int refcyc_per_pte_group_vblank_l; + unsigned int refcyc_per_pte_group_vblank_c; + unsigned int refcyc_per_meta_chunk_vblank_l; + unsigned int refcyc_per_meta_chunk_vblank_c; + unsigned int refcyc_per_pte_group_flip_l; + unsigned int refcyc_per_pte_group_flip_c; + unsigned int refcyc_per_meta_chunk_flip_l; + unsigned int refcyc_per_meta_chunk_flip_c; + unsigned int dst_y_per_pte_row_nom_l; + unsigned int dst_y_per_pte_row_nom_c; + unsigned int refcyc_per_pte_group_nom_l; + unsigned int refcyc_per_pte_group_nom_c; + unsigned int dst_y_per_meta_row_nom_l; + unsigned int dst_y_per_meta_row_nom_c; + unsigned int refcyc_per_meta_chunk_nom_l; + unsigned int refcyc_per_meta_chunk_nom_c; + unsigned int refcyc_per_line_delivery_pre_l; + unsigned int refcyc_per_line_delivery_pre_c; + 
unsigned int refcyc_per_line_delivery_l; + unsigned int refcyc_per_line_delivery_c; + unsigned int chunk_hdl_adjust_cur0; + unsigned int chunk_hdl_adjust_cur1; + unsigned int vready_after_vcount0; + unsigned int dst_y_offset_cur0; + unsigned int dst_y_offset_cur1; + unsigned int xfc_reg_transfer_delay; + unsigned int xfc_reg_precharge_delay; + unsigned int xfc_reg_remote_surface_flip_latency; + unsigned int xfc_reg_prefetch_margin; + unsigned int dst_y_delta_drq_limit; +}; + +struct _vcs_dpi_display_ttu_regs_st { + unsigned int qos_level_low_wm; + unsigned int qos_level_high_wm; + unsigned int min_ttu_vblank; + unsigned int qos_level_flip; + unsigned int refcyc_per_req_delivery_l; + unsigned int refcyc_per_req_delivery_c; + unsigned int refcyc_per_req_delivery_cur0; + unsigned int refcyc_per_req_delivery_cur1; + unsigned int refcyc_per_req_delivery_pre_l; + unsigned int refcyc_per_req_delivery_pre_c; + unsigned int refcyc_per_req_delivery_pre_cur0; + unsigned int refcyc_per_req_delivery_pre_cur1; + unsigned int qos_level_fixed_l; + unsigned int qos_level_fixed_c; + unsigned int qos_level_fixed_cur0; + unsigned int qos_level_fixed_cur1; + unsigned int qos_ramp_disable_l; + unsigned int qos_ramp_disable_c; + unsigned int qos_ramp_disable_cur0; + unsigned int qos_ramp_disable_cur1; +}; + +struct _vcs_dpi_display_data_rq_regs_st { + unsigned int chunk_size; + unsigned int min_chunk_size; + unsigned int meta_chunk_size; + unsigned int min_meta_chunk_size; + unsigned int dpte_group_size; + unsigned int mpte_group_size; + unsigned int swath_height; + unsigned int pte_row_height_linear; +}; + +struct _vcs_dpi_display_rq_regs_st { + display_data_rq_regs_st rq_regs_l; + display_data_rq_regs_st rq_regs_c; + unsigned int drq_expansion_mode; + unsigned int prq_expansion_mode; + unsigned int mrq_expansion_mode; + unsigned int crq_expansion_mode; + unsigned int plane1_base_address; +}; + +struct _vcs_dpi_display_dlg_sys_params_st { + double t_mclk_wm_us; + double t_urg_wm_us; + double t_sr_wm_us; + double t_extra_us; + double mem_trip_us; + double t_srx_delay_us; + double deepsleep_dcfclk_mhz; + double total_flip_bw; + unsigned int total_flip_bytes; +}; + +struct _vcs_dpi_display_dlg_prefetch_param_st { + double prefetch_bw; + unsigned int flip_bytes; +}; + +struct _vcs_dpi_display_pipe_clock_st { + double dcfclk_mhz; + double dispclk_mhz; + double socclk_mhz; + double dscclk_mhz[6]; + double dppclk_mhz[6]; +}; + +struct _vcs_dpi_display_arb_params_st { + int max_req_outstanding; + int min_req_outstanding; + int sat_level_us; }; #endif /*__DISPLAY_MODE_STRUCTS_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h index f9cf08357989..e8ce08567cd8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h @@ -35,6 +35,16 @@ static inline double dml_min(double a, double b) return (double) dcn_bw_min2(a, b); } +static inline double dml_min3(double a, double b, double c) +{ + return dml_min(dml_min(a, b), c); +} + +static inline double dml_min4(double a, double b, double c, double d) +{ + return dml_min(dml_min(a, b), dml_min(c, d)); +} + static inline double dml_max(double a, double b) { return (double) dcn_bw_max2(a, b); -- cgit v1.2.1 From 66dec27a987bfcd2572bfc7520826b11340d264f Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 24 Apr 2018 15:21:33 -0400 Subject: drm/amd/display: Fix up dm logging functionality Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr 
Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 5 ----- .../gpu/drm/amd/display/dc/basics/log_helpers.c | 1 - drivers/gpu/drm/amd/display/dc/basics/logger.c | 1 + drivers/gpu/drm/amd/display/dc/dm_services.h | 4 ---- drivers/gpu/drm/amd/display/modules/stats/stats.c | 24 ++++++++++++++-------- 5 files changed, 17 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ca0b08bfa2cf..bd449351803f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -330,11 +330,6 @@ bool dm_helpers_dp_mst_send_payload_allocation( return true; } -bool dm_helpers_dc_conn_log(struct dc_context *ctx, struct log_entry *entry, enum dc_log_type event) -{ - return true; -} - void dm_dtn_log_begin(struct dc_context *ctx) {} diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c index 854678a0c54b..021451549ff7 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c @@ -94,7 +94,6 @@ void dc_conn_log(struct dc_context *ctx, dm_logger_append(&entry, "%2.2X ", hex_data[i]); dm_logger_append(&entry, "^\n"); - dm_helpers_dc_conn_log(ctx, &entry, event); fail: dm_logger_close(&entry); diff --git a/drivers/gpu/drm/amd/display/dc/basics/logger.c b/drivers/gpu/drm/amd/display/dc/basics/logger.c index 0001a3c5b862..738a818d58d1 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/logger.c +++ b/drivers/gpu/drm/amd/display/dc/basics/logger.c @@ -402,3 +402,4 @@ cleanup: entry->max_buf_bytes = 0; } } + diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h index 8eafe1af8a5e..4ff9b2bba178 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_services.h +++ b/drivers/gpu/drm/amd/display/dc/dm_services.h @@ -355,10 +355,6 @@ void dm_perf_trace_timestamp(const char *func_name, unsigned int line); /* * Debug and verification hooks */ -bool dm_helpers_dc_conn_log( - struct dc_context *ctx, - struct log_entry *entry, - enum dc_log_type event); void dm_dtn_log_begin(struct dc_context *ctx); void dm_dtn_log_append_v(struct dc_context *ctx, const char *msg, ...); diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index d16aac7b30b3..ae2d92b73cf1 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -168,6 +168,7 @@ void mod_stats_dump(struct mod_stats *mod_stats) struct core_stats *core_stats = NULL; struct stats_time_cache *time = NULL; unsigned int index = 0; + struct log_entry log_entry; if (mod_stats == NULL) return; @@ -177,17 +178,22 @@ void mod_stats_dump(struct mod_stats *mod_stats) logger = dc->ctx->logger; time = core_stats->time; - dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Caps=="); - dm_logger_write(logger, LOG_DISPLAYSTATS, " "); + dm_logger_open( + dc->ctx->logger, + &log_entry, + LOG_DISPLAYSTATS); - dm_logger_write(logger, LOG_DISPLAYSTATS, "==Display Stats=="); - dm_logger_write(logger, LOG_DISPLAYSTATS, " "); + dm_logger_append(&log_entry, "==Display Caps==\n"); + dm_logger_append(&log_entry, "\n"); - dm_logger_write(logger, LOG_DISPLAYSTATS, + dm_logger_append(&log_entry, "==Display Stats==\n"); + 
dm_logger_append(&log_entry, "\n"); + + dm_logger_append(&log_entry, "%10s %10s %10s %10s %10s" " %11s %11s %17s %10s %14s" " %10s %10s %10s %10s %10s" - " %10s %10s %10s %10s", + " %10s %10s %10s %10s\n", "render", "avgRender", "minWindow", "midPoint", "maxWindow", "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip", @@ -197,11 +203,11 @@ void mod_stats_dump(struct mod_stats *mod_stats) "vSyncTime4", "vSyncTime5", "flags"); for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) { - dm_logger_write(logger, LOG_DISPLAYSTATS, + dm_logger_append(&log_entry, "%10u %10u %10u %10u %10u" " %11u %11u %17u %10u %14u" " %10u %10u %10u %10u %10u" - " %10u %10u %10u %10u", + " %10u %10u %10u %10u\n", time[i].render_time_in_us, time[i].avg_render_time_in_us_last_ten, time[i].min_window, @@ -222,6 +228,8 @@ void mod_stats_dump(struct mod_stats *mod_stats) time[i].v_sync_time_in_us[4], time[i].flags); } + + dm_logger_close(&log_entry); } void mod_stats_reset_data(struct mod_stats *mod_stats) -- cgit v1.2.1 From 5103c5688518ea16c7f2f864b784c1266cd13c89 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Tue, 24 Apr 2018 15:36:27 -0400 Subject: drm/amd/display: use macro for logs Signed-off-by: Anthony Koo Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/include/logger_interface.h | 9 +++++++++ drivers/gpu/drm/amd/display/modules/stats/stats.c | 19 ++++++------------- 2 files changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h index 28dee960d509..dc98d6d4b2bd 100644 --- a/drivers/gpu/drm/amd/display/include/logger_interface.h +++ b/drivers/gpu/drm/amd/display/include/logger_interface.h @@ -190,4 +190,13 @@ void context_clock_trace( } \ } while (0) +#define DISPLAY_STATS_BEGIN(entry) \ + dm_logger_open(dc->ctx->logger, &entry, LOG_DISPLAYSTATS) + +#define DISPLAY_STATS(msg, ...) 
\ + dm_logger_append(&log_entry, msg, ##__VA_ARGS__) + +#define DISPLAY_STATS_END(entry) \ + dm_logger_close(&entry) + #endif /* __DAL_LOGGER_INTERFACE_H__ */ diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index ae2d92b73cf1..45acdbc3c08a 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -178,19 +178,13 @@ void mod_stats_dump(struct mod_stats *mod_stats) logger = dc->ctx->logger; time = core_stats->time; - dm_logger_open( - dc->ctx->logger, - &log_entry, - LOG_DISPLAYSTATS); + DISPLAY_STATS_BEGIN(log_entry); - dm_logger_append(&log_entry, "==Display Caps==\n"); - dm_logger_append(&log_entry, "\n"); + DISPLAY_STATS("==Display Caps==\n"); - dm_logger_append(&log_entry, "==Display Stats==\n"); - dm_logger_append(&log_entry, "\n"); + DISPLAY_STATS("==Display Stats==\n"); - dm_logger_append(&log_entry, - "%10s %10s %10s %10s %10s" + DISPLAY_STATS("%10s %10s %10s %10s %10s" " %11s %11s %17s %10s %14s" " %10s %10s %10s %10s %10s" " %10s %10s %10s %10s\n", @@ -203,8 +197,7 @@ void mod_stats_dump(struct mod_stats *mod_stats) "vSyncTime4", "vSyncTime5", "flags"); for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) { - dm_logger_append(&log_entry, - "%10u %10u %10u %10u %10u" + DISPLAY_STATS("%10u %10u %10u %10u %10u" " %11u %11u %17u %10u %14u" " %10u %10u %10u %10u %10u" " %10u %10u %10u %10u\n", @@ -229,7 +222,7 @@ void mod_stats_dump(struct mod_stats *mod_stats) time[i].flags); } - dm_logger_close(&log_entry); + DISPLAY_STATS_END(log_entry); } void mod_stats_reset_data(struct mod_stats *mod_stats) -- cgit v1.2.1 From eb815442e840e436108ae4112fa80fc2e7ff47f3 Mon Sep 17 00:00:00 2001 From: Samson Tam Date: Fri, 13 Apr 2018 18:38:56 -0400 Subject: drm/amd/display: don't create new dc_sink if nothing changed at detection Signed-off-by: Samson Tam Reviewed-by: Aric Cyr Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 95 ++++++++++++++++++++++----- 1 file changed, 77 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ea5d5ffd5522..2fa521812d23 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -469,6 +469,13 @@ static void link_disconnect_sink(struct dc_link *link) link->dpcd_sink_count = 0; } +static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link) +{ + dc_sink_release(link->local_sink); + link->local_sink = prev_sink; +} + + static bool detect_dp( struct dc_link *link, struct display_sink_capability *sink_caps, @@ -551,6 +558,17 @@ static bool detect_dp( return true; } +static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) +{ + if (old_edid->length != new_edid->length) + return false; + + if (new_edid->length == 0) + return false; + + return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0); +} + bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; @@ -558,9 +576,13 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) uint8_t i; bool converter_disable_audio = false; struct audio_support *aud_support = &link->dc->res_pool->audio_support; + bool same_edid = false; enum dc_edid_status edid_status; struct dc_context *dc_ctx = link->ctx; struct 
dc_sink *sink = NULL; + struct dc_sink *prev_sink = NULL; + struct dpcd_caps prev_dpcd_caps; + bool same_dpcd = true; enum dc_connection_type new_connection_type = dc_connection_none; DC_LOGGER_INIT(link->ctx->logger); if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) @@ -575,6 +597,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) link->local_sink) return true; + prev_sink = link->local_sink; + if (prev_sink != NULL) { + dc_sink_retain(prev_sink); + memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps)); + } link_disconnect_sink(link); if (new_connection_type != dc_connection_none) { @@ -616,14 +643,25 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) link, &sink_caps, &converter_disable_audio, - aud_support, reason)) + aud_support, reason)) { + if (prev_sink != NULL) + dc_sink_release(prev_sink); return false; + } + // Check if dpcp block is the same + if (prev_sink != NULL) { + if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) + same_dpcd = false; + } /* Active dongle downstream unplug */ if (link->type == dc_connection_active_dongle && link->dpcd_caps.sink_count. - bits.SINK_COUNT == 0) + bits.SINK_COUNT == 0) { + if (prev_sink != NULL) + dc_sink_release(prev_sink); return true; + } if (link->type == dc_connection_mst_branch) { LINK_INFO("link=%d, mst branch is now Connected\n", @@ -634,6 +672,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * pbn_per_slot value leading to exception on dc_fixpt_div() */ link->verified_link_cap = link->reported_link_cap; + if (prev_sink != NULL) + dc_sink_release(prev_sink); return false; } @@ -643,6 +683,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) default: DC_ERROR("Invalid connector type! 
signal:%d\n", link->connector_signal); + if (prev_sink != NULL) + dc_sink_release(prev_sink); return false; } /* switch() */ @@ -665,6 +707,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) sink = dc_sink_create(&sink_init_data); if (!sink) { DC_ERROR("Failed to create sink!\n"); + if (prev_sink != NULL) + dc_sink_release(prev_sink); return false; } @@ -688,22 +732,33 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) break; } - if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && - sink_caps.transaction_type == - DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { - /* - * TODO debug why Dell 2413 doesn't like - * two link trainings - */ + // Check if edid is the same + if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK))) + same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); - /* deal with non-mst cases */ - dp_hbr_verify_link_cap(link, &link->reported_link_cap); - } + // If both edid and dpcd are the same, then discard new sink and revert back to original sink + if ((same_edid) && (same_dpcd)) { + link_disconnect_remap(prev_sink, link); + sink = prev_sink; + prev_sink = NULL; + } else { + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + sink_caps.transaction_type == + DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { + /* + * TODO debug why Dell 2413 doesn't like + * two link trainings + */ + + /* deal with non-mst cases */ + dp_hbr_verify_link_cap(link, &link->reported_link_cap); + } - /* HDMI-DVI Dongle */ - if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && - !sink->edid_caps.edid_hdmi) - sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + /* HDMI-DVI Dongle */ + if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && + !sink->edid_caps.edid_hdmi) + sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + } /* Connectivity log: detection */ for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) { @@ -762,10 +817,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) sink_caps.signal = SIGNAL_TYPE_NONE; } - LINK_INFO("link=%d, dc_sink_in=%p is now %s\n", + LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n", link->link_index, sink, (sink_caps.signal == SIGNAL_TYPE_NONE ? 
- "Disconnected":"Connected")); + "Disconnected":"Connected"), prev_sink, + same_dpcd, same_edid); + + if (prev_sink != NULL) + dc_sink_release(prev_sink); return true; } -- cgit v1.2.1 From 8fc06ebc2bb719cddb041bcb14b5ca87adbcd57f Mon Sep 17 00:00:00 2001 From: Xingyue Tao Date: Thu, 19 Apr 2018 16:23:12 -0400 Subject: drm/amd/display: Only limit VSR downscaling when actually downscaling Signed-off-by: Xingyue Tao Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 2da138904312..46a35c7f01df 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -145,18 +145,17 @@ bool dpp_get_optimal_number_of_taps( else pixel_width = scl_data->viewport.width; + /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ if (scl_data->viewport.width != scl_data->h_active && - scl_data->viewport.height != scl_data->v_active) { - - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ - if (dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && - scl_data->format == PIXEL_FORMAT_FP16) - return false; - - if (dpp->ctx->dc->debug.max_downscale_src_width != 0 && - scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) - return false; - } + scl_data->viewport.height != scl_data->v_active && + dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT && + scl_data->format == PIXEL_FORMAT_FP16) + return false; + + if (scl_data->viewport.width > scl_data->h_active && + dpp->ctx->dc->debug.max_downscale_src_width != 0 && + scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width) + return false; /* TODO: add lb check */ -- cgit v1.2.1 From 109ece8d43cdb491a968b3690e947e27225f886e Mon Sep 17 00:00:00 2001 From: Jun Lei Date: Thu, 26 Apr 2018 10:24:25 -0400 Subject: drm/amd/display: constify a few dc_surface_update fields Signed-off-by: Jun Lei Reviewed-by: Jun Lei Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 7a9f600662ce..9cfde0ccf4e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -503,18 +503,18 @@ struct dc_surface_update { struct dc_plane_state *surface; /* isr safe update parameters. 
null means no updates */ - struct dc_flip_addrs *flip_addr; - struct dc_plane_info *plane_info; - struct dc_scaling_info *scaling_info; + const struct dc_flip_addrs *flip_addr; + const struct dc_plane_info *plane_info; + const struct dc_scaling_info *scaling_info; /* following updates require alloc/sleep/spin that is not isr safe, * null means no updates */ - struct dc_gamma *gamma; - struct dc_transfer_func *in_transfer_func; + const struct dc_gamma *gamma; + const struct dc_transfer_func *in_transfer_func; - struct dc_csc_transform *input_csc_color_matrix; - struct fixed31_32 *coeff_reduction_factor; + const struct dc_csc_transform *input_csc_color_matrix; + const struct fixed31_32 *coeff_reduction_factor; }; /* -- cgit v1.2.1 From 6474b2824d71ac6cd1005aff8841dd8bcfa0901d Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Thu, 26 Apr 2018 10:03:44 -0400 Subject: drm/amd/display: Add fullscreen transitions to log Signed-off-by: Anthony Koo Reviewed-by: Aric Cyr Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/modules/inc/mod_stats.h | 4 + drivers/gpu/drm/amd/display/modules/stats/stats.c | 137 +++++++++++++++++---- 2 files changed, 114 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h index 3230e2adb870..3812094b52e8 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_stats.h @@ -46,6 +46,10 @@ void mod_stats_dump(struct mod_stats *mod_stats); void mod_stats_reset_data(struct mod_stats *mod_stats); +void mod_stats_update_event(struct mod_stats *mod_stats, + char *event_string, + unsigned int length); + void mod_stats_update_flip(struct mod_stats *mod_stats, unsigned long timestamp_in_ns); diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index 45acdbc3c08a..4b00bae725b9 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -36,9 +36,14 @@ #define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000 #define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000 +#define DAL_STATS_EVENT_ENTRIES_DEFAULT 0x00000100 + #define MOD_STATS_NUM_VSYNCS 5 +#define MOD_STATS_EVENT_STRING_MAX 512 struct stats_time_cache { + unsigned int entry_id; + unsigned long flip_timestamp_in_ns; unsigned long vupdate_timestamp_in_ns; @@ -63,15 +68,26 @@ struct stats_time_cache { unsigned int flags; }; +struct stats_event_cache { + unsigned int entry_id; + char event_string[MOD_STATS_EVENT_STRING_MAX]; +}; + struct core_stats { struct mod_stats public; struct dc *dc; + bool enabled; + unsigned int entries; + unsigned int event_entries; + unsigned int entry_id; + struct stats_time_cache *time; unsigned int index; - bool enabled; - unsigned int entries; + struct stats_event_cache *events; + unsigned int event_index; + }; #define MOD_STATS_TO_CORE(mod_stats)\ @@ -125,9 +141,18 @@ struct mod_stats *mod_stats_create(struct dc *dc) else core_stats->entries = reg_data; } + core_stats->time = kzalloc( + sizeof(struct stats_time_cache) * + core_stats->entries, + GFP_KERNEL); - core_stats->time = kzalloc(sizeof(struct stats_time_cache) * core_stats->entries, + + core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT; + core_stats->events = kzalloc( + sizeof(struct stats_event_cache) * + core_stats->event_entries, GFP_KERNEL); + } else { core_stats->entries = 0; } @@ 
-139,6 +164,10 @@ struct mod_stats *mod_stats_create(struct dc *dc) * handle calculation cases that depend on previous flip data. */ core_stats->index = 1; + core_stats->event_index = 0; + + // Keeps track of ordering within the different stats structures + core_stats->entry_id = 0; return &core_stats->public; @@ -167,6 +196,9 @@ void mod_stats_dump(struct mod_stats *mod_stats) struct dal_logger *logger = NULL; struct core_stats *core_stats = NULL; struct stats_time_cache *time = NULL; + struct stats_event_cache *events = NULL; + unsigned int time_index = 1; + unsigned int event_index = 0; unsigned int index = 0; struct log_entry log_entry; @@ -177,6 +209,7 @@ void mod_stats_dump(struct mod_stats *mod_stats) dc = core_stats->dc; logger = dc->ctx->logger; time = core_stats->time; + events = core_stats->events; DISPLAY_STATS_BEGIN(log_entry); @@ -196,30 +229,39 @@ void mod_stats_dump(struct mod_stats *mod_stats) "vSyncTime1", "vSyncTime2", "vSyncTime3", "vSyncTime4", "vSyncTime5", "flags"); - for (int i = 0; i < core_stats->index && i < core_stats->entries; i++) { - DISPLAY_STATS("%10u %10u %10u %10u %10u" - " %11u %11u %17u %10u %14u" - " %10u %10u %10u %10u %10u" - " %10u %10u %10u %10u\n", - time[i].render_time_in_us, - time[i].avg_render_time_in_us_last_ten, - time[i].min_window, - time[i].lfc_mid_point_in_us, - time[i].max_window, - time[i].vsync_to_flip_time_in_us, - time[i].flip_to_vsync_time_in_us, - time[i].num_vsync_between_flips, - time[i].num_frames_inserted, - time[i].inserted_duration_in_us, - time[i].v_total_min, - time[i].v_total_max, - time[i].event_triggers, - time[i].v_sync_time_in_us[0], - time[i].v_sync_time_in_us[1], - time[i].v_sync_time_in_us[2], - time[i].v_sync_time_in_us[3], - time[i].v_sync_time_in_us[4], - time[i].flags); + for (int i = 0; i < core_stats->entry_id; i++) { + if (event_index < core_stats->event_index && + i == events[event_index].entry_id) { + DISPLAY_STATS("%s\n", events[event_index].event_string); + event_index++; + } else if (time_index < core_stats->index && + i == time[time_index].entry_id) { + DISPLAY_STATS("%10u %10u %10u %10u %10u" + " %11u %11u %17u %10u %14u" + " %10u %10u %10u %10u %10u" + " %10u %10u %10u %10u\n", + time[time_index].render_time_in_us, + time[time_index].avg_render_time_in_us_last_ten, + time[time_index].min_window, + time[time_index].lfc_mid_point_in_us, + time[time_index].max_window, + time[time_index].vsync_to_flip_time_in_us, + time[time_index].flip_to_vsync_time_in_us, + time[time_index].num_vsync_between_flips, + time[time_index].num_frames_inserted, + time[time_index].inserted_duration_in_us, + time[time_index].v_total_min, + time[time_index].v_total_max, + time[time_index].event_triggers, + time[time_index].v_sync_time_in_us[0], + time[time_index].v_sync_time_in_us[1], + time[time_index].v_sync_time_in_us[2], + time[time_index].v_sync_time_in_us[3], + time[time_index].v_sync_time_in_us[4], + time[time_index].flags); + + time_index++; + } } DISPLAY_STATS_END(log_entry); @@ -239,7 +281,46 @@ void mod_stats_reset_data(struct mod_stats *mod_stats) memset(core_stats->time, 0, sizeof(struct stats_time_cache) * core_stats->entries); + memset(core_stats->events, 0, + sizeof(struct stats_event_cache) * core_stats->event_entries); + core_stats->index = 1; + core_stats->event_index = 0; + + // Keeps track of ordering within the different stats structures + core_stats->entry_id = 0; +} + +void mod_stats_update_event(struct mod_stats *mod_stats, + char *event_string, + unsigned int length) +{ + struct core_stats *core_stats = 
NULL; + struct stats_event_cache *events = NULL; + unsigned int index = 0; + unsigned int copy_length = 0; + + if (mod_stats == NULL) + return; + + core_stats = MOD_STATS_TO_CORE(mod_stats); + + if (core_stats->index >= core_stats->entries) + return; + + events = core_stats->events; + index = core_stats->event_index; + + copy_length = length; + if (length > MOD_STATS_EVENT_STRING_MAX) + copy_length = MOD_STATS_EVENT_STRING_MAX; + + memcpy(&events[index].event_string, event_string, copy_length); + events[index].event_string[copy_length - 1] = '\0'; + + events[index].entry_id = core_stats->entry_id; + core_stats->event_index++; + core_stats->entry_id++; } void mod_stats_update_flip(struct mod_stats *mod_stats, @@ -280,7 +361,9 @@ void mod_stats_update_flip(struct mod_stats *mod_stats, (timestamp_in_ns - time[index - 1].vupdate_timestamp_in_ns) / 1000; + time[index].entry_id = core_stats->entry_id; core_stats->index++; + core_stats->entry_id++; } void mod_stats_update_vupdate(struct mod_stats *mod_stats, -- cgit v1.2.1 From dab911d535ae24a39b4e383c0dffaa3e5583883d Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Fri, 27 Apr 2018 15:23:23 -0400 Subject: drm/amd/display: fix bug with index check Signed-off-by: Anthony Koo Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/stats/stats.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index 4b00bae725b9..fe9e4b316d3a 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -305,7 +305,7 @@ void mod_stats_update_event(struct mod_stats *mod_stats, core_stats = MOD_STATS_TO_CORE(mod_stats); - if (core_stats->index >= core_stats->entries) + if (core_stats->event_index >= core_stats->event_entries) return; events = core_stats->events; -- cgit v1.2.1 From a944744ba517256fcc9311e12c083563cbbe7c88 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Fri, 27 Apr 2018 17:26:25 -0400 Subject: drm/amd/display: Clear underflow status for debug purposes We want to keep underflow sticky bit on for the longevity tests outside of test environment. For debug purposes it is, however, useful to clear underflow status after the test that caused it so that the following tests are not affected. This change fullfils both requirements by clearing the underflow only from within Windows or Diags test environment. Signed-off-by: Nikola Cornij Reviewed-by: Nikola Cornij Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index c452972bf1c3..f8e0576af6e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -326,6 +326,12 @@ void dcn10_log_hw_state(struct dc *dc) s.h_total, s.v_total, s.underflow_occurred_status); + + // Clear underflow for debug purposes + // We want to keep underflow sticky bit on for the longevity tests outside of test environment. + // This function is called only from Windows or Diags test environment, hence it's safe to clear + // it from here without affecting the original intent. 
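
As a usage note on the mod_stats event hunks earlier in this series: a minimal, hypothetical caller sketch, relying only on the mod_stats_update_event() signature and the entry_id bookkeeping shown above; the wrapper name and event text are invented for illustration.

static void example_log_transition(struct mod_stats *mod_stats)
{
	char msg[] = "fullscreen enter";

	/* length includes the terminating NUL; mod_stats_update_event()
	 * clamps it to MOD_STATS_EVENT_STRING_MAX and tags the entry with
	 * the shared entry_id, so the string is interleaved with the flip
	 * samples when mod_stats_dump() walks both caches.
	 */
	mod_stats_update_event(mod_stats, msg, sizeof(msg));
}
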
+ tg->funcs->clear_optc_underflow(tg); } DTN_INFO("\n"); -- cgit v1.2.1 From f0cd0a346dfd1df4b691fe38dafb51911392fbce Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Thu, 26 Apr 2018 14:06:00 -0400 Subject: drm/amd/display: DCN1 link encoder Create DCN1 link encoder files and update AUX and HPD register access. Signed-off-by: Eric Bernstein Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 2 - drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 2 +- .../drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 1362 ++++++++++++++++++++ .../drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 330 +++++ .../gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 43 +- 5 files changed, 1716 insertions(+), 23 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c create mode 100644 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 7c866a7d5e77..82cd1d6e6e59 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -11,8 +11,6 @@ #include "dc_link_dp.h" #include "dc_link_ddc.h" #include "dm_helpers.h" -#include "dce/dce_link_encoder.h" -#include "dce/dce_stream_encoder.h" #include "dpcd_defs.h" enum dc_status core_link_read_dpcd( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile index 5c69743a4b4f..84f52c63d95c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile @@ -26,7 +26,7 @@ DCN10 = dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \ dcn10_dpp.o dcn10_opp.o dcn10_optc.o \ dcn10_hubp.o dcn10_mpc.o \ dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \ - dcn10_hubbub.o dcn10_stream_encoder.o + dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10)) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c new file mode 100644 index 000000000000..21fa40ac0786 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c @@ -0,0 +1,1362 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "reg_helper.h" + +#include "core_types.h" +#include "link_encoder.h" +#include "dcn10_link_encoder.h" +#include "stream_encoder.h" +#include "i2caux_interface.h" +#include "dc_bios_types.h" + +#include "gpio_service_interface.h" + +#define CTX \ + enc10->base.ctx +#define DC_LOGGER \ + enc10->base.ctx->logger + +#define REG(reg)\ + (enc10->link_regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc10->link_shift->field_name, enc10->link_mask->field_name + + +/* + * @brief + * Trigger Source Select + * ASIC-dependent, actual values for register programming + */ +#define DCN10_DIG_FE_SOURCE_SELECT_INVALID 0x0 +#define DCN10_DIG_FE_SOURCE_SELECT_DIGA 0x1 +#define DCN10_DIG_FE_SOURCE_SELECT_DIGB 0x2 +#define DCN10_DIG_FE_SOURCE_SELECT_DIGC 0x4 +#define DCN10_DIG_FE_SOURCE_SELECT_DIGD 0x08 +#define DCN10_DIG_FE_SOURCE_SELECT_DIGE 0x10 +#define DCN10_DIG_FE_SOURCE_SELECT_DIGF 0x20 +#define DCN10_DIG_FE_SOURCE_SELECT_DIGG 0x40 + +enum { + DP_MST_UPDATE_MAX_RETRY = 50 +}; + + + +static void aux_initialize(struct dcn10_link_encoder *enc10); + + +static const struct link_encoder_funcs dcn10_lnk_enc_funcs = { + .validate_output_with_stream = + dcn10_link_encoder_validate_output_with_stream, + .hw_init = dcn10_link_encoder_hw_init, + .setup = dcn10_link_encoder_setup, + .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, + .enable_dp_output = dcn10_link_encoder_enable_dp_output, + .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output, + .disable_output = dcn10_link_encoder_disable_output, + .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, + .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, + .update_mst_stream_allocation_table = + dcn10_link_encoder_update_mst_stream_allocation_table, + .psr_program_dp_dphy_fast_training = + dcn10_psr_program_dp_dphy_fast_training, + .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, + .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, + .enable_hpd = dcn10_link_encoder_enable_hpd, + .disable_hpd = dcn10_link_encoder_disable_hpd, + .is_dig_enabled = dcn10_is_dig_enabled, + .destroy = dcn10_link_encoder_destroy +}; + +static enum bp_result link_transmitter_control( + struct dcn10_link_encoder *enc10, + struct bp_transmitter_control *cntl) +{ + enum bp_result result; + struct dc_bios *bp = enc10->base.ctx->dc_bios; + + result = bp->funcs->transmitter_control(bp, cntl); + + return result; +} + +static void enable_phy_bypass_mode( + struct dcn10_link_encoder *enc10, + bool enable) +{ + /* This register resides in DP back end block; + * transmitter is used for the offset + */ + REG_UPDATE(DP_DPHY_CNTL, DPHY_BYPASS, enable); + +} + +static void disable_prbs_symbols( + struct dcn10_link_encoder *enc10, + bool disable) +{ + /* This register resides in DP back end block; + * transmitter is used for the offset + */ + REG_UPDATE_4(DP_DPHY_CNTL, + DPHY_ATEST_SEL_LANE0, disable, + DPHY_ATEST_SEL_LANE1, disable, + DPHY_ATEST_SEL_LANE2, disable, + DPHY_ATEST_SEL_LANE3, disable); +} + +static void disable_prbs_mode( + struct dcn10_link_encoder *enc10) +{ + REG_UPDATE(DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, 0); +} + +static void program_pattern_symbols( + struct dcn10_link_encoder *enc10, + uint16_t pattern_symbols[8]) +{ + /* This register resides in DP back end block; + * transmitter is used for the offset + */ + REG_SET_3(DP_DPHY_SYM0, 0, + DPHY_SYM1, pattern_symbols[0], + DPHY_SYM2, pattern_symbols[1], + DPHY_SYM3, pattern_symbols[2]); + + /* This register 
resides in DP back end block; + * transmitter is used for the offset + */ + REG_SET_3(DP_DPHY_SYM1, 0, + DPHY_SYM4, pattern_symbols[3], + DPHY_SYM5, pattern_symbols[4], + DPHY_SYM6, pattern_symbols[5]); + + /* This register resides in DP back end block; + * transmitter is used for the offset + */ + REG_SET_2(DP_DPHY_SYM2, 0, + DPHY_SYM7, pattern_symbols[6], + DPHY_SYM8, pattern_symbols[7]); +} + +static void set_dp_phy_pattern_d102( + struct dcn10_link_encoder *enc10) +{ + /* Disable PHY Bypass mode to setup the test pattern */ + enable_phy_bypass_mode(enc10, false); + + /* For 10-bit PRBS or debug symbols + * please use the following sequence: + * + * Enable debug symbols on the lanes + */ + disable_prbs_symbols(enc10, true); + + /* Disable PRBS mode */ + disable_prbs_mode(enc10); + + /* Program debug symbols to be output */ + { + uint16_t pattern_symbols[8] = { + 0x2AA, 0x2AA, 0x2AA, 0x2AA, + 0x2AA, 0x2AA, 0x2AA, 0x2AA + }; + + program_pattern_symbols(enc10, pattern_symbols); + } + + /* Enable phy bypass mode to enable the test pattern */ + + enable_phy_bypass_mode(enc10, true); +} + +static void set_link_training_complete( + struct dcn10_link_encoder *enc10, + bool complete) +{ + /* This register resides in DP back end block; + * transmitter is used for the offset + */ + REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, complete); + +} + +void dcn10_link_encoder_set_dp_phy_pattern_training_pattern( + struct link_encoder *enc, + uint32_t index) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + /* Write Training Pattern */ + + REG_WRITE(DP_DPHY_TRAINING_PATTERN_SEL, index); + + /* Set HW Register Training Complete to false */ + + set_link_training_complete(enc10, false); + + /* Disable PHY Bypass mode to output Training Pattern */ + + enable_phy_bypass_mode(enc10, false); + + /* Disable PRBS mode */ + disable_prbs_mode(enc10); +} + +static void setup_panel_mode( + struct dcn10_link_encoder *enc10, + enum dp_panel_mode panel_mode) +{ + uint32_t value; + + ASSERT(REG(DP_DPHY_INTERNAL_CTRL)); + value = REG_READ(DP_DPHY_INTERNAL_CTRL); + + switch (panel_mode) { + case DP_PANEL_MODE_EDP: + value = 0x1; + break; + case DP_PANEL_MODE_SPECIAL: + value = 0x11; + break; + default: + value = 0x0; + break; + } + + REG_WRITE(DP_DPHY_INTERNAL_CTRL, value); +} + +static void set_dp_phy_pattern_symbol_error( + struct dcn10_link_encoder *enc10) +{ + /* Disable PHY Bypass mode to setup the test pattern */ + enable_phy_bypass_mode(enc10, false); + + /* program correct panel mode*/ + setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT); + + /* A PRBS23 pattern is used for most DP electrical measurements. */ + + /* Enable PRBS symbols on the lanes */ + disable_prbs_symbols(enc10, false); + + /* For PRBS23 Set bit DPHY_PRBS_SEL=1 and Set bit DPHY_PRBS_EN=1 */ + REG_UPDATE_2(DP_DPHY_PRBS_CNTL, + DPHY_PRBS_SEL, 1, + DPHY_PRBS_EN, 1); + + /* Enable phy bypass mode to enable the test pattern */ + enable_phy_bypass_mode(enc10, true); +} + +static void set_dp_phy_pattern_prbs7( + struct dcn10_link_encoder *enc10) +{ + /* Disable PHY Bypass mode to setup the test pattern */ + enable_phy_bypass_mode(enc10, false); + + /* A PRBS7 pattern is used for most DP electrical measurements. 
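
A short aside on the constant programmed by set_dp_phy_pattern_d102() above; the 8b/10b background is general DP knowledge rather than something stated in the patch.

/* 0x2AA is 0b1010101010: eight copies of it give a continuous 1-0 toggle
 * on the wire, i.e. the clock-like waveform the D10.2 test symbol
 * produces, which is why it is used for DP PHY measurements.
 */
#define EXAMPLE_D10_2_SYMBOL	0x2AA
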
*/ + + /* Enable PRBS symbols on the lanes */ + disable_prbs_symbols(enc10, false); + + /* For PRBS7 Set bit DPHY_PRBS_SEL=0 and Set bit DPHY_PRBS_EN=1 */ + REG_UPDATE_2(DP_DPHY_PRBS_CNTL, + DPHY_PRBS_SEL, 0, + DPHY_PRBS_EN, 1); + + /* Enable phy bypass mode to enable the test pattern */ + enable_phy_bypass_mode(enc10, true); +} + +static void set_dp_phy_pattern_80bit_custom( + struct dcn10_link_encoder *enc10, + const uint8_t *pattern) +{ + /* Disable PHY Bypass mode to setup the test pattern */ + enable_phy_bypass_mode(enc10, false); + + /* Enable debug symbols on the lanes */ + + disable_prbs_symbols(enc10, true); + + /* Enable PHY bypass mode to enable the test pattern */ + /* TODO is it really needed ? */ + + enable_phy_bypass_mode(enc10, true); + + /* Program 80 bit custom pattern */ + { + uint16_t pattern_symbols[8]; + + pattern_symbols[0] = + ((pattern[1] & 0x03) << 8) | pattern[0]; + pattern_symbols[1] = + ((pattern[2] & 0x0f) << 6) | ((pattern[1] >> 2) & 0x3f); + pattern_symbols[2] = + ((pattern[3] & 0x3f) << 4) | ((pattern[2] >> 4) & 0x0f); + pattern_symbols[3] = + (pattern[4] << 2) | ((pattern[3] >> 6) & 0x03); + pattern_symbols[4] = + ((pattern[6] & 0x03) << 8) | pattern[5]; + pattern_symbols[5] = + ((pattern[7] & 0x0f) << 6) | ((pattern[6] >> 2) & 0x3f); + pattern_symbols[6] = + ((pattern[8] & 0x3f) << 4) | ((pattern[7] >> 4) & 0x0f); + pattern_symbols[7] = + (pattern[9] << 2) | ((pattern[8] >> 6) & 0x03); + + program_pattern_symbols(enc10, pattern_symbols); + } + + /* Enable phy bypass mode to enable the test pattern */ + + enable_phy_bypass_mode(enc10, true); +} + +static void set_dp_phy_pattern_hbr2_compliance_cp2520_2( + struct dcn10_link_encoder *enc10, + unsigned int cp2520_pattern) +{ + + /* previously there is a register DP_HBR2_EYE_PATTERN + * that is enabled to get the pattern. + * But it does not work with the latest spec change, + * so we are programming the following registers manually. + * + * The following settings have been confirmed + * by Nick Chorney and Sandra Liu + */ + + /* Disable PHY Bypass mode to setup the test pattern */ + + enable_phy_bypass_mode(enc10, false); + + /* Setup DIG encoder in DP SST mode */ + enc10->base.funcs->setup(&enc10->base, SIGNAL_TYPE_DISPLAY_PORT); + + /* ensure normal panel mode. 
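
The shift table in set_dp_phy_pattern_80bit_custom() above packs 10 bytes into eight 10-bit symbols; an equivalent generic formulation may make the intent clearer. This is an illustrative sketch only (the helper name is made up), not a proposed change to the table-driven code.

static uint16_t example_custom_symbol(const uint8_t *pattern, unsigned int i)
{
	unsigned int bit = 10 * i;	/* first bit of symbol i, 0 <= i < 8 */
	unsigned int byte = bit / 8;
	unsigned int shift = bit % 8;	/* always 0, 2, 4 or 6 here */

	/* Two consecutive bytes always cover a symbol because the start
	 * offset is even. Example: i == 0 with pattern[0] = 0xAA and
	 * pattern[1] = 0x02 yields 0x2AA, matching pattern_symbols[0] above.
	 */
	return ((pattern[byte] >> shift) |
		((uint16_t)pattern[byte + 1] << (8 - shift))) & 0x3ff;
}
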
*/ + setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT); + + /* no vbid after BS (SR) + * DP_LINK_FRAMING_CNTL changed history Sandra Liu + * 11000260 / 11000104 / 110000FC + */ + REG_UPDATE_3(DP_LINK_FRAMING_CNTL, + DP_IDLE_BS_INTERVAL, 0xFC, + DP_VBID_DISABLE, 1, + DP_VID_ENHANCED_FRAME_MODE, 1); + + /* swap every BS with SR */ + REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0); + + /* select cp2520 patterns */ + if (REG(DP_DPHY_HBR2_PATTERN_CONTROL)) + REG_UPDATE(DP_DPHY_HBR2_PATTERN_CONTROL, + DP_DPHY_HBR2_PATTERN_CONTROL, cp2520_pattern); + else + /* pre-DCE11 can only generate CP2520 pattern 2 */ + ASSERT(cp2520_pattern == 2); + + /* set link training complete */ + set_link_training_complete(enc10, true); + + /* disable video stream */ + REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0); + + /* Disable PHY Bypass mode to setup the test pattern */ + enable_phy_bypass_mode(enc10, false); +} + +static void set_dp_phy_pattern_passthrough_mode( + struct dcn10_link_encoder *enc10, + enum dp_panel_mode panel_mode) +{ + /* program correct panel mode */ + setup_panel_mode(enc10, panel_mode); + + /* restore LINK_FRAMING_CNTL and DPHY_SCRAMBLER_BS_COUNT + * in case we were doing HBR2 compliance pattern before + */ + REG_UPDATE_3(DP_LINK_FRAMING_CNTL, + DP_IDLE_BS_INTERVAL, 0x2000, + DP_VBID_DISABLE, 0, + DP_VID_ENHANCED_FRAME_MODE, 1); + + REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, 0x1FF); + + /* set link training complete */ + set_link_training_complete(enc10, true); + + /* Disable PHY Bypass mode to setup the test pattern */ + enable_phy_bypass_mode(enc10, false); + + /* Disable PRBS mode */ + disable_prbs_mode(enc10); +} + +/* return value is bit-vector */ +static uint8_t get_frontend_source( + enum engine_id engine) +{ + switch (engine) { + case ENGINE_ID_DIGA: + return DCN10_DIG_FE_SOURCE_SELECT_DIGA; + case ENGINE_ID_DIGB: + return DCN10_DIG_FE_SOURCE_SELECT_DIGB; + case ENGINE_ID_DIGC: + return DCN10_DIG_FE_SOURCE_SELECT_DIGC; + case ENGINE_ID_DIGD: + return DCN10_DIG_FE_SOURCE_SELECT_DIGD; + case ENGINE_ID_DIGE: + return DCN10_DIG_FE_SOURCE_SELECT_DIGE; + case ENGINE_ID_DIGF: + return DCN10_DIG_FE_SOURCE_SELECT_DIGF; + case ENGINE_ID_DIGG: + return DCN10_DIG_FE_SOURCE_SELECT_DIGG; + default: + ASSERT_CRITICAL(false); + return DCN10_DIG_FE_SOURCE_SELECT_INVALID; + } +} + +static void configure_encoder( + struct dcn10_link_encoder *enc10, + const struct dc_link_settings *link_settings) +{ + /* set number of lanes */ + + REG_SET(DP_CONFIG, 0, + DP_UDI_LANES, link_settings->lane_count - LANE_COUNT_ONE); + + /* setup scrambler */ + REG_UPDATE(DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, 1); +} + +void dcn10_psr_program_dp_dphy_fast_training(struct link_encoder *enc, + bool exit_link_training_required) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + if (exit_link_training_required) + REG_UPDATE(DP_DPHY_FAST_TRAINING, + DPHY_RX_FAST_TRAINING_CAPABLE, 1); + else { + REG_UPDATE(DP_DPHY_FAST_TRAINING, + DPHY_RX_FAST_TRAINING_CAPABLE, 0); + /*In DCE 11, we are able to pre-program a Force SR register + * to be able to trigger SR symbol after 5 idle patterns + * transmitted. Upon PSR Exit, DMCU can trigger + * DPHY_LOAD_BS_COUNT_START = 1. Upon writing 1 to + * DPHY_LOAD_BS_COUNT_START and the internal counter + * reaches DPHY_LOAD_BS_COUNT, the next BS symbol will be + * replaced by SR symbol once. 
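
Because get_frontend_source() above returns one-hot values, DIG_FE_SOURCE_SELECT effectively holds a bitmask of front ends attached to a back end; a small sketch of that intent (the real read-modify-write lives in dcn10_link_encoder_connect_dig_be_to_fe() later in this file):

static uint32_t example_fe_select(uint32_t field, enum engine_id engine,
				  bool connect)
{
	/* DCN10_DIG_FE_SOURCE_SELECT_DIGA (0x01), ..._DIGB (0x02) and so on
	 * can be set or cleared independently for the same DIG back end.
	 */
	if (connect)
		return field | get_frontend_source(engine);

	return field & ~get_frontend_source(engine);
}
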
+ */ + + REG_UPDATE(DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, 0x5); + } +} + +void dcn10_psr_program_secondary_packet(struct link_encoder *enc, + unsigned int sdp_transmit_line_num_deadline) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + REG_UPDATE_2(DP_SEC_CNTL1, + DP_SEC_GSP0_LINE_NUM, sdp_transmit_line_num_deadline, + DP_SEC_GSP0_PRIORITY, 1); +} + +bool dcn10_is_dig_enabled(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(DIG_BE_EN_CNTL, DIG_ENABLE, &value); + return value; +} + +static void link_encoder_disable(struct dcn10_link_encoder *enc10) +{ + /* reset training pattern */ + REG_SET(DP_DPHY_TRAINING_PATTERN_SEL, 0, + DPHY_TRAINING_PATTERN_SEL, 0); + + /* reset training complete */ + REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0); + + /* reset panel mode */ + setup_panel_mode(enc10, DP_PANEL_MODE_DEFAULT); +} + +static void hpd_initialize( + struct dcn10_link_encoder *enc10) +{ + /* Associate HPD with DIG_BE */ + enum hpd_source_id hpd_source = enc10->base.hpd_source; + + REG_UPDATE(DIG_BE_CNTL, DIG_HPD_SELECT, hpd_source); +} + +bool dcn10_link_encoder_validate_dvi_output( + const struct dcn10_link_encoder *enc10, + enum signal_type connector_signal, + enum signal_type signal, + const struct dc_crtc_timing *crtc_timing) +{ + uint32_t max_pixel_clock = TMDS_MAX_PIXEL_CLOCK; + + if (signal == SIGNAL_TYPE_DVI_DUAL_LINK) + max_pixel_clock *= 2; + + /* This handles the case of HDMI downgrade to DVI we don't want to + * we don't want to cap the pixel clock if the DDI is not DVI. + */ + if (connector_signal != SIGNAL_TYPE_DVI_DUAL_LINK && + connector_signal != SIGNAL_TYPE_DVI_SINGLE_LINK) + max_pixel_clock = enc10->base.features.max_hdmi_pixel_clock; + + /* DVI only support RGB pixel encoding */ + if (crtc_timing->pixel_encoding != PIXEL_ENCODING_RGB) + return false; + + /*connect DVI via adpater's HDMI connector*/ + if ((connector_signal == SIGNAL_TYPE_DVI_SINGLE_LINK || + connector_signal == SIGNAL_TYPE_HDMI_TYPE_A) && + signal != SIGNAL_TYPE_HDMI_TYPE_A && + crtc_timing->pix_clk_khz > TMDS_MAX_PIXEL_CLOCK) + return false; + if (crtc_timing->pix_clk_khz < TMDS_MIN_PIXEL_CLOCK) + return false; + + if (crtc_timing->pix_clk_khz > max_pixel_clock) + return false; + + /* DVI supports 6/8bpp single-link and 10/16bpp dual-link */ + switch (crtc_timing->display_color_depth) { + case COLOR_DEPTH_666: + case COLOR_DEPTH_888: + break; + case COLOR_DEPTH_101010: + case COLOR_DEPTH_161616: + if (signal != SIGNAL_TYPE_DVI_DUAL_LINK) + return false; + break; + default: + return false; + } + + return true; +} + +static bool dcn10_link_encoder_validate_hdmi_output( + const struct dcn10_link_encoder *enc10, + const struct dc_crtc_timing *crtc_timing, + int adjusted_pix_clk_khz) +{ + enum dc_color_depth max_deep_color = + enc10->base.features.max_hdmi_deep_color; + + if (max_deep_color < crtc_timing->display_color_depth) + return false; + + if (crtc_timing->display_color_depth < COLOR_DEPTH_888) + return false; + if (adjusted_pix_clk_khz < TMDS_MIN_PIXEL_CLOCK) + return false; + + if ((adjusted_pix_clk_khz == 0) || + (adjusted_pix_clk_khz > enc10->base.features.max_hdmi_pixel_clock)) + return false; + + /* DCE11 HW does not support 420 */ + if (!enc10->base.features.ycbcr420_supported && + crtc_timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + return false; + + if (!enc10->base.features.flags.bits.HDMI_6GB_EN && + adjusted_pix_clk_khz >= 300000) + return false; + return true; +} + +bool 
dcn10_link_encoder_validate_dp_output( + const struct dcn10_link_encoder *enc10, + const struct dc_crtc_timing *crtc_timing) +{ + /* default RGB only */ + if (crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) + return true; + + if (enc10->base.features.flags.bits.IS_YCBCR_CAPABLE) + return true; + + /* for DCE 8.x or later DP Y-only feature, + * we need ASIC cap + FeatureSupportDPYonly, not support 666 + */ + if (crtc_timing->flags.Y_ONLY && + enc10->base.features.flags.bits.IS_YCBCR_CAPABLE && + crtc_timing->display_color_depth != COLOR_DEPTH_666) + return true; + + return false; +} + +void dcn10_link_encoder_construct( + struct dcn10_link_encoder *enc10, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask) +{ + struct bp_encoder_cap_info bp_cap_info = {0}; + const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; + enum bp_result result = BP_RESULT_OK; + + enc10->base.funcs = &dcn10_lnk_enc_funcs; + enc10->base.ctx = init_data->ctx; + enc10->base.id = init_data->encoder; + + enc10->base.hpd_source = init_data->hpd_source; + enc10->base.connector = init_data->connector; + + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; + + enc10->base.transmitter = init_data->transmitter; + + /* set the flag to indicate whether driver poll the I2C data pin + * while doing the DP sink detect + */ + +/* if (dal_adapter_service_is_feature_supported(as, + FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) + enc10->base.features.flags.bits. + DP_SINK_DETECT_POLL_DATA_PIN = true;*/ + + enc10->base.output_signals = + SIGNAL_TYPE_DVI_SINGLE_LINK | + SIGNAL_TYPE_DVI_DUAL_LINK | + SIGNAL_TYPE_LVDS | + SIGNAL_TYPE_DISPLAY_PORT | + SIGNAL_TYPE_DISPLAY_PORT_MST | + SIGNAL_TYPE_EDP | + SIGNAL_TYPE_HDMI_TYPE_A; + + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. + * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. + * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer + * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. + * Prefer DIG assignment is decided by board design. + * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design + * and VBIOS will filter out 7 UNIPHY for DCE 8.0. + * By this, adding DIGG should not hurt DCE 8.0. 
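
For context, the dcn10_resource.c portion of this patch appears only in the diffstat; the instance wiring it needs would look roughly like the sketch below, following the usual DC pattern of per-instance register lists plus shared shift/mask tables. The SRI() base-offset macro and the __SHIFT/_MASK suffixes are assumed to be provided by the resource file, as for the other DCN10 blocks, so treat this as illustrative rather than the actual diff.

/* one entry per DIG/PHY instance; only two shown to keep the sketch short */
static const struct dcn10_link_enc_registers link_enc_regs[] = {
	{ LE_DCN10_REG_LIST(0) },
	{ LE_DCN10_REG_LIST(1) },
};

static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
	{ AUX_REG_LIST(0) },
	{ AUX_REG_LIST(1) },
};

static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
	{ HPD_REG_LIST(0) },
	{ HPD_REG_LIST(1) },
};

static const struct dcn10_link_enc_shift le_shift = {
	LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT)
};

static const struct dcn10_link_enc_mask le_mask = {
	LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
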
+ * This will let DCE 8.1 share DCE 8.0 as much as possible + */ + + enc10->link_regs = link_regs; + enc10->aux_regs = aux_regs; + enc10->hpd_regs = hpd_regs; + enc10->link_shift = link_shift; + enc10->link_mask = link_mask; + + switch (enc10->base.transmitter) { + case TRANSMITTER_UNIPHY_A: + enc10->base.preferred_engine = ENGINE_ID_DIGA; + break; + case TRANSMITTER_UNIPHY_B: + enc10->base.preferred_engine = ENGINE_ID_DIGB; + break; + case TRANSMITTER_UNIPHY_C: + enc10->base.preferred_engine = ENGINE_ID_DIGC; + break; + case TRANSMITTER_UNIPHY_D: + enc10->base.preferred_engine = ENGINE_ID_DIGD; + break; + case TRANSMITTER_UNIPHY_E: + enc10->base.preferred_engine = ENGINE_ID_DIGE; + break; + case TRANSMITTER_UNIPHY_F: + enc10->base.preferred_engine = ENGINE_ID_DIGF; + break; + case TRANSMITTER_UNIPHY_G: + enc10->base.preferred_engine = ENGINE_ID_DIGG; + break; + default: + ASSERT_CRITICAL(false); + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + } + + /* default to one to mirror Windows behavior */ + enc10->base.features.flags.bits.HDMI_6GB_EN = 1; + + result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, + enc10->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ + if (result == BP_RESULT_OK) { + enc10->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc10->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + } else { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); + } +} + +bool dcn10_link_encoder_validate_output_with_stream( + struct link_encoder *enc, + const struct dc_stream_state *stream) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + bool is_valid; + + switch (stream->signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + is_valid = dcn10_link_encoder_validate_dvi_output( + enc10, + stream->sink->link->connector_signal, + stream->signal, + &stream->timing); + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + is_valid = dcn10_link_encoder_validate_hdmi_output( + enc10, + &stream->timing, + stream->phy_pix_clk); + break; + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + is_valid = dcn10_link_encoder_validate_dp_output( + enc10, &stream->timing); + break; + case SIGNAL_TYPE_EDP: + is_valid = (stream->timing.pixel_encoding == PIXEL_ENCODING_RGB) ? 
true : false; + break; + case SIGNAL_TYPE_VIRTUAL: + is_valid = true; + break; + default: + is_valid = false; + break; + } + + return is_valid; +} + +void dcn10_link_encoder_hw_init( + struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + cntl.action = TRANSMITTER_CONTROL_INIT; + cntl.engine_id = ENGINE_ID_UNKNOWN; + cntl.transmitter = enc10->base.transmitter; + cntl.connector_obj_id = enc10->base.connector; + cntl.lanes_number = LANE_COUNT_FOUR; + cntl.coherent = false; + cntl.hpd_sel = enc10->base.hpd_source; + + if (enc10->base.connector.id == CONNECTOR_ID_EDP) + cntl.signal = SIGNAL_TYPE_EDP; + + result = link_transmitter_control(enc10, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + return; + } + + if (enc10->base.connector.id == CONNECTOR_ID_LVDS) { + cntl.action = TRANSMITTER_CONTROL_BACKLIGHT_BRIGHTNESS; + + result = link_transmitter_control(enc10, &cntl); + + ASSERT(result == BP_RESULT_OK); + + } + aux_initialize(enc10); + + /* reinitialize HPD. + * hpd_initialize() will pass DIG_FE id to HW context. + * All other routine within HW context will use fe_engine_offset + * as DIG_FE id even caller pass DIG_FE id. + * So this routine must be called first. + */ + hpd_initialize(enc10); +} + +void dcn10_link_encoder_destroy(struct link_encoder **enc) +{ + kfree(TO_DCN10_LINK_ENC(*enc)); + *enc = NULL; +} + +void dcn10_link_encoder_setup( + struct link_encoder *enc, + enum signal_type signal) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + switch (signal) { + case SIGNAL_TYPE_EDP: + case SIGNAL_TYPE_DISPLAY_PORT: + /* DP SST */ + REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 0); + break; + case SIGNAL_TYPE_LVDS: + /* LVDS */ + REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 1); + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + /* TMDS-DVI */ + REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 2); + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + /* TMDS-HDMI */ + REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 3); + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + /* DP MST */ + REG_UPDATE(DIG_BE_CNTL, DIG_MODE, 5); + break; + default: + ASSERT_CRITICAL(false); + /* invalid mode ! */ + break; + } + +} + +/* TODO: still need depth or just pass in adjusted pixel clock? 
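
A hypothetical caller sketch for the hooks defined above, based only on the function pointers in dcn10_lnk_enc_funcs; the actual sequencing is owned by the DC core and is not part of this patch.

static void example_enable_dp_sst(struct link_encoder *enc,
				  const struct dc_link_settings *link_settings,
				  enum clock_source_id clock_source)
{
	/* select the DIG mode for the signal, then bring up the PHY */
	enc->funcs->setup(enc, SIGNAL_TYPE_DISPLAY_PORT);
	enc->funcs->enable_dp_output(enc, link_settings, clock_source);
}
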
*/ +void dcn10_link_encoder_enable_tmds_output( + struct link_encoder *enc, + enum clock_source_id clock_source, + enum dc_color_depth color_depth, + enum signal_type signal, + uint32_t pixel_clock) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* Enable the PHY */ + + cntl.action = TRANSMITTER_CONTROL_ENABLE; + cntl.engine_id = enc->preferred_engine; + cntl.transmitter = enc10->base.transmitter; + cntl.pll_id = clock_source; + cntl.signal = signal; + if (cntl.signal == SIGNAL_TYPE_DVI_DUAL_LINK) + cntl.lanes_number = 8; + else + cntl.lanes_number = 4; + + cntl.hpd_sel = enc10->base.hpd_source; + + cntl.pixel_clock = pixel_clock; + cntl.color_depth = color_depth; + + result = link_transmitter_control(enc10, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} + +/* enables DP PHY output */ +void dcn10_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* Enable the PHY */ + + /* number_of_lanes is used for pixel clock adjust, + * but it's not passed to asic_control. + * We need to set number of lanes manually. + */ + configure_encoder(enc10, link_settings); + + cntl.action = TRANSMITTER_CONTROL_ENABLE; + cntl.engine_id = enc->preferred_engine; + cntl.transmitter = enc10->base.transmitter; + cntl.pll_id = clock_source; + cntl.signal = SIGNAL_TYPE_DISPLAY_PORT; + cntl.lanes_number = link_settings->lane_count; + cntl.hpd_sel = enc10->base.hpd_source; + cntl.pixel_clock = link_settings->link_rate + * LINK_RATE_REF_FREQ_IN_KHZ; + /* TODO: check if undefined works */ + cntl.color_depth = COLOR_DEPTH_UNDEFINED; + + result = link_transmitter_control(enc10, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} + +/* enables DP PHY output in MST mode */ +void dcn10_link_encoder_enable_dp_mst_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + /* Enable the PHY */ + + /* number_of_lanes is used for pixel clock adjust, + * but it's not passed to asic_control. + * We need to set number of lanes manually. 
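
An arithmetic note on the pixel_clock value handed to VBIOS just above: the link_rate enum carries the DPCD bandwidth code, so multiplying by the reference frequency gives the 8b/10b symbol clock in kHz. The 27 MHz reference value and the 0x14 code for HBR2 come from the rest of DC and the DP spec, not from this diff, so treat the numbers as assumptions.

/* e.g. HBR2: code 0x14 (20) * 27000 kHz = 540000 kHz, i.e. the 5.4 Gbps
 * lane rate divided by 10 for 8b/10b symbols
 */
static uint32_t example_dp_symbol_clock_khz(enum dc_link_rate link_rate)
{
	return link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
}
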
+ */ + configure_encoder(enc10, link_settings); + + cntl.action = TRANSMITTER_CONTROL_ENABLE; + cntl.engine_id = ENGINE_ID_UNKNOWN; + cntl.transmitter = enc10->base.transmitter; + cntl.pll_id = clock_source; + cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST; + cntl.lanes_number = link_settings->lane_count; + cntl.hpd_sel = enc10->base.hpd_source; + cntl.pixel_clock = link_settings->link_rate + * LINK_RATE_REF_FREQ_IN_KHZ; + /* TODO: check if undefined works */ + cntl.color_depth = COLOR_DEPTH_UNDEFINED; + + result = link_transmitter_control(enc10, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + } +} +/* + * @brief + * Disable transmitter and its encoder + */ +void dcn10_link_encoder_disable_output( + struct link_encoder *enc, + enum signal_type signal) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct bp_transmitter_control cntl = { 0 }; + enum bp_result result; + + if (!dcn10_is_dig_enabled(enc)) { + /* OF_SKIP_POWER_DOWN_INACTIVE_ENCODER */ + return; + } + /* Power-down RX and disable GPU PHY should be paired. + * Disabling PHY without powering down RX may cause + * symbol lock loss, on which we will get DP Sink interrupt. + */ + + /* There is a case for the DP active dongles + * where we want to disable the PHY but keep RX powered, + * for those we need to ignore DP Sink interrupt + * by checking lane count that has been set + * on the last do_enable_output(). + */ + + /* disable transmitter */ + cntl.action = TRANSMITTER_CONTROL_DISABLE; + cntl.transmitter = enc10->base.transmitter; + cntl.hpd_sel = enc10->base.hpd_source; + cntl.signal = signal; + cntl.connector_obj_id = enc10->base.connector; + + result = link_transmitter_control(enc10, &cntl); + + if (result != BP_RESULT_OK) { + DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n", + __func__); + BREAK_TO_DEBUGGER(); + return; + } + + /* disable encoder */ + if (dc_is_dp_signal(signal)) + link_encoder_disable(enc10); +} + +void dcn10_link_encoder_dp_set_lane_settings( + struct link_encoder *enc, + const struct link_training_settings *link_settings) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + union dpcd_training_lane_set training_lane_set = { { 0 } }; + int32_t lane = 0; + struct bp_transmitter_control cntl = { 0 }; + + if (!link_settings) { + BREAK_TO_DEBUGGER(); + return; + } + + cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS; + cntl.transmitter = enc10->base.transmitter; + cntl.connector_obj_id = enc10->base.connector; + cntl.lanes_number = link_settings->link_settings.lane_count; + cntl.hpd_sel = enc10->base.hpd_source; + cntl.pixel_clock = link_settings->link_settings.link_rate * + LINK_RATE_REF_FREQ_IN_KHZ; + + for (lane = 0; lane < link_settings->link_settings.lane_count; lane++) { + /* translate lane settings */ + + training_lane_set.bits.VOLTAGE_SWING_SET = + link_settings->lane_settings[lane].VOLTAGE_SWING; + training_lane_set.bits.PRE_EMPHASIS_SET = + link_settings->lane_settings[lane].PRE_EMPHASIS; + + /* post cursor 2 setting only applies to HBR2 link rate */ + if (link_settings->link_settings.link_rate == LINK_RATE_HIGH2) { + /* this is passed to VBIOS + * to program post cursor 2 level + */ + training_lane_set.bits.POST_CURSOR2_SET = + link_settings->lane_settings[lane].POST_CURSOR2; + } + + cntl.lane_select = lane; + cntl.lane_settings = training_lane_set.raw; + + /* call VBIOS table to set voltage swing and pre-emphasis */ + link_transmitter_control(enc10, &cntl); 
+ } +} + +/* set DP PHY test and training patterns */ +void dcn10_link_encoder_dp_set_phy_pattern( + struct link_encoder *enc, + const struct encoder_set_dp_phy_pattern_param *param) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + switch (param->dp_phy_pattern) { + case DP_TEST_PATTERN_TRAINING_PATTERN1: + dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 0); + break; + case DP_TEST_PATTERN_TRAINING_PATTERN2: + dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 1); + break; + case DP_TEST_PATTERN_TRAINING_PATTERN3: + dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 2); + break; + case DP_TEST_PATTERN_TRAINING_PATTERN4: + dcn10_link_encoder_set_dp_phy_pattern_training_pattern(enc, 3); + break; + case DP_TEST_PATTERN_D102: + set_dp_phy_pattern_d102(enc10); + break; + case DP_TEST_PATTERN_SYMBOL_ERROR: + set_dp_phy_pattern_symbol_error(enc10); + break; + case DP_TEST_PATTERN_PRBS7: + set_dp_phy_pattern_prbs7(enc10); + break; + case DP_TEST_PATTERN_80BIT_CUSTOM: + set_dp_phy_pattern_80bit_custom( + enc10, param->custom_pattern); + break; + case DP_TEST_PATTERN_CP2520_1: + set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 1); + break; + case DP_TEST_PATTERN_CP2520_2: + set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 2); + break; + case DP_TEST_PATTERN_CP2520_3: + set_dp_phy_pattern_hbr2_compliance_cp2520_2(enc10, 3); + break; + case DP_TEST_PATTERN_VIDEO_MODE: { + set_dp_phy_pattern_passthrough_mode( + enc10, param->dp_panel_mode); + break; + } + + default: + /* invalid phy pattern */ + ASSERT_CRITICAL(false); + break; + } +} + +static void fill_stream_allocation_row_info( + const struct link_mst_stream_allocation *stream_allocation, + uint32_t *src, + uint32_t *slots) +{ + const struct stream_encoder *stream_enc = stream_allocation->stream_enc; + + if (stream_enc) { + *src = stream_enc->id; + *slots = stream_allocation->slot_count; + } else { + *src = 0; + *slots = 0; + } +} + +/* programs DP MST VC payload allocation */ +void dcn10_link_encoder_update_mst_stream_allocation_table( + struct link_encoder *enc, + const struct link_mst_stream_allocation_table *table) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value0 = 0; + uint32_t value1 = 0; + uint32_t value2 = 0; + uint32_t slots = 0; + uint32_t src = 0; + uint32_t retries = 0; + + /* For CZ, there are only 3 pipes. 
So Virtual channel is up 3.*/ + + /* --- Set MSE Stream Attribute - + * Setup VC Payload Table on Tx Side, + * Issue allocation change trigger + * to commit payload on both tx and rx side + */ + + /* we should clean-up table each time */ + + if (table->stream_count >= 1) { + fill_stream_allocation_row_info( + &table->stream_allocations[0], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_MSE_SAT0, + DP_MSE_SAT_SRC0, src, + DP_MSE_SAT_SLOT_COUNT0, slots); + + if (table->stream_count >= 2) { + fill_stream_allocation_row_info( + &table->stream_allocations[1], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_MSE_SAT0, + DP_MSE_SAT_SRC1, src, + DP_MSE_SAT_SLOT_COUNT1, slots); + + if (table->stream_count >= 3) { + fill_stream_allocation_row_info( + &table->stream_allocations[2], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_MSE_SAT1, + DP_MSE_SAT_SRC2, src, + DP_MSE_SAT_SLOT_COUNT2, slots); + + if (table->stream_count >= 4) { + fill_stream_allocation_row_info( + &table->stream_allocations[3], + &src, + &slots); + } else { + src = 0; + slots = 0; + } + + REG_UPDATE_2(DP_MSE_SAT1, + DP_MSE_SAT_SRC3, src, + DP_MSE_SAT_SLOT_COUNT3, slots); + + /* --- wait for transaction finish */ + + /* send allocation change trigger (ACT) ? + * this step first sends the ACT, + * then double buffers the SAT into the hardware + * making the new allocation active on the DP MST mode link + */ + + /* DP_MSE_SAT_UPDATE: + * 0 - No Action + * 1 - Update SAT with trigger + * 2 - Update SAT without trigger + */ + REG_UPDATE(DP_MSE_SAT_UPDATE, + DP_MSE_SAT_UPDATE, 1); + + /* wait for update to complete + * (i.e. DP_MSE_SAT_UPDATE field is reset to 0) + * then wait for the transmission + * of at least 16 MTP headers on immediate local link. + * i.e. DP_MSE_16_MTP_KEEPOUT field (read only) is reset to 0 + * a value of 1 indicates that DP MST mode + * is in the 16 MTP keepout region after a VC has been added. + * MST stream bandwidth (VC rate) can be configured + * after this bit is cleared + */ + do { + udelay(10); + + value0 = REG_READ(DP_MSE_SAT_UPDATE); + + REG_GET(DP_MSE_SAT_UPDATE, + DP_MSE_SAT_UPDATE, &value1); + + REG_GET(DP_MSE_SAT_UPDATE, + DP_MSE_16_MTP_KEEPOUT, &value2); + + /* bit field DP_MSE_SAT_UPDATE is set to 1 already */ + if (!value1 && !value2) + break; + ++retries; + } while (retries < DP_MST_UPDATE_MAX_RETRY); +} + +void dcn10_link_encoder_connect_dig_be_to_fe( + struct link_encoder *enc, + enum engine_id engine, + bool connect) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t field; + + if (engine != ENGINE_ID_UNKNOWN) { + + REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &field); + + if (connect) + field |= get_frontend_source(engine); + else + field &= ~get_frontend_source(engine); + + REG_UPDATE(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, field); + } +} + + +#define HPD_REG(reg)\ + (enc10->hpd_regs->reg) + +#define HPD_REG_READ(reg_name) \ + dm_read_reg(CTX, HPD_REG(reg_name)) + +#define HPD_REG_UPDATE_N(reg_name, n, ...) 
\ + generic_reg_update_ex(CTX, \ + HPD_REG(reg_name), \ + HPD_REG_READ(reg_name), \ + n, __VA_ARGS__) + +#define HPD_REG_UPDATE(reg_name, field, val) \ + HPD_REG_UPDATE_N(reg_name, 1, \ + FN(reg_name, field), val) + +void dcn10_link_encoder_enable_hpd(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + HPD_REG_UPDATE(DC_HPD_CONTROL, + DC_HPD_EN, 1); +} + +void dcn10_link_encoder_disable_hpd(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + HPD_REG_UPDATE(DC_HPD_CONTROL, + DC_HPD_EN, 0); +} + + +#define AUX_REG(reg)\ + (enc10->aux_regs->reg) + +#define AUX_REG_READ(reg_name) \ + dm_read_reg(CTX, AUX_REG(reg_name)) + +#define AUX_REG_UPDATE_N(reg_name, n, ...) \ + generic_reg_update_ex(CTX, \ + AUX_REG(reg_name), \ + AUX_REG_READ(reg_name), \ + n, __VA_ARGS__) + +#define AUX_REG_UPDATE(reg_name, field, val) \ + AUX_REG_UPDATE_N(reg_name, 1, \ + FN(reg_name, field), val) + +#define AUX_REG_UPDATE_2(reg, f1, v1, f2, v2) \ + AUX_REG_UPDATE_N(reg, 2,\ + FN(reg, f1), v1,\ + FN(reg, f2), v2) + +static void aux_initialize( + struct dcn10_link_encoder *enc10) +{ + enum hpd_source_id hpd_source = enc10->base.hpd_source; + + AUX_REG_UPDATE_2(AUX_CONTROL, + AUX_HPD_SEL, hpd_source, + AUX_LS_READ_EN, 0); + + /* 1/4 window (the maximum allowed) */ + AUX_REG_UPDATE(AUX_DPHY_RX_CONTROL0, + AUX_RX_RECEIVE_WINDOW, 1); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h new file mode 100644 index 000000000000..2a97cdb2cfbb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -0,0 +1,330 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_ENCODER__DCN10_H__ +#define __DC_LINK_ENCODER__DCN10_H__ + +#include "link_encoder.h" + +#define TO_DCN10_LINK_ENC(link_encoder)\ + container_of(link_encoder, struct dcn10_link_encoder, base) + + +#define AUX_REG_LIST(id)\ + SRI(AUX_CONTROL, DP_AUX, id), \ + SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id) + +#define HPD_REG_LIST(id)\ + SRI(DC_HPD_CONTROL, HPD, id) + +#define LE_DCN_COMMON_REG_LIST(id) \ + SRI(DIG_BE_CNTL, DIG, id), \ + SRI(DIG_BE_EN_CNTL, DIG, id), \ + SRI(DP_CONFIG, DP, id), \ + SRI(DP_DPHY_CNTL, DP, id), \ + SRI(DP_DPHY_PRBS_CNTL, DP, id), \ + SRI(DP_DPHY_SCRAM_CNTL, DP, id),\ + SRI(DP_DPHY_SYM0, DP, id), \ + SRI(DP_DPHY_SYM1, DP, id), \ + SRI(DP_DPHY_SYM2, DP, id), \ + SRI(DP_DPHY_TRAINING_PATTERN_SEL, DP, id), \ + SRI(DP_LINK_CNTL, DP, id), \ + SRI(DP_LINK_FRAMING_CNTL, DP, id), \ + SRI(DP_MSE_SAT0, DP, id), \ + SRI(DP_MSE_SAT1, DP, id), \ + SRI(DP_MSE_SAT2, DP, id), \ + SRI(DP_MSE_SAT_UPDATE, DP, id), \ + SRI(DP_SEC_CNTL, DP, id), \ + SRI(DP_VID_STREAM_CNTL, DP, id), \ + SRI(DP_DPHY_FAST_TRAINING, DP, id), \ + SRI(DP_SEC_CNTL1, DP, id), \ + SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \ + SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id) + +#define LE_DCN10_REG_LIST(id)\ + LE_DCN_COMMON_REG_LIST(id) + +struct dcn10_link_enc_aux_registers { + uint32_t AUX_CONTROL; + uint32_t AUX_DPHY_RX_CONTROL0; +}; + +struct dcn10_link_enc_hpd_registers { + uint32_t DC_HPD_CONTROL; +}; + +struct dcn10_link_enc_registers { + uint32_t DIG_BE_CNTL; + uint32_t DIG_BE_EN_CNTL; + uint32_t DP_CONFIG; + uint32_t DP_DPHY_CNTL; + uint32_t DP_DPHY_INTERNAL_CTRL; + uint32_t DP_DPHY_PRBS_CNTL; + uint32_t DP_DPHY_SCRAM_CNTL; + uint32_t DP_DPHY_SYM0; + uint32_t DP_DPHY_SYM1; + uint32_t DP_DPHY_SYM2; + uint32_t DP_DPHY_TRAINING_PATTERN_SEL; + uint32_t DP_LINK_CNTL; + uint32_t DP_LINK_FRAMING_CNTL; + uint32_t DP_MSE_SAT0; + uint32_t DP_MSE_SAT1; + uint32_t DP_MSE_SAT2; + uint32_t DP_MSE_SAT_UPDATE; + uint32_t DP_SEC_CNTL; + uint32_t DP_VID_STREAM_CNTL; + uint32_t DP_DPHY_FAST_TRAINING; + uint32_t DP_DPHY_BS_SR_SWAP_CNTL; + uint32_t DP_DPHY_HBR2_PATTERN_CONTROL; + uint32_t DP_SEC_CNTL1; +}; + +#define LE_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + +#define LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh)\ + LE_SF(DIG0_DIG_BE_EN_CNTL, DIG_ENABLE, mask_sh),\ + LE_SF(DIG0_DIG_BE_CNTL, DIG_HPD_SELECT, mask_sh),\ + LE_SF(DIG0_DIG_BE_CNTL, DIG_MODE, mask_sh),\ + LE_SF(DIG0_DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, mask_sh),\ + LE_SF(DP0_DP_DPHY_CNTL, DPHY_BYPASS, mask_sh),\ + LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE0, mask_sh),\ + LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE1, mask_sh),\ + LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE2, mask_sh),\ + LE_SF(DP0_DP_DPHY_CNTL, DPHY_ATEST_SEL_LANE3, mask_sh),\ + LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_EN, mask_sh),\ + LE_SF(DP0_DP_DPHY_PRBS_CNTL, DPHY_PRBS_SEL, mask_sh),\ + LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM1, mask_sh),\ + LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM2, mask_sh),\ + LE_SF(DP0_DP_DPHY_SYM0, DPHY_SYM3, mask_sh),\ + LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM4, mask_sh),\ + LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM5, mask_sh),\ + LE_SF(DP0_DP_DPHY_SYM1, DPHY_SYM6, mask_sh),\ + LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM7, mask_sh),\ + LE_SF(DP0_DP_DPHY_SYM2, DPHY_SYM8, mask_sh),\ + LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_BS_COUNT, mask_sh),\ + LE_SF(DP0_DP_DPHY_SCRAM_CNTL, DPHY_SCRAMBLER_ADVANCE, mask_sh),\ + LE_SF(DP0_DP_DPHY_FAST_TRAINING, DPHY_RX_FAST_TRAINING_CAPABLE, mask_sh),\ + 
LE_SF(DP0_DP_DPHY_BS_SR_SWAP_CNTL, DPHY_LOAD_BS_COUNT, mask_sh),\ + LE_SF(DP0_DP_DPHY_TRAINING_PATTERN_SEL, DPHY_TRAINING_PATTERN_SEL, mask_sh),\ + LE_SF(DP0_DP_DPHY_HBR2_PATTERN_CONTROL, DP_DPHY_HBR2_PATTERN_CONTROL, mask_sh),\ + LE_SF(DP0_DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, mask_sh),\ + LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_IDLE_BS_INTERVAL, mask_sh),\ + LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VBID_DISABLE, mask_sh),\ + LE_SF(DP0_DP_LINK_FRAMING_CNTL, DP_VID_ENHANCED_FRAME_MODE, mask_sh),\ + LE_SF(DP0_DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, mask_sh),\ + LE_SF(DP0_DP_CONFIG, DP_UDI_LANES, mask_sh),\ + LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_LINE_NUM, mask_sh),\ + LE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP0_PRIORITY, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC0, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SRC1, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT0, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT0, DP_MSE_SAT_SLOT_COUNT1, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC2, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SRC3, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT2, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT1, DP_MSE_SAT_SLOT_COUNT3, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_SAT_UPDATE, mask_sh),\ + LE_SF(DP0_DP_MSE_SAT_UPDATE, DP_MSE_16_MTP_KEEPOUT, mask_sh),\ + LE_SF(DP_AUX0_AUX_CONTROL, AUX_HPD_SEL, mask_sh),\ + LE_SF(DP_AUX0_AUX_CONTROL, AUX_LS_READ_EN, mask_sh),\ + LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_RECEIVE_WINDOW, mask_sh),\ + LE_SF(HPD0_DC_HPD_CONTROL, DC_HPD_EN, mask_sh) + +#define DCN_LINK_ENCODER_REG_FIELD_LIST(type) \ + type DIG_ENABLE;\ + type DIG_HPD_SELECT;\ + type DIG_MODE;\ + type DIG_FE_SOURCE_SELECT;\ + type DPHY_BYPASS;\ + type DPHY_ATEST_SEL_LANE0;\ + type DPHY_ATEST_SEL_LANE1;\ + type DPHY_ATEST_SEL_LANE2;\ + type DPHY_ATEST_SEL_LANE3;\ + type DPHY_PRBS_EN;\ + type DPHY_PRBS_SEL;\ + type DPHY_SYM1;\ + type DPHY_SYM2;\ + type DPHY_SYM3;\ + type DPHY_SYM4;\ + type DPHY_SYM5;\ + type DPHY_SYM6;\ + type DPHY_SYM7;\ + type DPHY_SYM8;\ + type DPHY_SCRAMBLER_BS_COUNT;\ + type DPHY_SCRAMBLER_ADVANCE;\ + type DPHY_RX_FAST_TRAINING_CAPABLE;\ + type DPHY_LOAD_BS_COUNT;\ + type DPHY_TRAINING_PATTERN_SEL;\ + type DP_DPHY_HBR2_PATTERN_CONTROL;\ + type DP_LINK_TRAINING_COMPLETE;\ + type DP_IDLE_BS_INTERVAL;\ + type DP_VBID_DISABLE;\ + type DP_VID_ENHANCED_FRAME_MODE;\ + type DP_VID_STREAM_ENABLE;\ + type DP_UDI_LANES;\ + type DP_SEC_GSP0_LINE_NUM;\ + type DP_SEC_GSP0_PRIORITY;\ + type DP_MSE_SAT_SRC0;\ + type DP_MSE_SAT_SRC1;\ + type DP_MSE_SAT_SRC2;\ + type DP_MSE_SAT_SRC3;\ + type DP_MSE_SAT_SLOT_COUNT0;\ + type DP_MSE_SAT_SLOT_COUNT1;\ + type DP_MSE_SAT_SLOT_COUNT2;\ + type DP_MSE_SAT_SLOT_COUNT3;\ + type DP_MSE_SAT_UPDATE;\ + type DP_MSE_16_MTP_KEEPOUT;\ + type AUX_HPD_SEL;\ + type AUX_LS_READ_EN;\ + type AUX_RX_RECEIVE_WINDOW;\ + type DC_HPD_EN + +struct dcn10_link_enc_shift { + DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t); +}; + +struct dcn10_link_enc_mask { + DCN_LINK_ENCODER_REG_FIELD_LIST(uint32_t); +}; + +struct dcn10_link_encoder { + struct link_encoder base; + const struct dcn10_link_enc_registers *link_regs; + const struct dcn10_link_enc_aux_registers *aux_regs; + const struct dcn10_link_enc_hpd_registers *hpd_regs; + const struct dcn10_link_enc_shift *link_shift; + const struct dcn10_link_enc_mask *link_mask; +}; + + +void dcn10_link_encoder_construct( + struct dcn10_link_encoder *enc10, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const 
struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask); + +bool dcn10_link_encoder_validate_dvi_output( + const struct dcn10_link_encoder *enc10, + enum signal_type connector_signal, + enum signal_type signal, + const struct dc_crtc_timing *crtc_timing); + +bool dcn10_link_encoder_validate_rgb_output( + const struct dcn10_link_encoder *enc10, + const struct dc_crtc_timing *crtc_timing); + +bool dcn10_link_encoder_validate_dp_output( + const struct dcn10_link_encoder *enc10, + const struct dc_crtc_timing *crtc_timing); + +bool dcn10_link_encoder_validate_wireless_output( + const struct dcn10_link_encoder *enc10, + const struct dc_crtc_timing *crtc_timing); + +bool dcn10_link_encoder_validate_output_with_stream( + struct link_encoder *enc, + const struct dc_stream_state *stream); + +/****************** HW programming ************************/ + +/* initialize HW */ /* why do we initialze aux in here? */ +void dcn10_link_encoder_hw_init(struct link_encoder *enc); + +void dcn10_link_encoder_destroy(struct link_encoder **enc); + +/* program DIG_MODE in DIG_BE */ +/* TODO can this be combined with enable_output? */ +void dcn10_link_encoder_setup( + struct link_encoder *enc, + enum signal_type signal); + +/* enables TMDS PHY output */ +/* TODO: still need depth or just pass in adjusted pixel clock? */ +void dcn10_link_encoder_enable_tmds_output( + struct link_encoder *enc, + enum clock_source_id clock_source, + enum dc_color_depth color_depth, + enum signal_type signal, + uint32_t pixel_clock); + +/* enables DP PHY output */ +void dcn10_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source); + +/* enables DP PHY output in MST mode */ +void dcn10_link_encoder_enable_dp_mst_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source); + +/* disable PHY output */ +void dcn10_link_encoder_disable_output( + struct link_encoder *enc, + enum signal_type signal); + +/* set DP lane settings */ +void dcn10_link_encoder_dp_set_lane_settings( + struct link_encoder *enc, + const struct link_training_settings *link_settings); + +void dcn10_link_encoder_dp_set_phy_pattern( + struct link_encoder *enc, + const struct encoder_set_dp_phy_pattern_param *param); + +/* programs DP MST VC payload allocation */ +void dcn10_link_encoder_update_mst_stream_allocation_table( + struct link_encoder *enc, + const struct link_mst_stream_allocation_table *table); + +void dcn10_link_encoder_connect_dig_be_to_fe( + struct link_encoder *enc, + enum engine_id engine, + bool connect); + +void dcn10_link_encoder_set_dp_phy_pattern_training_pattern( + struct link_encoder *enc, + uint32_t index); + +void dcn10_link_encoder_enable_hpd(struct link_encoder *enc); + +void dcn10_link_encoder_disable_hpd(struct link_encoder *enc); + +void dcn10_psr_program_dp_dphy_fast_training(struct link_encoder *enc, + bool exit_link_training_required); + +void dcn10_psr_program_secondary_packet(struct link_encoder *enc, + unsigned int sdp_transmit_line_num_deadline); + +bool dcn10_is_dig_enabled(struct link_encoder *enc); + +#endif /* __DC_LINK_ENCODER__DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index ace2e03dced4..df5cb2d1d164 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -38,7 +38,7 @@ #include "dcn10/dcn10_hw_sequencer.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn10/dcn10_opp.h" -#include "dce/dce_link_encoder.h" +#include "dcn10/dcn10_link_encoder.h" #include "dcn10/dcn10_stream_encoder.h" #include "dce/dce_clocks.h" #include "dce/dce_clock_source.h" @@ -214,13 +214,11 @@ static const struct dce_aduio_mask audio_mask = { AUX_REG_LIST(id)\ } -static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { aux_regs(0), aux_regs(1), aux_regs(2), - aux_regs(3), - aux_regs(4), - aux_regs(5) + aux_regs(3) }; #define hpd_regs(id)\ @@ -228,13 +226,11 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = { HPD_REG_LIST(id)\ } -static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { hpd_regs(0), hpd_regs(1), hpd_regs(2), - hpd_regs(3), - hpd_regs(4), - hpd_regs(5) + hpd_regs(3) }; #define link_regs(id)\ @@ -243,14 +239,19 @@ static const struct dce110_link_enc_hpd_registers link_enc_hpd_regs[] = { SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } -static const struct dce110_link_enc_registers link_enc_regs[] = { +static const struct dcn10_link_enc_registers link_enc_regs[] = { link_regs(0), link_regs(1), link_regs(2), - link_regs(3), - link_regs(4), - link_regs(5), - link_regs(6), + link_regs(3) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) }; #define ipp_regs(id)\ @@ -583,20 +584,22 @@ static const struct encoder_feature_support link_enc_feature = { struct link_encoder *dcn10_link_encoder_create( const struct encoder_init_data *enc_init_data) { - struct dce110_link_encoder *enc110 = - kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + struct dcn10_link_encoder *enc10 = + kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); - if (!enc110) + if (!enc10) return NULL; - dce110_link_encoder_construct(enc110, + dcn10_link_encoder_construct(enc10, enc_init_data, &link_enc_feature, &link_enc_regs[enc_init_data->transmitter], &link_enc_aux_regs[enc_init_data->channel - 1], - &link_enc_hpd_regs[enc_init_data->hpd_source]); + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); - return &enc110->base; + return &enc10->base; } struct clock_source *dcn10_clock_source_create( -- cgit v1.2.1 From 9fcab85c580b31f6eb56dd3a00edd5f5270ad55c Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Fri, 27 Apr 2018 20:50:07 -0400 Subject: drm/amd/display: fix memory leaks Signed-off-by: Anthony Koo Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/modules/stats/stats.c | 24 +++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c index fe9e4b316d3a..3f7d47fdc367 100644 --- a/drivers/gpu/drm/amd/display/modules/stats/stats.c +++ b/drivers/gpu/drm/amd/display/modules/stats/stats.c @@ -115,12 +115,12 @@ struct mod_stats *mod_stats_create(struct dc *dc) unsigned int reg_data; int i = 0; + if (dc == NULL) + goto fail_construct; + core_stats = kzalloc(sizeof(struct core_stats), 
GFP_KERNEL); if (core_stats == NULL) - goto fail_alloc_context; - - if (dc == NULL) goto fail_construct; core_stats->dc = dc; @@ -146,6 +146,8 @@ struct mod_stats *mod_stats_create(struct dc *dc) core_stats->entries, GFP_KERNEL); + if (core_stats->time == NULL) + goto fail_construct_time; core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT; core_stats->events = kzalloc( @@ -153,13 +155,13 @@ struct mod_stats *mod_stats_create(struct dc *dc) core_stats->event_entries, GFP_KERNEL); + if (core_stats->events == NULL) + goto fail_construct_events; + } else { core_stats->entries = 0; } - if (core_stats->time == NULL) - goto fail_construct; - /* Purposely leave index 0 unused so we don't need special logic to * handle calculation cases that depend on previous flip data. */ @@ -171,10 +173,13 @@ struct mod_stats *mod_stats_create(struct dc *dc) return &core_stats->public; -fail_construct: +fail_construct_events: + kfree(core_stats->time); + +fail_construct_time: kfree(core_stats); -fail_alloc_context: +fail_construct: return NULL; } @@ -186,6 +191,9 @@ void mod_stats_destroy(struct mod_stats *mod_stats) if (core_stats->time != NULL) kfree(core_stats->time); + if (core_stats->events != NULL) + kfree(core_stats->events); + kfree(core_stats); } } -- cgit v1.2.1 From 5326c4525d1b2d5f1519268dd305e19c9bd4ef56 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Fri, 27 Apr 2018 09:09:52 -0400 Subject: drm/amd/display: Clear connector's edid pointer Clear connector's edid pointer on coonnector update, when unplugging the display. Fix poison EDID when hotplugging on previously used connector. Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6d0dc1fecb39..1ce10bc2d37b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -911,6 +911,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) drm_mode_connector_update_edid_property(connector, NULL); aconnector->num_modes = 0; aconnector->dc_sink = NULL; + aconnector->edid = NULL; } mutex_unlock(&dev->mode_config.mutex); -- cgit v1.2.1 From cd3cb7c08754cd5dd1cbccfc2296d6b7dde511f2 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 17 May 2018 20:21:42 +0800 Subject: drm/amd/pp: Fix build warning in vegam warning: missing braces around initializer [-Wmissing-braces] Acked-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index cf99c5eaf080..ec38c9f50a4d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -320,7 +320,7 @@ int atomctrl_get_memory_pll_dividers_ai(struct pp_hwmgr *hwmgr, pp_atomctrl_memory_clock_param_ai *mpll_param) { struct amdgpu_device *adev = hwmgr->adev; - COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {0}; + COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_3 mpll_parameters = {{0}, 0, 0}; int result; mpll_parameters.ulClock.ulClock = 
cpu_to_le32(clock_value); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index c9a563399330..a40f7141131c 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -1366,10 +1366,12 @@ static int vegam_program_memory_timing_parameters(struct pp_hwmgr *hwmgr) { struct smu7_hwmgr *hw_data = (struct smu7_hwmgr *)(hwmgr->backend); struct vegam_smumgr *smu_data = (struct vegam_smumgr *)(hwmgr->smu_backend); - struct SMU75_Discrete_MCArbDramTimingTable arb_regs = {0}; + struct SMU75_Discrete_MCArbDramTimingTable arb_regs; uint32_t i, j; int result = 0; + memset(&arb_regs, 0, sizeof(SMU75_Discrete_MCArbDramTimingTable)); + for (i = 0; i < hw_data->dpm_table.sclk_table.count; i++) { for (j = 0; j < hw_data->dpm_table.mclk_table.count; j++) { result = vegam_populate_memory_timing_parameters(hwmgr, -- cgit v1.2.1 From 6ee21dbfe9a79edf6f09d5f3ab1f3c4f0699dbf2 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 17 May 2018 13:31:49 -0400 Subject: drm/amdgpu: fix insert nop for VCN decode ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NO_OP register should be writen to 0 Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 0501746b6c2c..7fbbdb1e58da 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -1048,14 +1048,17 @@ static int vcn_v1_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static void vcn_v1_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - int i; struct amdgpu_device *adev = ring->adev; + int i; - for (i = 0; i < count; i++) - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0)); + WARN_ON(ring->wptr % 2 || count % 2); + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP), 0)); + amdgpu_ring_write(ring, 0); + } } @@ -1082,7 +1085,6 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_VCN_DEC, .align_mask = 0xf, - .nop = PACKET0(0x81ff, 0), .support_64bit_ptrs = false, .vmhub = AMDGPU_MMHUB, .get_rptr = vcn_v1_0_dec_ring_get_rptr, @@ -1101,7 +1103,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { .emit_vm_flush = vcn_v1_0_dec_ring_emit_vm_flush, .test_ring = amdgpu_vcn_dec_ring_test_ring, .test_ib = amdgpu_vcn_dec_ring_test_ib, - .insert_nop = vcn_v1_0_ring_insert_nop, + .insert_nop = vcn_v1_0_dec_ring_insert_nop, .insert_start = vcn_v1_0_dec_ring_insert_start, .insert_end = vcn_v1_0_dec_ring_insert_end, .pad_ib = amdgpu_ring_generic_pad_ib, -- cgit v1.2.1 From cbb7a239117d45d512fae1806cc7722f68c7b82f Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 17 May 2018 13:37:50 -0400 Subject: drm/amdgpu: fix insert nop for UVD7 ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NO_OP register should be writen to 0 Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- 
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index f9a5482101bc..57d32f21b3a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -1325,12 +1325,15 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, static void uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) { - int i; struct amdgpu_device *adev = ring->adev; + int i; - for (i = 0; i < count; i++) - amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0)); + WARN_ON(ring->wptr % 2 || count % 2); + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(UVD, ring->me, mmUVD_NO_OP), 0)); + amdgpu_ring_write(ring, 0); + } } static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring) @@ -1710,7 +1713,6 @@ const struct amd_ip_funcs uvd_v7_0_ip_funcs = { static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, - .nop = PACKET0(0x81ff, 0), .support_64bit_ptrs = false, .vmhub = AMDGPU_MMHUB, .get_rptr = uvd_v7_0_ring_get_rptr, -- cgit v1.2.1 From 1aac3c918036d6bb0075281d431da3844a058d00 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 17 May 2018 13:44:28 -0400 Subject: drm/amdgpu: fix insert nop for UVD6 ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NO_OP register should be writen to 0 Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index dc391693d7ce..bfddf97dd13e 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -1100,6 +1100,18 @@ static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0xE); } +static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)); + amdgpu_ring_write(ring, 0); + } +} + static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring) { uint32_t seq = ring->fence_drv.sync_seq; @@ -1532,7 +1544,6 @@ static const struct amd_ip_funcs uvd_v6_0_ip_funcs = { static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, - .nop = PACKET0(mmUVD_NO_OP, 0), .support_64bit_ptrs = false, .get_rptr = uvd_v6_0_ring_get_rptr, .get_wptr = uvd_v6_0_ring_get_wptr, @@ -1548,7 +1559,7 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, .test_ring = uvd_v6_0_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = uvd_v6_0_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, -- cgit v1.2.1 From 0232e30623f3761ce9350328d4d96cea8372b114 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 17 May 2018 13:52:00 -0400 Subject: drm/amdgpu: fix insert nop for UVD5 ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit NO_OP register should be writen to 0 Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index f5d074a887fc..341ee6d55ce8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -541,6 +541,18 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } +static void uvd_v5_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)); + amdgpu_ring_write(ring, 0); + } +} + static bool uvd_v5_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -841,7 +853,6 @@ static const struct amd_ip_funcs uvd_v5_0_ip_funcs = { static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, - .nop = PACKET0(mmUVD_NO_OP, 0), .support_64bit_ptrs = false, .get_rptr = uvd_v5_0_ring_get_rptr, .get_wptr = uvd_v5_0_ring_get_wptr, @@ -854,7 +865,7 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .emit_fence = uvd_v5_0_ring_emit_fence, .test_ring = uvd_v5_0_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = uvd_v5_0_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, -- cgit v1.2.1 From def139037bbf9195467fa83c0a299d666e6ed0bb Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Thu, 17 May 2018 13:54:21 -0400 Subject: drm/amdgpu: fix insert nop for UVD4.2 ring MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NO_OP register should be writen to 0 Signed-off-by: Leo Liu Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 5f22135de77f..6fed3d7797a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -524,6 +524,18 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } +static void uvd_v4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) +{ + int i; + + WARN_ON(ring->wptr % 2 || count % 2); + + for (i = 0; i < count / 2; i++) { + amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0)); + amdgpu_ring_write(ring, 0); + } +} + /** * uvd_v4_2_mc_resume - memory controller programming * @@ -733,7 +745,6 @@ static const struct amd_ip_funcs uvd_v4_2_ip_funcs = { static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .type = AMDGPU_RING_TYPE_UVD, .align_mask = 0xf, - .nop = PACKET0(mmUVD_NO_OP, 0), .support_64bit_ptrs = false, .get_rptr = uvd_v4_2_ring_get_rptr, .get_wptr = uvd_v4_2_ring_get_wptr, @@ -746,7 +757,7 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .emit_fence = uvd_v4_2_ring_emit_fence, .test_ring = uvd_v4_2_ring_test_ring, .test_ib = amdgpu_uvd_ring_test_ib, - .insert_nop = amdgpu_ring_insert_nop, + .insert_nop = 
uvd_v4_2_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, .begin_use = amdgpu_uvd_ring_begin_use, .end_use = amdgpu_uvd_ring_end_use, -- cgit v1.2.1 From bf83060408fea52eccdcf695f3b4b16c71207691 Mon Sep 17 00:00:00 2001 From: Andrey Grodzovsky Date: Thu, 17 May 2018 11:18:34 -0400 Subject: Remove calls to suspend/resume atomic helpers from amdgpu_device_gpu_recover. (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit First of all it's already being called from the display code from amd_ip_funcs.suspend/resume hooks. Second of all, the place in amdgpu_device_gpu_recover it's being called is wrong for GPU stalls since it is called BEFORE we cancel and force completion of all in flight jobs which were not yet processed. So, as Bas pointed in the ticket we will try to wait for fence in amdgpu_pm_compute_clocks but the pipe is hanged so we end up in deadlock. v2: remove unused variable Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=106500 Signed-off-by: Andrey Grodzovsky Reviewed-by: Harry Wentland Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 0e3f69d31b80..adeb48ec4897 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3176,7 +3176,6 @@ error: int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_job *job, bool force) { - struct drm_atomic_state *state = NULL; int i, r, resched; if (!force && !amdgpu_device_ip_check_soft_reset(adev)) { @@ -3199,10 +3198,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, /* block TTM */ resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); - /* store modesetting */ - if (amdgpu_device_has_dc_support(adev)) - state = drm_atomic_helper_suspend(adev->ddev); - /* block all schedulers and reset given job's ring */ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = adev->rings[i]; @@ -3242,10 +3237,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, kthread_unpark(ring->sched.thread); } - if (amdgpu_device_has_dc_support(adev)) { - if (drm_atomic_helper_resume(adev->ddev, state)) - dev_info(adev->dev, "drm resume failed:%d\n", r); - } else { + if (!amdgpu_device_has_dc_support(adev)) { drm_helper_resume_force_mode(adev->ddev); } -- cgit v1.2.1 From 99631045862e2994b47285a8cc96bc939ae5b42f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 10 May 2018 14:45:12 -0500 Subject: drm/amdgpu: add new DF 1.7 register defs Reviewed-by: Hawking Zhang Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h | 4 ++++ drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h | 4 ++++ 2 files changed, 8 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h index 2b305dd021e8..e6044e27a913 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_offset.h @@ -30,4 +30,8 @@ #define mmDF_CS_AON0_DramBaseAddress0 0x0044 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0 +#define mmDF_CS_AON0_CoherentSlaveModeCtrlA0 0x0214 +#define mmDF_CS_AON0_CoherentSlaveModeCtrlA0_BASE_IDX 0 + + #endif diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h index 2ba849798924..a78c99480e2d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_1_7_sh_mask.h @@ -45,4 +45,8 @@ #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L +//DF_CS_AON0_CoherentSlaveModeCtrlA0 +#define DF_CS_AON0_CoherentSlaveModeCtrlA0__ForceParWrRMW__SHIFT 0x3 +#define DF_CS_AON0_CoherentSlaveModeCtrlA0__ForceParWrRMW_MASK 0x00000008L + #endif -- cgit v1.2.1 From 8f9b2e506129e6eb0d21d163f361dd68a050b974 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 10 May 2018 14:59:31 -0500 Subject: drm/amdgpu: add new DF callback for ECC setup The ForceParWrRMW setting needs to be enabled for ECC, but disabled when ECC is not enabled. Reviewed-by: Hawking Zhang Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 03a2c0be0bf2..a59c07590cee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1401,6 +1401,8 @@ struct amdgpu_df_funcs { bool enable); void (*get_clockgating_state)(struct amdgpu_device *adev, u32 *flags); + void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev, + bool enable); }; /* Define the HW IP blocks will be used in driver , add more if necessary */ enum amd_hw_ip_block_type { -- cgit v1.2.1 From 1ca2393b7373d5b0e5a356124fb10fc97e143e88 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 10 May 2018 15:06:55 -0500 Subject: drm/amdgpu: add a df 1.7 implementation of enable_ecc_force_par_wr_rmw Needed for proper memory setup depending on whether ECC is enabled on a particular board. Reviewed-by: Hawking Zhang Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/df_v1_7.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index 4ffda996660f..9935371db7ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -102,6 +102,13 @@ static void df_v1_7_get_clockgating_state(struct amdgpu_device *adev, *flags |= AMD_CG_SUPPORT_DF_MGCG; } +static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev, + bool enable) +{ + WREG32_FIELD15(DF, 0, DF_CS_AON0_CoherentSlaveModeCtrlA0, + ForceParWrRMW, enable); +} + const struct amdgpu_df_funcs df_v1_7_funcs = { .init = df_v1_7_init, .enable_broadcast_mode = df_v1_7_enable_broadcast_mode, @@ -109,4 +116,5 @@ const struct amdgpu_df_funcs df_v1_7_funcs = { .get_hbm_channel_number = df_v1_7_get_hbm_channel_number, .update_medium_grain_clock_gating = df_v1_7_update_medium_grain_clock_gating, .get_clockgating_state = df_v1_7_get_clockgating_state, + .enable_ecc_force_par_wr_rmw = df_v1_7_enable_ecc_force_par_wr_rmw, }; -- cgit v1.2.1 From e1d1a7729a62d7b79fb2ab4ac3bc6fc0ebfb6db9 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 10 May 2018 15:15:12 -0500 Subject: drm/amdgpu/gmc9: disable partial wr rmw if ECC is not enabled The vbios mistakenly sets this bit on some boards without ECC. This can lead to reduced performance in some workloads. Disable the bit if the board does not have ECC. 
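For illustration, a condensed sketch of how the DF 1.7 pieces from the three preceding patches are meant to be used by this one. The callback body is taken from the df_v1_7.c hunk above; the surrounding gmc_v9_0_late_init() logic is elided and only indicative of the hunk that follows.

/* df_v1_7.c: callback added above, toggles the ForceParWrRMW field of
 * DF_CS_AON0_CoherentSlaveModeCtrlA0 introduced by the register header patch.
 */
static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD15(DF, 0, DF_CS_AON0_CoherentSlaveModeCtrlA0,
		       ForceParWrRMW, enable);
}

/* gmc_v9_0.c late init (sketch): when gmc_v9_0_ecc_available() reports that
 * ECC is not present, clear the bit the vbios may have left set so partial
 * writes are no longer forced to read-modify-write.
 */
	if (r == 0) {
		DRM_INFO("ECC is not present.\n");
		adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
	}
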
Reviewed-by: Hawking Zhang Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index b60ed288d314..3c0a85d4e4ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -675,6 +675,7 @@ static int gmc_v9_0_late_init(void *handle) DRM_INFO("ECC is active.\n"); } else if (r == 0) { DRM_INFO("ECC is not present.\n"); + adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false); } else { DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r); return r; -- cgit v1.2.1 From 63e138abf0761c7ea3dcb29060bfd48a34e58ecf Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 18 May 2018 14:24:44 +0800 Subject: drm/amd/pp: Fix static checker warning error: uninitialized symbol 'xxxx' Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c | 24 +++++++++------------- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 3 ++- drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c | 6 ++---- 3 files changed, 14 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index ec38c9f50a4d..7047e29755c3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1104,10 +1104,8 @@ int atomctrl_get_voltage_evv_on_sclk( GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), (uint32_t *)&get_voltage_info_param_space); - if (0 != result) - return result; - - *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) + *voltage = result ? 0 : + le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) (&get_voltage_info_param_space))->usVoltageLevel); return result; @@ -1312,8 +1310,7 @@ int atomctrl_read_efuse(struct pp_hwmgr *hwmgr, uint16_t start_index, result = amdgpu_atom_execute_table(adev->mode_info.atom_context, GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), (uint32_t *)&efuse_param); - if (!result) - *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask; + *efuse = result ? 0 : le32_to_cpu(efuse_param.ulEfuseValue) & mask; return result; } @@ -1354,11 +1351,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_ GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), (uint32_t *)&get_voltage_info_param_space); - if (0 != result) - return result; - - *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *) - (&get_voltage_info_param_space))->ulVoltageLevel); + *voltage = result ? 
0 : + le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel); return result; } @@ -1552,15 +1546,17 @@ void atomctrl_get_voltage_range(struct pp_hwmgr *hwmgr, uint32_t *max_vddc, case CHIP_FIJI: *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMaxVddc/4); *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_3 *)profile)->ulMinVddc/4); - break; + return; case CHIP_POLARIS11: case CHIP_POLARIS10: case CHIP_POLARIS12: *max_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMaxVddc/100); *min_vddc = le32_to_cpu(((ATOM_ASIC_PROFILING_INFO_V3_6 *)profile)->ulMinVddc/100); - break; - default: return; + default: + break; } } + *max_vddc = 0; + *min_vddc = 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 646c9e9bf681..45e9b8cb169d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -860,7 +860,8 @@ static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - uint32_t min_vddc, max_vddc; + uint32_t min_vddc = 0; + uint32_t max_vddc = 0; if (!table_info) return; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c index 64d33b775906..d644a9bb9078 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c @@ -283,11 +283,9 @@ int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit); - if (result) - return result; + *value = result ? 
0 : cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); - *value = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11); - return 0; + return result; } int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit) -- cgit v1.2.1 From e6ee925b795311679dd6e0ebeae6f1dbe983c059 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Fri, 18 May 2018 14:59:46 +0800 Subject: drm/amd/pp: fix a couple locking issues We should return unlock on the error path Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- .../gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 31 +++++++++++++--------- 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c index 99b29ff45d91..c952845833d7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c @@ -936,45 +936,49 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr) if (hwmgr->chip_id == CHIP_POLARIS10) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); } else if (hwmgr->chip_id == CHIP_POLARIS11) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); if (hwmgr->is_kicker) result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11_Kicker); else result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); } else if (hwmgr->chip_id == CHIP_POLARIS12) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris12); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); } else if (hwmgr->chip_id == CHIP_VEGAM) { result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_VegaM); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_VegaM); - PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", goto error); } } cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2); result = smu7_enable_didt(hwmgr, true); - PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); + PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", goto error); if (hwmgr->chip_id == CHIP_POLARIS11) { result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_EnableDpmDidt)); PP_ASSERT_WITH_CODE((0 == result), - "Failed to enable DPM 
DIDT.", return result); + "Failed to enable DPM DIDT.", goto error); } mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; +error: + mutex_unlock(&adev->grbm_idx_mutex); + adev->gfx.rlc.funcs->exit_safe_mode(adev); + return result; } int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) @@ -992,17 +996,20 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr) result = smu7_enable_didt(hwmgr, false); PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", - return result); + goto error); if (hwmgr->chip_id == CHIP_POLARIS11) { result = smum_send_msg_to_smc(hwmgr, (uint16_t)(PPSMC_MSG_DisableDpmDidt)); PP_ASSERT_WITH_CODE((0 == result), - "Failed to disable DPM DIDT.", return result); + "Failed to disable DPM DIDT.", goto error); } adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; +error: + adev->gfx.rlc.funcs->exit_safe_mode(adev); + return result; } int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr) -- cgit v1.2.1 From 34319b329f73eabd7e3baefecf9f71eb8b86db6f Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 16 May 2018 20:06:53 +0800 Subject: drm/amdgpu: skip CG for VCN when late_init/fini VCN clockgating is handled manually like VCE and UVD. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index adeb48ec4897..290e279abf0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1718,6 +1718,7 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) /* skip CG for VCE/UVD, it's handled specially */ if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* enable clockgating to save power */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, @@ -1817,6 +1818,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev) if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN && adev->ip_blocks[i].version->funcs->set_clockgating_state) { /* ungate blocks before hw fini so that we can shutdown the blocks safely */ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, -- cgit v1.2.1 From ca0b9494633f65ee6779d4c7ca19e799b8308e69 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 16 May 2018 20:09:09 +0800 Subject: drm/amd/pp: Add smu support for VCN powergating on RV Add the powerplay callback for powergating VCN (same as UVD and VCE). 
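A condensed, commented sketch of the callback this patch adds (the full hunk follows). Two details are worth noting: the callback is wired through the existing .powergate_uvd hook rather than a new one, and the ordering differs per direction. The IP block is gated before the SMU cuts power, while power is restored before the block is ungated.

static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
	if (bgate) {
		/* quiesce and gate the VCN IP block first ... */
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
				AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_GATE);
		/* ... then ask the SMU to power the tile down */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_PowerDownVcn, 0);
	} else {
		/* power the tile back up first ... */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_PowerUpVcn, 0);
		/* ... then re-initialize and ungate the block */
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
				AMD_IP_BLOCK_TYPE_VCN, AMD_PG_STATE_UNGATE);
	}
}
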
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 017ef2d169e9..85f84f4d8be5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1128,6 +1128,23 @@ static int smu10_set_mmhub_powergating_by_smu(struct pp_hwmgr *hwmgr) return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub); } +static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate) +{ + if (bgate) { + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_PowerDownVcn, 0); + } else { + smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_PowerUpVcn, 0); + amdgpu_device_ip_set_powergating_state(hwmgr->adev, + AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); + } +} + static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .backend_init = smu10_hwmgr_backend_init, .backend_fini = smu10_hwmgr_backend_fini, @@ -1136,7 +1153,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .force_dpm_level = smu10_dpm_force_dpm_level, .get_power_state_size = smu10_get_power_state_size, .powerdown_uvd = NULL, - .powergate_uvd = NULL, + .powergate_uvd = smu10_powergate_vcn, .powergate_vce = NULL, .get_mclk = smu10_dpm_get_mclk, .get_sclk = smu10_dpm_get_sclk, -- cgit v1.2.1 From 8dbb8cdf52dbc264d531d1e51f5e311bd1558c21 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 16 May 2018 20:10:25 +0800 Subject: drm/amdgpu: Add CG/PG flags for VCN Define new clock and powergating flags for VCN block. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/amd_shared.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 33de33016bda..b178176b72ac 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -92,7 +92,7 @@ enum amd_powergating_state { #define AMD_CG_SUPPORT_GFX_3D_CGLS (1 << 21) #define AMD_CG_SUPPORT_DRM_MGCG (1 << 22) #define AMD_CG_SUPPORT_DF_MGCG (1 << 23) - +#define AMD_CG_SUPPORT_VCN_MGCG (1 << 24) /* PG flags */ #define AMD_PG_SUPPORT_GFX_PG (1 << 0) #define AMD_PG_SUPPORT_GFX_SMG (1 << 1) @@ -108,6 +108,7 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11) #define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12) #define AMD_PG_SUPPORT_MMHUB (1 << 13) +#define AMD_PG_SUPPORT_VCN (1 << 14) enum PP_FEATURE_MASK { PP_SCLK_DPM_MASK = 0x1, -- cgit v1.2.1 From ac06b4cfd78b79ec6c8306062801a4276a3e0c79 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 17 May 2018 15:58:53 +0800 Subject: drm/amdgpu: Add SOC15_WAIT_ON_RREG macro define Add new macro to wait on a register field to be a specific value. 
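A usage sketch: the macro (full definition in the hunk below) busy-polls the register every 2 us, bounded by adev->usec_timeout iterations, and reports -ETIMEDOUT through its ret argument if the loop expires. The VCN patches later in this series use it, for example, to wait for the decoder to report idle:

int vcn_v1_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	/* poll UVD_STATUS until it reads back as idle (0x2) */
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret);

	return ret;
}
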
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index def865067edd..0942f492d2e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -47,6 +47,21 @@ #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \ WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value) +#define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask, ret) \ + do { \ + uint32_t tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ + uint32_t loop = adev->usec_timeout; \ + while ((tmp_ & (mask)) != (expected_value)) { \ + udelay(2); \ + tmp_ = RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ + loop--; \ + if (!loop) { \ + ret = -ETIMEDOUT; \ + break; \ + } \ + } \ + } while (0) + #endif -- cgit v1.2.1 From c9dc5abb661b02239eef6cd991700707dc381110 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 17 May 2018 11:11:22 +0800 Subject: drm/amdgpu: Add static CG control for VCN on RV Implement proper static clockgating support for VCN. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 50 +++++++++++++++++++++++++++-------- 1 file changed, 39 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 7fbbdb1e58da..7a366418d5f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -288,14 +288,14 @@ static void vcn_v1_0_mc_resume(struct amdgpu_device *adev) * * Disable clock gating for VCN block */ -static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw) +static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev) { uint32_t data; /* JPEG disable CGC */ data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - if (sw) + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; else data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK; @@ -310,7 +310,7 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw) /* UVD disable CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); - if (sw) + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; else data &= ~ UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK; @@ -415,13 +415,13 @@ static void vcn_v1_0_disable_clock_gating(struct amdgpu_device *adev, bool sw) * * Enable clock gating for VCN block */ -static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw) +static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev) { uint32_t data = 0; /* enable JPEG CGC */ data = RREG32_SOC15(VCN, 0, mmJPEG_CGC_CTRL); - if (sw) + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; else data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; @@ -435,7 +435,7 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev, bool sw) /* enable UVD CGC */ data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL); - if (sw) + if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG) data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; else data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT; @@ -500,7 +500,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) 
vcn_v1_0_mc_resume(adev); /* disable clock gating */ - vcn_v1_0_disable_clock_gating(adev, true); + vcn_v1_0_disable_clock_gating(adev); /* disable interupt */ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_MASTINT_EN), 0, @@ -681,15 +681,43 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev) ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); /* enable clock gating */ - vcn_v1_0_enable_clock_gating(adev, true); + vcn_v1_0_enable_clock_gating(adev); return 0; } +bool vcn_v1_0_is_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2); +} + +int vcn_v1_0_wait_for_idle(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int ret = 0; + + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_STATUS, 0x2, 0x2, ret); + + return ret; +} + static int vcn_v1_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { - /* needed for driver unload*/ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_CG_STATE_GATE) ? true : false; + + if (enable) { + /* wait for STATUS to clear */ + if (vcn_v1_0_is_idle(handle)) + return -EBUSY; + vcn_v1_0_enable_clock_gating(adev); + } else { + /* disable HW gating and enable Sw gating */ + vcn_v1_0_disable_clock_gating(adev); + } return 0; } @@ -1072,8 +1100,8 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .hw_fini = vcn_v1_0_hw_fini, .suspend = vcn_v1_0_suspend, .resume = vcn_v1_0_resume, - .is_idle = NULL /* vcn_v1_0_is_idle */, - .wait_for_idle = NULL /* vcn_v1_0_wait_for_idle */, + .is_idle = vcn_v1_0_is_idle, + .wait_for_idle = vcn_v1_0_wait_for_idle, .check_soft_reset = NULL /* vcn_v1_0_check_soft_reset */, .pre_soft_reset = NULL /* vcn_v1_0_pre_soft_reset */, .soft_reset = NULL /* vcn_v1_0_soft_reset */, -- cgit v1.2.1 From 79953a60e4476be90fa1767fbf49a76b6a8b01ef Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 17 May 2018 11:13:51 +0800 Subject: drm/amdgpu: Enable VCN CG by default on RV Enable VCN clockgating by default on Raven. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0e4f67e4c875..2cf9a188131d 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -710,7 +710,8 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_MC_MGCG | AMD_CG_SUPPORT_MC_LS | AMD_CG_SUPPORT_SDMA_MGCG | - AMD_CG_SUPPORT_SDMA_LS; + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_VCN_MGCG; adev->pg_flags = AMD_PG_SUPPORT_SDMA; if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) -- cgit v1.2.1 From d58c5d9a42050c93f17ba82aaff0f34a30761ac7 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 17 May 2018 16:07:02 +0800 Subject: drm/amdgpu: Add VCN static PG support on RV Implement static powergating suport on VCN. 
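The new PGFSM handling added below follows a write-then-poll handshake: the requested per-tile power state is written to UVD_PGFSM_CONFIG, then UVD_PGFSM_STATUS is polled (via the SOC15_WAIT_ON_RREG helper introduced earlier in this series) until the power state machine confirms it. A condensed outline of the power-up path when AMD_PG_SUPPORT_VCN is set, with the long per-tile field lists elided; see the hunks below for the full lists.

	/* request UVDM/UVDU power-on, remaining tiles as in the hunk below */
	data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
		| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
		/* | ... remaining UVD* tile PWR_CONFIG fields ... */);
	WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data);

	/* wait until the state machine reports UVDM/UVDU powered on */
	SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS,
			   UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret);

The power-down path in vcn_1_0_enable_static_power_gating() mirrors this, requesting state 2 for every tile and polling for the matching status value.
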
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 11 ++++ drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 92 ++++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 2fd7db891689..181e6afa9847 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -45,6 +45,17 @@ #define VCN_ENC_CMD_REG_WRITE 0x0000000b #define VCN_ENC_CMD_REG_WAIT 0x0000000c +enum engine_status_constants { + UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0, + UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002, + UVD_STATUS__UVD_BUSY = 0x00000004, + GB_ADDR_CONFIG_DEFAULT = 0x26010011, + UVD_STATUS__IDLE = 0x2, + UVD_STATUS__BUSY = 0x5, + UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1, + UVD_STATUS__RBC_BUSY = 0x1, +}; + struct amdgpu_vcn { struct amdgpu_bo *vcpu_bo; void *cpu_addr; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 7a366418d5f4..dcb60ee0d9e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -480,6 +480,94 @@ static void vcn_v1_0_enable_clock_gating(struct amdgpu_device *adev) WREG32_SOC15(VCN, 0, mmUVD_SUVD_CGC_CTRL, data); } +static void vcn_1_0_disable_static_power_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + int ret; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); + + WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON, 0xFFFFFF, ret); + } else { + data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 1 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); + WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, 0, 0xFFFFFFFF, ret); + } + + /* polling UVD_PGFSM_STATUS to confirm UVDM_PWR_STATUS , UVDU_PWR_STATUS are 0 (power on) */ + + data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); + data &= ~0x103; + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) + data |= UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON | UVD_POWER_STATUS__UVD_PG_EN_MASK; + + WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data); +} + +static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev) +{ + uint32_t data = 0; + int ret; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { + /* Before power off, this indicator has to be turned on */ + data = 
RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS); + data &= ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK; + data |= UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF; + WREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS, data); + + + data = (2 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDF_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDC_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDB_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDIL_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDIR_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTD_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDTE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDE_PWR_CONFIG__SHIFT + | 2 << UVD_PGFSM_CONFIG__UVDW_PWR_CONFIG__SHIFT); + + WREG32_SOC15(VCN, 0, mmUVD_PGFSM_CONFIG, data); + + data = (2 << UVD_PGFSM_STATUS__UVDM_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDU_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDF_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDC_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDB_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDIL_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDIR_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTD_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDTE_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDE_PWR_STATUS__SHIFT + | 2 << UVD_PGFSM_STATUS__UVDW_PWR_STATUS__SHIFT); + SOC15_WAIT_ON_RREG(VCN, 0, mmUVD_PGFSM_STATUS, data, 0xFFFFFFFF, ret); + } +} + /** * vcn_v1_0_start - start VCN block * @@ -499,6 +587,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev) vcn_v1_0_mc_resume(adev); + vcn_1_0_disable_static_power_gating(adev); /* disable clock gating */ vcn_v1_0_disable_clock_gating(adev); @@ -681,8 +770,9 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev) ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); /* enable clock gating */ - vcn_v1_0_enable_clock_gating(adev); + vcn_v1_0_enable_clock_gating(adev); + vcn_1_0_enable_static_power_gating(adev); return 0; } -- cgit v1.2.1 From 61c8e90d965ba944d8b56c29c2c7bb9ec34f45d5 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Thu, 17 May 2018 16:03:47 +0800 Subject: drm/amdgpu: Enable VCN static PG by default on RV Enable static VCN powergating by default on Raven. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 2cf9a188131d..68b4a22a8892 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -712,7 +712,8 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA; + + adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | -- cgit v1.2.1 From 22cc6c5e1958e5a08b4c44203d1810ab07ce5a16 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 16 May 2018 20:18:22 +0800 Subject: drm/amdgpu: Add runtime VCN PG support Enable support for dynamically powering up/down VCN on demand. 
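The pieces below fit together as follows: ring use ungates the block, the delayed idle worker re-gates it once no fences remain, and the new IP powergating callback maps the requested state onto a full start/stop of VCN. A condensed sketch of that callback (the body matches the hunk further down, with the comment trimmed):

    static int vcn_v1_0_set_powergating_state(void *handle,
                                              enum amd_powergating_state state)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;

            /* gating tears the block down, ungating brings it back up */
            if (state == AMD_PG_STATE_GATE)
                    return vcn_v1_0_stop(adev);
            else
                    return vcn_v1_0_start(adev);
    }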
Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 16 +++++++++------- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 32 +++++++++++++++++++++----------- 2 files changed, 30 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 60468385e6b4..8851bcdfc260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -212,11 +212,11 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work) } if (fences == 0) { - if (adev->pm.dpm_enabled) { - /* might be used when with pg/cg + if (adev->pm.dpm_enabled) amdgpu_dpm_enable_uvd(adev, false); - */ - } + else + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_GATE); } else { schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); } @@ -228,9 +228,11 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring) bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); if (set_clocks && adev->pm.dpm_enabled) { - /* might be used when with pg/cg - amdgpu_dpm_enable_uvd(adev, true); - */ + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + else + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, + AMD_PG_STATE_UNGATE); } } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index dcb60ee0d9e2..110b294ebed3 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -35,7 +35,6 @@ #include "mmhub/mmhub_9_1_offset.h" #include "mmhub/mmhub_9_1_sh_mask.h" -static int vcn_v1_0_start(struct amdgpu_device *adev); static int vcn_v1_0_stop(struct amdgpu_device *adev); static void vcn_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev); static void vcn_v1_0_set_enc_ring_funcs(struct amdgpu_device *adev); @@ -146,10 +145,6 @@ static int vcn_v1_0_hw_init(void *handle) struct amdgpu_ring *ring = &adev->vcn.ring_dec; int i, r; - r = vcn_v1_0_start(adev); - if (r) - goto done; - ring->ready = true; r = amdgpu_ring_test_ring(ring); if (r) { @@ -185,11 +180,9 @@ static int vcn_v1_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring = &adev->vcn.ring_dec; - int r; - r = vcn_v1_0_stop(adev); - if (r) - return r; + if (RREG32_SOC15(VCN, 0, mmUVD_STATUS)) + vcn_v1_0_stop(adev); ring->ready = false; @@ -769,7 +762,7 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev) WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_LMI_CTRL2), 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); - /* enable clock gating */ + WREG32_SOC15(VCN, 0, mmUVD_STATUS, 0); vcn_v1_0_enable_clock_gating(adev); vcn_1_0_enable_static_power_gating(adev); @@ -1179,6 +1172,23 @@ static void vcn_v1_0_dec_ring_insert_nop(struct amdgpu_ring *ring, uint32_t coun } } +static int vcn_v1_0_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + /* This doesn't actually powergate the VCN block. + * That's done in the dpm code via the SMC. This + * just re-inits the block as necessary. The actual + * gating still happens in the dpm code. 
We should + * revisit this when there is a cleaner line between + * the smc and the hw blocks + */ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (state == AMD_PG_STATE_GATE) + return vcn_v1_0_stop(adev); + else + return vcn_v1_0_start(adev); +} static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .name = "vcn_v1_0", @@ -1197,7 +1207,7 @@ static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { .soft_reset = NULL /* vcn_v1_0_soft_reset */, .post_soft_reset = NULL /* vcn_v1_0_post_soft_reset */, .set_clockgating_state = vcn_v1_0_set_clockgating_state, - .set_powergating_state = NULL /* vcn_v1_0_set_powergating_state */, + .set_powergating_state = vcn_v1_0_set_powergating_state, }; static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = { -- cgit v1.2.1 From af4c0f650b563c7b30c1d8cd2bb926247ceb19cc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 19 Apr 2018 10:56:02 +0200 Subject: drm/amdgpu: rework VM state machine lock handling v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Only the moved state needs a separate spin lock protection. All other states are protected by reserving the VM anyway. v2: fix some more incorrect cases Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 66 +++++++++++----------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 4 +-- 2 files changed, 21 insertions(+), 49 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 1a8f4e0dd023..f0deedcaf1c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -119,9 +119,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, * is currently evicted. add the bo to the evicted list to make sure it * is validated on next vm use to avoid fault. 
* */ - spin_lock(&vm->status_lock); list_move_tail(&base->vm_status, &vm->evicted); - spin_unlock(&vm->status_lock); } /** @@ -228,7 +226,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct ttm_bo_global *glob = adev->mman.bdev.glob; int r; - spin_lock(&vm->status_lock); while (!list_empty(&vm->evicted)) { struct amdgpu_vm_bo_base *bo_base; struct amdgpu_bo *bo; @@ -236,10 +233,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, bo_base = list_first_entry(&vm->evicted, struct amdgpu_vm_bo_base, vm_status); - spin_unlock(&vm->status_lock); bo = bo_base->bo; - BUG_ON(!bo); if (bo->parent) { r = validate(param, bo); if (r) @@ -259,13 +254,14 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, return r; } - spin_lock(&vm->status_lock); - if (bo->tbo.type != ttm_bo_type_kernel) + if (bo->tbo.type != ttm_bo_type_kernel) { + spin_lock(&vm->moved_lock); list_move(&bo_base->vm_status, &vm->moved); - else + spin_unlock(&vm->moved_lock); + } else { list_move(&bo_base->vm_status, &vm->relocated); + } } - spin_unlock(&vm->status_lock); return 0; } @@ -279,13 +275,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - bool ready; - - spin_lock(&vm->status_lock); - ready = list_empty(&vm->evicted); - spin_unlock(&vm->status_lock); - - return ready; + return list_empty(&vm->evicted); } /** @@ -477,9 +467,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); - spin_lock(&vm->status_lock); list_move(&entry->base.vm_status, &vm->relocated); - spin_unlock(&vm->status_lock); } if (level < AMDGPU_VM_PTB) { @@ -926,10 +914,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev, if (!entry->base.bo) continue; - spin_lock(&vm->status_lock); if (list_empty(&entry->base.vm_status)) list_add(&entry->base.vm_status, &vm->relocated); - spin_unlock(&vm->status_lock); amdgpu_vm_invalidate_level(adev, vm, entry, level + 1); } } @@ -974,7 +960,6 @@ restart: params.func = amdgpu_vm_do_set_ptes; } - spin_lock(&vm->status_lock); while (!list_empty(&vm->relocated)) { struct amdgpu_vm_bo_base *bo_base, *parent; struct amdgpu_vm_pt *pt, *entry; @@ -984,13 +969,10 @@ restart: struct amdgpu_vm_bo_base, vm_status); list_del_init(&bo_base->vm_status); - spin_unlock(&vm->status_lock); bo = bo_base->bo->parent; - if (!bo) { - spin_lock(&vm->status_lock); + if (!bo) continue; - } parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base, bo_list); @@ -999,12 +981,10 @@ restart: amdgpu_vm_update_pde(¶ms, vm, pt, entry); - spin_lock(&vm->status_lock); if (!vm->use_cpu_for_update && (ndw - params.ib->length_dw) < 32) break; } - spin_unlock(&vm->status_lock); if (vm->use_cpu_for_update) { /* Flush HDP */ @@ -1107,9 +1087,7 @@ static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p, if (entry->huge) { /* Add the entry to the relocated list to update it. 
*/ entry->huge = false; - spin_lock(&p->vm->status_lock); list_move(&entry->base.vm_status, &p->vm->relocated); - spin_unlock(&p->vm->status_lock); } return; } @@ -1588,8 +1566,9 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, amdgpu_asic_flush_hdp(adev, NULL); } - spin_lock(&vm->status_lock); + spin_lock(&vm->moved_lock); list_del_init(&bo_va->base.vm_status); + spin_unlock(&vm->moved_lock); /* If the BO is not in its preferred location add it back to * the evicted list so that it gets validated again on the @@ -1599,7 +1578,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, !(bo->preferred_domains & amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))) list_add_tail(&bo_va->base.vm_status, &vm->evicted); - spin_unlock(&vm->status_lock); list_splice_init(&bo_va->invalids, &bo_va->valids); bo_va->cleared = clear; @@ -1811,14 +1789,14 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, bool clear; int r = 0; - spin_lock(&vm->status_lock); + spin_lock(&vm->moved_lock); while (!list_empty(&vm->moved)) { struct amdgpu_bo_va *bo_va; struct reservation_object *resv; bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, base.vm_status); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->moved_lock); resv = bo_va->base.bo->tbo.resv; @@ -1839,9 +1817,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, if (!clear && resv != vm->root.base.bo->tbo.resv) reservation_object_unlock(resv); - spin_lock(&vm->status_lock); + spin_lock(&vm->moved_lock); } - spin_unlock(&vm->status_lock); + spin_unlock(&vm->moved_lock); return r; } @@ -1903,10 +1881,10 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, amdgpu_vm_prt_get(adev); if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { - spin_lock(&vm->status_lock); + spin_lock(&vm->moved_lock); if (list_empty(&bo_va->base.vm_status)) list_add(&bo_va->base.vm_status, &vm->moved); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->moved_lock); } trace_amdgpu_vm_bo_map(bo_va, mapping); } @@ -2216,9 +2194,9 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_del(&bo_va->base.bo_list); - spin_lock(&vm->status_lock); + spin_lock(&vm->moved_lock); list_del(&bo_va->base.vm_status); - spin_unlock(&vm->status_lock); + spin_unlock(&vm->moved_lock); list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { list_del(&mapping->list); @@ -2261,28 +2239,24 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, bo_base->moved = true; if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { - spin_lock(&bo_base->vm->status_lock); if (bo->tbo.type == ttm_bo_type_kernel) list_move(&bo_base->vm_status, &vm->evicted); else list_move_tail(&bo_base->vm_status, &vm->evicted); - spin_unlock(&bo_base->vm->status_lock); continue; } if (bo->tbo.type == ttm_bo_type_kernel) { - spin_lock(&bo_base->vm->status_lock); if (list_empty(&bo_base->vm_status)) list_add(&bo_base->vm_status, &vm->relocated); - spin_unlock(&bo_base->vm->status_lock); continue; } - spin_lock(&bo_base->vm->status_lock); + spin_lock(&bo_base->vm->moved_lock); if (list_empty(&bo_base->vm_status)) list_add(&bo_base->vm_status, &vm->moved); - spin_unlock(&bo_base->vm->status_lock); + spin_unlock(&bo_base->vm->moved_lock); } } @@ -2391,9 +2365,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->va = RB_ROOT_CACHED; for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) vm->reserved_vmid[i] = NULL; - spin_lock_init(&vm->status_lock); INIT_LIST_HEAD(&vm->evicted); INIT_LIST_HEAD(&vm->relocated); + spin_lock_init(&vm->moved_lock); 
INIT_LIST_HEAD(&vm->moved); INIT_LIST_HEAD(&vm->freed); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index d6827083572a..0196b9a782f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -168,9 +168,6 @@ struct amdgpu_vm { /* tree of virtual addresses mapped */ struct rb_root_cached va; - /* protecting invalidated */ - spinlock_t status_lock; - /* BOs who needs a validation */ struct list_head evicted; @@ -179,6 +176,7 @@ struct amdgpu_vm { /* BOs moved, but not yet updated in the PT */ struct list_head moved; + spinlock_t moved_lock; /* BO mappings freed, but not yet updated in the PT */ struct list_head freed; -- cgit v1.2.1 From 91ccdd24a1955dbec97a6d61322be214b7de1974 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 19 Apr 2018 11:02:54 +0200 Subject: drm/amdgpu: cleanup amdgpu_vm_validate_pt_bos v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use list_for_each_entry_safe here. v2: Drop the optimization, it doesn't work as expected. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f0deedcaf1c9..3be4d5fc60b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -224,21 +224,16 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, void *param) { struct ttm_bo_global *glob = adev->mman.bdev.glob; - int r; - - while (!list_empty(&vm->evicted)) { - struct amdgpu_vm_bo_base *bo_base; - struct amdgpu_bo *bo; + struct amdgpu_vm_bo_base *bo_base, *tmp; + int r = 0; - bo_base = list_first_entry(&vm->evicted, - struct amdgpu_vm_bo_base, - vm_status); + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { + struct amdgpu_bo *bo = bo_base->bo; - bo = bo_base->bo; if (bo->parent) { r = validate(param, bo); if (r) - return r; + break; spin_lock(&glob->lru_lock); ttm_bo_move_to_lru_tail(&bo->tbo); @@ -251,7 +246,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update) { r = amdgpu_bo_kmap(bo, NULL); if (r) - return r; + break; } if (bo->tbo.type != ttm_bo_type_kernel) { @@ -263,7 +258,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, } } - return 0; + return r; } /** -- cgit v1.2.1 From 789f3317ed33e34fa97c8918c075c68a62e51a4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 19 Apr 2018 11:08:24 +0200 Subject: drm/amdgpu: further optimize amdgpu_vm_handle_moved MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Splice the moved list to a local one to avoid taking the lock over and over again. 
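The locking idiom is the usual splice-to-local-list trick: take the spinlock once, steal the whole list onto a stack-local head, then walk the entries with the lock dropped. A minimal sketch, assuming the kernel list helpers and the moved_lock introduced two patches earlier:

    LIST_HEAD(moved);

    spin_lock(&vm->moved_lock);
    list_splice_init(&vm->moved, &moved);   /* take everything in one go */
    spin_unlock(&vm->moved_lock);

    list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) {
            /* update each BO without holding moved_lock */
    }

    /* on failure, the unprocessed remainder is spliced back under the lock */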
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3be4d5fc60b3..4d88b060fbde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1781,19 +1781,18 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, int amdgpu_vm_handle_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm) { + struct amdgpu_bo_va *bo_va, *tmp; + struct list_head moved; bool clear; - int r = 0; + int r; + INIT_LIST_HEAD(&moved); spin_lock(&vm->moved_lock); - while (!list_empty(&vm->moved)) { - struct amdgpu_bo_va *bo_va; - struct reservation_object *resv; - - bo_va = list_first_entry(&vm->moved, - struct amdgpu_bo_va, base.vm_status); - spin_unlock(&vm->moved_lock); + list_splice_init(&vm->moved, &moved); + spin_unlock(&vm->moved_lock); - resv = bo_va->base.bo->tbo.resv; + list_for_each_entry_safe(bo_va, tmp, &moved, base.vm_status) { + struct reservation_object *resv = bo_va->base.bo->tbo.resv; /* Per VM BOs never need to bo cleared in the page tables */ if (resv == vm->root.base.bo->tbo.resv) @@ -1806,17 +1805,19 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, clear = true; r = amdgpu_vm_bo_update(adev, bo_va, clear); - if (r) + if (r) { + spin_lock(&vm->moved_lock); + list_splice(&moved, &vm->moved); + spin_unlock(&vm->moved_lock); return r; + } if (!clear && resv != vm->root.base.bo->tbo.resv) reservation_object_unlock(resv); - spin_lock(&vm->moved_lock); } - spin_unlock(&vm->moved_lock); - return r; + return 0; } /** -- cgit v1.2.1 From a7f91061c60ad9cac2e6a03b642be6a4f88b3662 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 19 Apr 2018 13:58:42 +0200 Subject: drm/amdgpu: kmap PDs/PTs in amdgpu_vm_update_directories MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In theory it is possible that PDs/PTs can move without eviction. 
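Because a PD/PT can now land on the relocated list without having gone through the eviction path, the CPU update path cannot rely on a mapping made in amdgpu_vm_validate_pt_bos(). The fix is to kmap every relocated page table right before the CPU update, roughly as in the hunk below:

    if (vm->use_cpu_for_update) {
            struct amdgpu_vm_bo_base *bo_base;

            /* make sure every relocated PD/PT has a CPU mapping */
            list_for_each_entry(bo_base, &vm->relocated, vm_status) {
                    r = amdgpu_bo_kmap(bo_base->bo, NULL);
                    if (unlikely(r))
                            return r;
            }
    }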
Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 4d88b060fbde..a31afac8e8e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -242,13 +242,6 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_unlock(&glob->lru_lock); } - if (bo->tbo.type == ttm_bo_type_kernel && - vm->use_cpu_for_update) { - r = amdgpu_bo_kmap(bo, NULL); - if (r) - break; - } - if (bo->tbo.type != ttm_bo_type_kernel) { spin_lock(&vm->moved_lock); list_move(&bo_base->vm_status, &vm->moved); @@ -940,6 +933,14 @@ restart: params.adev = adev; if (vm->use_cpu_for_update) { + struct amdgpu_vm_bo_base *bo_base; + + list_for_each_entry(bo_base, &vm->relocated, vm_status) { + r = amdgpu_bo_kmap(bo_base->bo, NULL); + if (unlikely(r)) + return r; + } + r = amdgpu_vm_wait_pd(adev, vm, AMDGPU_FENCE_OWNER_VM); if (unlikely(r)) return r; -- cgit v1.2.1 From 862b8c5762e4e2324d18c881ce86062af72b2063 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 19 Apr 2018 14:22:56 +0200 Subject: drm/amdgpu: consistenly use VM moved flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of sometimes checking if the vm_status is empty use the moved flag and also reset it when the BO leaves the state machine. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index a31afac8e8e9..f5dee4c6757c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -902,8 +902,8 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev, if (!entry->base.bo) continue; - if (list_empty(&entry->base.vm_status)) - list_add(&entry->base.vm_status, &vm->relocated); + if (!entry->base.moved) + list_move(&entry->base.vm_status, &vm->relocated); amdgpu_vm_invalidate_level(adev, vm, entry, level + 1); } } @@ -964,6 +964,7 @@ restart: bo_base = list_first_entry(&vm->relocated, struct amdgpu_vm_bo_base, vm_status); + bo_base->moved = false; list_del_init(&bo_base->vm_status); bo = bo_base->bo->parent; @@ -1877,10 +1878,10 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev, if (mapping->flags & AMDGPU_PTE_PRT) amdgpu_vm_prt_get(adev); - if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv && + !bo_va->base.moved) { spin_lock(&vm->moved_lock); - if (list_empty(&bo_va->base.vm_status)) - list_add(&bo_va->base.vm_status, &vm->moved); + list_move(&bo_va->base.vm_status, &vm->moved); spin_unlock(&vm->moved_lock); } trace_amdgpu_vm_bo_map(bo_va, mapping); @@ -2233,6 +2234,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, list_for_each_entry(bo_base, &bo->va, bo_list) { struct amdgpu_vm *vm = bo_base->vm; + bool was_moved = bo_base->moved; bo_base->moved = true; if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) { @@ -2244,16 +2246,16 @@ void amdgpu_vm_bo_invalidate(struct 
amdgpu_device *adev, continue; } - if (bo->tbo.type == ttm_bo_type_kernel) { - if (list_empty(&bo_base->vm_status)) - list_add(&bo_base->vm_status, &vm->relocated); + if (was_moved) continue; - } - spin_lock(&bo_base->vm->moved_lock); - if (list_empty(&bo_base->vm_status)) - list_add(&bo_base->vm_status, &vm->moved); - spin_unlock(&bo_base->vm->moved_lock); + if (bo->tbo.type == ttm_bo_type_kernel) { + list_move(&bo_base->vm_status, &vm->relocated); + } else { + spin_lock(&bo_base->vm->moved_lock); + list_move(&bo_base->vm_status, &vm->moved); + spin_unlock(&bo_base->vm->moved_lock); + } } } -- cgit v1.2.1 From 806f043f0253a76248c554ce9f7303bc25e43314 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Thu, 19 Apr 2018 15:01:12 +0200 Subject: drm/amdgpu: move VM BOs on LRU again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move all BOs belonging to a VM on the LRU with every submission. Signed-off-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 28 +++++++++++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3 +++ 2 files changed, 26 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index f5dee4c6757c..ccba88cc8c54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -251,6 +251,19 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, } } + spin_lock(&glob->lru_lock); + list_for_each_entry(bo_base, &vm->idle, vm_status) { + struct amdgpu_bo *bo = bo_base->bo; + + if (!bo->parent) + continue; + + ttm_bo_move_to_lru_tail(&bo->tbo); + if (bo->shadow) + ttm_bo_move_to_lru_tail(&bo->shadow->tbo); + } + spin_unlock(&glob->lru_lock); + return r; } @@ -965,7 +978,7 @@ restart: struct amdgpu_vm_bo_base, vm_status); bo_base->moved = false; - list_del_init(&bo_base->vm_status); + list_move(&bo_base->vm_status, &vm->idle); bo = bo_base->bo->parent; if (!bo) @@ -1571,10 +1584,14 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, * the evicted list so that it gets validated again on the * next command submission. 
*/ - if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv && - !(bo->preferred_domains & - amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))) - list_add_tail(&bo_va->base.vm_status, &vm->evicted); + if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) { + uint32_t mem_type = bo->tbo.mem.mem_type; + + if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) + list_add_tail(&bo_va->base.vm_status, &vm->evicted); + else + list_add(&bo_va->base.vm_status, &vm->idle); + } list_splice_init(&bo_va->invalids, &bo_va->valids); bo_va->cleared = clear; @@ -2368,6 +2385,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, INIT_LIST_HEAD(&vm->relocated); spin_lock_init(&vm->moved_lock); INIT_LIST_HEAD(&vm->moved); + INIT_LIST_HEAD(&vm->idle); INIT_LIST_HEAD(&vm->freed); /* create scheduler entity for page table updates */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 0196b9a782f2..061b99a18cb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -178,6 +178,9 @@ struct amdgpu_vm { struct list_head moved; spinlock_t moved_lock; + /* All BOs of this VM not currently in the state machine */ + struct list_head idle; + /* BO mappings freed, but not yet updated in the PT */ struct list_head freed; -- cgit v1.2.1 From bf20f0ab544d8982af375c87e3d870d45237eecc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 21 May 2018 10:16:28 -0500 Subject: drm/amdgpu/pp: remove duplicate assignment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit is_dpm_running callback was assigned to the same value twice. Drop the duplicate. Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c index a40f7141131c..2de48959ac93 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c @@ -2379,6 +2379,5 @@ const struct pp_smumgr_func vegam_smu_funcs = { .update_sclk_threshold = vegam_update_sclk_threshold, .is_hw_avfs_present = vegam_is_hw_avfs_present, .thermal_avfs_enable = vegam_thermal_avfs_enable, - .is_dpm_running = vegam_is_dpm_running, .thermal_setup_fan_table = vegam_thermal_setup_fan_table, }; -- cgit v1.2.1 From f9fb22a21b380b14f70048fe719875e3523ac7d8 Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Tue, 22 May 2018 11:45:41 -0400 Subject: drm/amdgpu: Update GFX info structure to match what vega20 used Update to the latest version from the vbios team. 
Signed-off-by: Shaoyun Liu Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 8 +++----- drivers/gpu/drm/amd/include/atomfirmware.h | 3 ++- 2 files changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 7014d5875d5b..236915849cfe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -354,11 +354,9 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth); adev->gfx.config.double_offchip_lds_buf = gfx_info->v24.gc_double_offchip_lds_buffer; - adev->gfx.cu_info.wave_front_size = gfx_info->v24.gc_wave_size; - adev->gfx.cu_info.max_waves_per_simd = - le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd); - adev->gfx.cu_info.max_scratch_slots_per_cu = - gfx_info->v24.gc_max_scratch_slots_per_cu; + adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size); + adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd); + adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu; adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size); return 0; default: diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index fd5e80c92ed0..c6c1666ac120 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -1240,7 +1240,6 @@ struct atom_gfx_info_v2_4 { uint8_t active_cu_per_sh; uint8_t active_rb_per_se; uint16_t gcgoldenoffset; - uint32_t rm21_sram_vmin_value; uint16_t gc_num_gprs; uint16_t gc_gsprim_buff_depth; uint16_t gc_parameter_cache_depth; @@ -1251,6 +1250,8 @@ struct atom_gfx_info_v2_4 { uint8_t gc_gs_table_depth; uint8_t gc_double_offchip_lds_buffer; uint8_t gc_max_scratch_slots_per_cu; + uint32_t sram_rm_fuses_val; + uint32_t sram_custom_rm_fuses_val; }; /* -- cgit v1.2.1 From b8f3439fa5358ac84d29fa2f4afa115500dec74c Mon Sep 17 00:00:00 2001 From: David Francis Date: Thu, 24 May 2018 10:40:12 -0400 Subject: drm/amd/display: Remove use of division operator for long longs In fixed31_32.h, in dc_fixpt_shl,'/' was used for division of one long long int by another long long int. As there is no inbuilt long long int division function in c, gcc inserted its own. However, gcc does not link the library that contains this function. 
To avoid this, use bitwise operators instead of / Reviewed-by: Alex Deucher Signed-off-by: David Francis Reviewed-by: Dmytro Laktyushkin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 76f64e910422..bb0d4ebba9f0 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -209,7 +209,7 @@ static inline struct fixed31_32 dc_fixpt_clamp( static inline struct fixed31_32 dc_fixpt_shl(struct fixed31_32 arg, unsigned char shift) { ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) || - ((arg.value < 0) && (arg.value >= (LLONG_MIN / (1LL << shift))))); + ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift)))); arg.value = arg.value << shift; -- cgit v1.2.1 From c3032fd9673468783bb20326ead823a2f321a522 Mon Sep 17 00:00:00 2001 From: Tom Stellard Date: Thu, 24 May 2018 14:07:14 -0700 Subject: drm/amdgpu: Use dev_info() to report amdkfd is not supported for this ASIC This is an important message, so it should be visible to users without having to enable extra debugging. Signed-off-by: Tom Stellard Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 95fcbd8a4bf3..8f6f45567bfa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -103,7 +103,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); break; default: - dev_dbg(adev->dev, "kfd not supported on this ASIC\n"); + dev_info(adev->dev, "kfd not supported on this ASIC\n"); return; } -- cgit v1.2.1 From ebe1d22b57b86b6739f2739b5a0f52435596d84d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 25 May 2018 17:50:09 +0200 Subject: drm/amdgpu: fix 32-bit build warning Casting a pointer to a 64-bit type causes a warning on 32-bit targets: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:24: error: cast from pointer to integer of different size [-Werror=pointer-to-int-cast] lower_32_bits((uint64_t)wptr)); ^ drivers/gpu/drm/amd/amdgpu/amdgpu.h:1701:53: note: in definition of macro 'WREG32' #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0) ^ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c:473:10: note: in expansion of macro 'lower_32_bits' lower_32_bits((uint64_t)wptr)); ^~~~~~~~~~~~~ The correct method is to cast to 'uintptr_t'. 
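A small standalone illustration of the difference (wptr_to_u64 is a hypothetical helper used only for this example and needs <stdint.h>):

    #include <stdint.h>

    static inline uint64_t wptr_to_u64(const uint32_t *wptr)
    {
            /* (uint64_t)wptr warns on 32-bit builds because the pointer is
             * only 4 bytes wide; uintptr_t is always exactly pointer-sized,
             * so the intermediate cast is silent and still widens cleanly
             * to 64 bits in the return value.
             */
            return (uintptr_t)wptr;
    }

The result can then be split with lower_32_bits()/upper_32_bits() exactly as in the hunk below.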
Fixes: d5a114a6c5f7 ("drm/amdgpu: Add GFXv9 kfd2kgd interface functions") Signed-off-by: Arnd Bergmann Reviewed-by: Oded Gabbay Signed-off-by: Oded Gabbay --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 8f37991df61b..f0c0d3953f69 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -470,9 +470,9 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), upper_32_bits(guessed_wptr)); WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), - lower_32_bits((uint64_t)wptr)); + lower_32_bits((uintptr_t)wptr)); WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), - upper_32_bits((uint64_t)wptr)); + upper_32_bits((uintptr_t)wptr)); WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1), get_queue_mask(adev, pipe_id, queue_id)); } -- cgit v1.2.1 From bfdec234047889f4f6af1ec45c7c502a4405b3fb Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Fri, 18 May 2018 17:07:06 -0400 Subject: drm/amd/display: Implement dm_pp_get_clock_levels_by_type_with_latency This is required so we use the correct minimum clocks for Vega. Without this pplib will never be able to enter the lowest clock states. Signed-off-by: Harry Wentland Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 46 +++++++++++++++++++++- 1 file changed, 44 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index 0229c7edb8ad..ead3d21545b1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -234,6 +234,34 @@ static void pp_to_dc_clock_levels( } } +static void pp_to_dc_clock_levels_with_latency( + const struct pp_clock_levels_with_latency *pp_clks, + struct dm_pp_clock_levels_with_latency *clk_level_info, + enum dm_pp_clock_type dc_clk_type) +{ + uint32_t i; + + if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { + DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), + pp_clks->num_levels, + DM_PP_MAX_CLOCK_LEVELS); + + clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; + } else + clk_level_info->num_levels = pp_clks->num_levels; + + DRM_DEBUG("DM_PPLIB: values for %s clock\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); + + for (i = 0; i < clk_level_info->num_levels; i++) { + DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz); + /* translate 10kHz to kHz */ + clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; + clk_level_info->data[i].latency_in_us = pp_clks->data[i].clocks_in_khz; + } +} + bool dm_pp_get_clock_levels_by_type( const struct dc_context *ctx, enum dm_pp_clock_type clk_type, @@ -311,8 +339,22 @@ bool dm_pp_get_clock_levels_by_type_with_latency( enum dm_pp_clock_type clk_type, struct dm_pp_clock_levels_with_latency *clk_level_info) { - /* TODO: to be implemented */ - return false; + struct amdgpu_device *adev = ctx->driver_context; + void *pp_handle = adev->powerplay.pp_handle; + struct pp_clock_levels_with_latency pp_clks = { 0 }; + const struct amd_pm_funcs *pp_funcs = 
adev->powerplay.pp_funcs; + + if (!pp_funcs->get_clock_by_type_with_latency) + return false; + + if (pp_funcs->get_clock_by_type_with_latency(pp_handle, + dc_to_pp_clock_type(clk_type), + &pp_clks)) + return false; + + pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); + + return true; } bool dm_pp_get_clock_levels_by_type_with_voltage( -- cgit v1.2.1 From adea72c5046f7faffff969ece04c3f31e669edf4 Mon Sep 17 00:00:00 2001 From: kbuild test robot Date: Fri, 25 May 2018 02:54:45 +0800 Subject: drm/amdgpu: vcn_v1_0_is_idle() can be static Fixes: 9b4c412a654c ("drm/amdgpu: Add static CG control for VCN on RV") Signed-off-by: kbuild test robot Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 110b294ebed3..29684c3ea4ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -769,14 +769,14 @@ static int vcn_v1_0_stop(struct amdgpu_device *adev) return 0; } -bool vcn_v1_0_is_idle(void *handle) +static bool vcn_v1_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; return (RREG32_SOC15(VCN, 0, mmUVD_STATUS) == 0x2); } -int vcn_v1_0_wait_for_idle(void *handle) +static int vcn_v1_0_wait_for_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; int ret = 0; -- cgit v1.2.1 From 10dd2b865393bb45526ca342fe69207341f89fd5 Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Tue, 29 May 2018 09:59:13 -0400 Subject: drm/amd/display: Fix wrong latency assignment for VEGA clock levels Also drop wrong 10kHz comment Fixes: drm/amd/display: Implement dm_pp_get_clock_levels_by_type_with_latency Signed-off-by: Harry Wentland Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index ead3d21545b1..d5e6b45fd6e6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -256,9 +256,8 @@ static void pp_to_dc_clock_levels_with_latency( for (i = 0; i < clk_level_info->num_levels; i++) { DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz); - /* translate 10kHz to kHz */ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; - clk_level_info->data[i].latency_in_us = pp_clks->data[i].clocks_in_khz; + clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; } } -- cgit v1.2.1 From 7bee0572e31f1f5963ecc19d55f64f384d0b155b Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Mon, 28 May 2018 11:22:17 -0400 Subject: drm/amd/pp: Add cases for getting phys and disp clks for SMU10 Add case options to retrieve either physical or display clocks with voltage from SMU controller that are needed by display driver. 
Signed-off-by: Mikita Lipski Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 85f84f4d8be5..e160b0577e66 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -1000,6 +1000,12 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, case amd_pp_soc_clock: pclk_vol_table = pinfo->vdd_dep_on_socclk; break; + case amd_pp_disp_clock: + pclk_vol_table = pinfo->vdd_dep_on_dispclk; + break; + case amd_pp_phy_clock: + pclk_vol_table = pinfo->vdd_dep_on_phyclk; + break; default: return -EINVAL; } -- cgit v1.2.1 From bda31a24dc5c03fd76832c4d672fba8355e3aa44 Mon Sep 17 00:00:00 2001 From: Deepak Sharma Date: Tue, 22 May 2018 15:31:23 -0700 Subject: drm/amdgpu: Use GTT for dumb buffer if sg display enabled (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When vram size <= THRESHOLD(256M) lets use GTT for dumb buffer allocation. As SG will be enabled with vram size <= 256M scan out will not be an issue. v2: Use amdgpu_display_supported_domains to get supported domain. Signed-off-by: Deepak Sharma Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 2c8e27370284..63758db5e2ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -30,6 +30,7 @@ #include #include #include "amdgpu.h" +#include "amdgpu_display.h" void amdgpu_gem_object_free(struct drm_gem_object *gobj) { @@ -749,15 +750,20 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct amdgpu_device *adev = dev->dev_private; struct drm_gem_object *gobj; uint32_t handle; + u32 domain = amdgpu_display_supported_domains(adev); int r; args->pitch = amdgpu_align_pitch(adev, args->width, DIV_ROUND_UP(args->bpp, 8), 0); args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); + if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { + domain = AMDGPU_GEM_DOMAIN_VRAM; + if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) + domain = AMDGPU_GEM_DOMAIN_GTT; + } - r = amdgpu_gem_object_create(adev, args->size, 0, - AMDGPU_GEM_DOMAIN_VRAM, + r = amdgpu_gem_object_create(adev, args->size, 0, domain, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, false, NULL, &gobj); if (r) -- cgit v1.2.1 From 84b74608442d00fbdcd233e3230b3068b0ab9b18 Mon Sep 17 00:00:00 2001 From: Deepak Sharma Date: Fri, 25 May 2018 17:12:29 -0700 Subject: drm/amdgpu: Add helper function to get buffer domain MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move logic of getting supported domain to a helper function Signed-off-by: Deepak Sharma Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 10 +++------- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 17 ++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3 ++- 3 files changed, 17 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 63758db5e2ea..556406a44da3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -750,19 +750,15 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct amdgpu_device *adev = dev->dev_private; struct drm_gem_object *gobj; uint32_t handle; - u32 domain = amdgpu_display_supported_domains(adev); + u32 domain; int r; args->pitch = amdgpu_align_pitch(adev, args->width, DIV_ROUND_UP(args->bpp, 8), 0); args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); - if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { - domain = AMDGPU_GEM_DOMAIN_VRAM; - if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) - domain = AMDGPU_GEM_DOMAIN_GTT; - } - + domain = amdgpu_bo_get_preferred_pin_domain(adev, + amdgpu_display_supported_domains(adev)); r = amdgpu_gem_object_create(adev, args->size, 0, domain, AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, false, NULL, &gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 6a9e46ae7f0a..5e4e1bd90383 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -703,11 +703,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, /* This assumes only APU display buffers are pinned with (VRAM|GTT). * See function amdgpu_display_supported_domains() */ - if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { - domain = AMDGPU_GEM_DOMAIN_VRAM; - if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) - domain = AMDGPU_GEM_DOMAIN_GTT; - } + domain = amdgpu_bo_get_preferred_pin_domain(adev, domain); if (bo->pin_count) { uint32_t mem_type = bo->tbo.mem.mem_type; @@ -1066,3 +1062,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) return bo->tbo.offset; } + +uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, + uint32_t domain) +{ + if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) { + domain = AMDGPU_GEM_DOMAIN_VRAM; + if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD) + domain = AMDGPU_GEM_DOMAIN_GTT; + } + return domain; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 540e03fa159f..731748033878 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -289,7 +289,8 @@ int amdgpu_bo_restore_from_shadow(struct amdgpu_device *adev, struct reservation_object *resv, struct dma_fence **fence, bool direct); - +uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev, + uint32_t domain); /* * sub allocation -- cgit v1.2.1 From 4c6530fd66399182d0332c5ed821ea473bdcd7c3 Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Fri, 25 May 2018 10:53:39 -0400 Subject: drm/amdgpu: remove unnecessary scheduler entity for VCN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It should be stateless, and no need for scheduler to take care specially. 
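With the entities gone, the decode test messages are simply scheduled directly on the ring. A condensed view of the submit path after this change (taken from the hunk below, error handling trimmed):

    r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
    job->fence = dma_fence_get(f);
    if (r)
            goto err_free;

    amdgpu_job_free(job);
    amdgpu_bo_fence(bo, f, false);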
Signed-off-by: Leo Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 51 +++++++-------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 2 -- 2 files changed, 10 insertions(+), 43 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 8851bcdfc260..6fd606f90cb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -49,8 +49,6 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work); int amdgpu_vcn_sw_init(struct amdgpu_device *adev) { - struct amdgpu_ring *ring; - struct drm_sched_rq *rq; unsigned long bo_size; const char *fw_name; const struct common_firmware_header *hdr; @@ -102,24 +100,6 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) return r; } - ring = &adev->vcn.ring_dec; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_dec, - rq, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up VCN dec run queue.\n"); - return r; - } - - ring = &adev->vcn.ring_enc[0]; - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - r = drm_sched_entity_init(&ring->sched, &adev->vcn.entity_enc, - rq, NULL); - if (r != 0) { - DRM_ERROR("Failed setting up VCN enc run queue.\n"); - return r; - } - return 0; } @@ -129,10 +109,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) kfree(adev->vcn.saved_bo); - drm_sched_entity_fini(&adev->vcn.ring_dec.sched, &adev->vcn.entity_dec); - - drm_sched_entity_fini(&adev->vcn.ring_enc[0].sched, &adev->vcn.entity_enc); - amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo, &adev->vcn.gpu_addr, (void **)&adev->vcn.cpu_addr); @@ -278,7 +254,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring) } static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, - struct amdgpu_bo *bo, bool direct, + struct amdgpu_bo *bo, struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; @@ -306,19 +282,12 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, } ib->length_dw = 16; - if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); - job->fence = dma_fence_get(f); - if (r) - goto err_free; + r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); + job->fence = dma_fence_get(f); + if (r) + goto err_free; - amdgpu_job_free(job); - } else { - r = amdgpu_job_submit(job, ring, &adev->vcn.entity_dec, - AMDGPU_FENCE_OWNER_UNDEFINED, &f); - if (r) - goto err_free; - } + amdgpu_job_free(job); amdgpu_bo_fence(bo, f, false); amdgpu_bo_unreserve(bo); @@ -370,11 +339,11 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand for (i = 14; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_vcn_dec_send_msg(ring, bo, true, fence); + return amdgpu_vcn_dec_send_msg(ring, bo, fence); } static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence) + struct dma_fence **fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_bo *bo = NULL; @@ -396,7 +365,7 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han for (i = 6; i < 1024; ++i) msg[i] = cpu_to_le32(0x0); - return amdgpu_vcn_dec_send_msg(ring, bo, direct, fence); + return amdgpu_vcn_dec_send_msg(ring, bo, fence); } int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) @@ -410,7 +379,7 @@ int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) goto error; } - r = 
amdgpu_vcn_dec_get_destroy_msg(ring, 1, true, &fence); + r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &fence); if (r) { DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 181e6afa9847..773010b9ff15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -67,8 +67,6 @@ struct amdgpu_vcn { struct amdgpu_ring ring_dec; struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS]; struct amdgpu_irq_src irq; - struct drm_sched_entity entity_dec; - struct drm_sched_entity entity_enc; unsigned num_enc_rings; }; -- cgit v1.2.1 From e03fd3f300f6184c1264186a4c815e93bf658abb Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Wed, 16 May 2018 16:46:18 -0400 Subject: drm/amd/display: Do not limit color depth to 8bpc Delete if statement that would force any display's color depth higher than 8 bpc to 8 Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1ce10bc2d37b..52e57b52cdbb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2095,12 +2095,6 @@ convert_color_depth_from_display_info(const struct drm_connector *connector) { uint32_t bpc = connector->display_info.bpc; - /* Limited color depth to 8bit - * TODO: Still need to handle deep color - */ - if (bpc > 8) - bpc = 8; - switch (bpc) { case 0: /* Temporary Work around, DRM don't parse color depth for -- cgit v1.2.1 From 2b6199a1d1b70fccd62aed961ba4c2b979ae499c Mon Sep 17 00:00:00 2001 From: Roman Li Date: Thu, 3 May 2018 13:29:42 -0400 Subject: drm/amd/display: replace msleep with udelay in fbc path FBC enabling and disabling path has msleep which leads to BUG hit when called in atomic context, hence this patch replaces msleeps with udelays appropriately. 
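The key point is that the total wait budget stays roughly the same, about 100 ms in both loops; only the granularity changes, from a sleeping 1 ms or 10 ms wait to a busy wait in 10 us or 100 us steps, which is legal in atomic context. Schematically (condition_met() is a placeholder for the register reads in the two real loops):

    /* old: 100 iterations x msleep(1)    ~= 100 ms, may sleep     */
    /* new: 10000 iterations x udelay(10) ~= 100 ms, never sleeps  */
    for (retry = 10000; retry > 0; retry--) {
            if (condition_met())
                    break;
            udelay(10);
    }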
Signed-off-by: Shirish S Signed-off-by: Roman Li Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c index 9150d2694450..e2994d337044 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_compressor.c @@ -121,10 +121,10 @@ static void reset_lb_on_vblank(struct dc_context *ctx) frame_count = dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT); - for (retry = 100; retry > 0; retry--) { + for (retry = 10000; retry > 0; retry--) { if (frame_count != dm_read_reg(ctx, mmCRTC_STATUS_FRAME_COUNT)) break; - msleep(1); + udelay(10); } if (!retry) dm_error("Frame count did not increase for 100ms.\n"); @@ -147,14 +147,14 @@ static void wait_for_fbc_state_changed( uint32_t addr = mmFBC_STATUS; uint32_t value; - while (counter < 10) { + while (counter < 1000) { value = dm_read_reg(cp110->base.ctx, addr); if (get_reg_field_value( value, FBC_STATUS, FBC_ENABLE_STATUS) == enabled) break; - msleep(10); + udelay(100); counter++; } -- cgit v1.2.1 From aed15309b9b2009da9dfd209deaf9af8660f50a1 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Tue, 1 May 2018 11:33:25 -0400 Subject: drm/amd/display: Release fake sink If connector doesn't have a sink, fake sink is created, but never released as it assumed that its destroyed with the stream it is used for. But now sink is released before the stream maintaing refcount consistency. This way we also avoid assigning anything to connector keeping all the operation local. 
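In other words, the fake sink now lives only for the duration of create_stream_for_sink(): it is created locally, handed to dc_create_stream_for_sink(), which is expected to hold its own reference, and then released, instead of being parked on the connector. Roughly (a condensed sketch of the flow in the diff below):

    struct dc_sink *sink;

    sink = aconnector->dc_sink ? aconnector->dc_sink
                               : create_fake_sink(aconnector);
    if (!sink)
            return NULL;

    stream = dc_create_stream_for_sink(sink);
    /* ... fill in timing, audio info, etc. ... */

    /* drop the temporary reference; only fake (virtual) sinks were
     * created locally, real sinks still belong to the connector
     */
    if (sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
            dc_sink_release(sink);
    return stream;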
Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 31 ++++++++++++----------- 1 file changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 52e57b52cdbb..e2bf4fee4eb2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2310,27 +2310,22 @@ decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, } } -static int create_fake_sink(struct amdgpu_dm_connector *aconnector) +static struct dc_sink * +create_fake_sink(struct amdgpu_dm_connector *aconnector) { - struct dc_sink *sink = NULL; struct dc_sink_init_data sink_init_data = { 0 }; - + struct dc_sink *sink = NULL; sink_init_data.link = aconnector->dc_link; sink_init_data.sink_signal = aconnector->dc_link->connector_signal; sink = dc_sink_create(&sink_init_data); if (!sink) { DRM_ERROR("Failed to create sink!\n"); - return -ENOMEM; + return NULL; } - sink->sink_signal = SIGNAL_TYPE_VIRTUAL; - aconnector->fake_enable = true; - aconnector->dc_sink = sink; - aconnector->dc_link->local_sink = sink; - - return 0; + return sink; } static void set_multisync_trigger_params( @@ -2393,7 +2388,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream = NULL; struct drm_display_mode mode = *drm_mode; bool native_mode_found = false; - + struct dc_sink *sink = NULL; if (aconnector == NULL) { DRM_ERROR("aconnector is NULL!\n"); return stream; @@ -2411,15 +2406,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, return stream; } - if (create_fake_sink(aconnector)) + sink = create_fake_sink(aconnector); + if (!sink) return stream; + } else { + sink = aconnector->dc_sink; } - stream = dc_create_stream_for_sink(aconnector->dc_sink); + stream = dc_create_stream_for_sink(sink); if (stream == NULL) { DRM_ERROR("Failed to create stream for sink!\n"); - return stream; + goto finish; } list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { @@ -2458,12 +2456,15 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, fill_audio_info( &stream->audio_info, drm_connector, - aconnector->dc_sink); + sink); update_stream_signal(stream); if (dm_state && dm_state->freesync_capable) stream->ignore_msa_timing_param = true; +finish: + if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL) + dc_sink_release(sink); return stream; } -- cgit v1.2.1 From fb5fb63aa91aa7b353e3f7f0031299760f65ecf2 Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Tue, 1 May 2018 19:49:03 -0400 Subject: drm/amd/display: add register offset != 0 check. 
Signed-off-by: Charlene Liu Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 3 ++- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index 0a6d483dc046..c0631756cd89 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -72,7 +72,8 @@ static void dce110_update_generic_info_packet( uint32_t max_retries = 50; /*we need turn on clock before programming AFMT block*/ - REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); + if (REG(AFMT_CNTL)) + REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); if (REG(AFMT_VBI_PACKET_CONTROL1)) { if (packet_index >= 8) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index a92fb0aa2ff3..c29052b6da5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1004,9 +1004,9 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option) /*don't free audio if it is from retrain or internal disable stream*/ if (option == FREE_ACQUIRED_RESOURCE && dc->caps.dynamic_audio == true) { /*we have to dynamic arbitrate the audio endpoints*/ - pipe_ctx->stream_res.audio = NULL; /*we free the resource, need reset is_audio_acquired*/ update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false); + pipe_ctx->stream_res.audio = NULL; } /* TODO: notify audio driver for if audio modes list changed -- cgit v1.2.1 From 9356badb2636b0afe2b34a8133ab246547cdf9ca Mon Sep 17 00:00:00 2001 From: Roman Li Date: Thu, 17 May 2018 18:08:54 -0400 Subject: drm/amd/display: check if audio clk enable is applicable Fixing warning on dce10 with HDMI display. Signed-off-by: Roman Li Reviewed-by: Charlene Liu Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c index c0631756cd89..c0e813c7ddd4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c @@ -720,7 +720,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets( const uint32_t *content = (const uint32_t *) &info_frame->avi.sb[0]; /*we need turn on clock before programming AFMT block*/ - REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); + if (REG(AFMT_CNTL)) + REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); REG_WRITE(AFMT_AVI_INFO0, content[0]); -- cgit v1.2.1 From 01884c02c46ad1f54cfc7eb43633fad199ab8007 Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Thu, 10 May 2018 15:12:09 -0400 Subject: drm/amd/display: DP component depth 16 bpc Add register programming to support 16bpc component depth for DP. 
Signed-off-by: Eric Bernstein Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 653b7b2efe2e..c928ee4cd382 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -319,6 +319,10 @@ void enc1_stream_encoder_dp_set_stream_attribute( REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, DP_COMPONENT_PIXEL_DEPTH_12BPC); break; + case COLOR_DEPTH_161616: + REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, + DP_COMPONENT_PIXEL_DEPTH_16BPC); + break; default: REG_UPDATE(DP_PIXEL_FORMAT, DP_COMPONENT_DEPTH, DP_COMPONENT_PIXEL_DEPTH_6BPC); -- cgit v1.2.1 From aa6d4a59d686d3172012c19874add120e02c113f Mon Sep 17 00:00:00 2001 From: Eric Bernstein Date: Mon, 14 May 2018 16:55:07 -0400 Subject: drm/amd/display: Set TMZ and DCC for secondary surface Add register programming to support TMZ and DCC on secondary surfaces. Signed-off-by: Eric Bernstein Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 14 ++++++++++---- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h | 8 ++++++++ 2 files changed, 18 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index d2ab78b35a7a..c28085be39ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -396,11 +396,15 @@ bool hubp1_program_surface_flip_and_addr( if (address->grph_stereo.right_addr.quad_part == 0) break; - REG_UPDATE_4(DCSURF_SURFACE_CONTROL, + REG_UPDATE_8(DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_TMZ, address->tmz_surface, PRIMARY_SURFACE_TMZ_C, address->tmz_surface, PRIMARY_META_SURFACE_TMZ, address->tmz_surface, - PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface); + PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface, + SECONDARY_SURFACE_TMZ, address->tmz_surface, + SECONDARY_SURFACE_TMZ_C, address->tmz_surface, + SECONDARY_META_SURFACE_TMZ, address->tmz_surface, + SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface); if (address->grph_stereo.right_meta_addr.quad_part != 0) { @@ -459,9 +463,11 @@ void hubp1_dcc_control(struct hubp *hubp, bool enable, uint32_t dcc_ind_64b_blk = independent_64b_blks ? 
1 : 0; struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); - REG_UPDATE_2(DCSURF_SURFACE_CONTROL, + REG_UPDATE_4(DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, dcc_en, - PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk); + PRIMARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk, + SECONDARY_SURFACE_DCC_EN, dcc_en, + SECONDARY_SURFACE_DCC_IND_64B_BLK, dcc_ind_64b_blk); } void hubp1_program_surface_config( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index af384034398f..d901d5092969 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -312,6 +312,12 @@ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_META_SURFACE_TMZ_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_EN, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, PRIMARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_TMZ_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_META_SURFACE_TMZ_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_EN, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SURFACE_CONTROL, SECONDARY_SURFACE_DCC_IND_64B_BLK, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, DET_BUF_PLANE1_BASE_ADDRESS, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CB_B, mask_sh),\ HUBP_SF(HUBPRET0_HUBPRET_CONTROL, CROSSBAR_SRC_CR_R, mask_sh),\ @@ -489,6 +495,8 @@ type SECONDARY_META_SURFACE_TMZ_C;\ type PRIMARY_SURFACE_DCC_EN;\ type PRIMARY_SURFACE_DCC_IND_64B_BLK;\ + type SECONDARY_SURFACE_DCC_EN;\ + type SECONDARY_SURFACE_DCC_IND_64B_BLK;\ type DET_BUF_PLANE1_BASE_ADDRESS;\ type CROSSBAR_SRC_CB_B;\ type CROSSBAR_SRC_CR_R;\ -- cgit v1.2.1 From df099b9b60c6c378b1cd2024d1a21a87459b6614 Mon Sep 17 00:00:00 2001 From: "Leo (Sunpeng) Li" Date: Wed, 16 May 2018 10:31:30 -0400 Subject: drm/amd/display: Destroy connector state on reset When a DRM mode reset is called on resume, the connector state's destructor is not called. This leaves a dangling reference on the CRTC commit object, which was obtained by the connector state during commit setup. 
Signed-off-by: Leo (Sunpeng) Li Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e2bf4fee4eb2..d913c0a029f2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2709,6 +2709,9 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) struct dm_connector_state *state = to_dm_connector_state(connector->state); + if (connector->state) + __drm_atomic_helper_connector_destroy_state(connector->state); + kfree(state); state = kzalloc(sizeof(*state), GFP_KERNEL); @@ -2719,8 +2722,7 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) state->underscan_hborder = 0; state->underscan_vborder = 0; - connector->state = &state->base; - connector->state->connector = connector; + __drm_atomic_helper_connector_reset(connector, &state->base); } } -- cgit v1.2.1 From 0b19fdc45feffd7569c081fe32a258df3c8ebb9b Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Thu, 17 May 2018 10:08:10 -0400 Subject: drm/amd/display: fix dscl_manual_ratio_init This change will fix wb and display scaling when ratios of 4 or more are involved Signed-off-by: Dmytro Laktyushkin Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c | 5 +++++ drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c | 3 +-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h | 6 +----- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c | 8 ++++---- drivers/gpu/drm/amd/display/include/fixed31_32.h | 2 ++ 5 files changed, 13 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c index e61dd97d0928..f28989860fd8 100644 --- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c +++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c @@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy( return min_clamp; } +unsigned int dc_fixpt_u3d19(struct fixed31_32 arg) +{ + return ux_dy(arg.value, 3, 19); +} + unsigned int dc_fixpt_u2d19(struct fixed31_32 arg) { return ux_dy(arg.value, 2, 19); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index 46a35c7f01df..c69fa4bfab0a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -132,8 +132,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) - -bool dpp_get_optimal_number_of_taps( +static bool dpp_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index 5944a3ba0409..e862cafa6501 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1424,12 +1424,8 @@ void dpp1_set_degamma( enum ipp_degamma_mode mode); void dpp1_set_degamma_pwl(struct dpp *dpp_base, - const struct pwl_params *params); + const struct pwl_params *params); -bool dpp_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data 
*scl_data, - const struct scaling_taps *in_taps); void dpp_read_state(struct dpp *dpp_base, struct dcn_dpp_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 4ddd6273d5a5..f862fd148cca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -565,16 +565,16 @@ static void dpp1_dscl_set_manual_ratio_init( uint32_t init_int = 0; REG_SET(SCL_HORZ_FILTER_SCALE_RATIO, 0, - SCL_H_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.horz) << 5); + SCL_H_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.horz) << 5); REG_SET(SCL_VERT_FILTER_SCALE_RATIO, 0, - SCL_V_SCALE_RATIO, dc_fixpt_u2d19(data->ratios.vert) << 5); + SCL_V_SCALE_RATIO, dc_fixpt_u3d19(data->ratios.vert) << 5); REG_SET(SCL_HORZ_FILTER_SCALE_RATIO_C, 0, - SCL_H_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.horz_c) << 5); + SCL_H_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.horz_c) << 5); REG_SET(SCL_VERT_FILTER_SCALE_RATIO_C, 0, - SCL_V_SCALE_RATIO_C, dc_fixpt_u2d19(data->ratios.vert_c) << 5); + SCL_V_SCALE_RATIO_C, dc_fixpt_u3d19(data->ratios.vert_c) << 5); /* * 0.24 format for fraction, first five bits zeroed diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index bb0d4ebba9f0..a981b3e99ab3 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -496,6 +496,8 @@ static inline int dc_fixpt_ceil(struct fixed31_32 arg) * fractional */ +unsigned int dc_fixpt_u3d19(struct fixed31_32 arg); + unsigned int dc_fixpt_u2d19(struct fixed31_32 arg); unsigned int dc_fixpt_u0d19(struct fixed31_32 arg); -- cgit v1.2.1 From 4ea7fc09539bd2399c1fa7acea14529406120d9e Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Thu, 3 May 2018 17:08:51 -0400 Subject: drm/amd/display: Do not program interrupt status on disabled crtc Prevent interrupt programming of a crtc on which the stream is disabled and it doesn't have an OTG to reference. Signed-off-by: Mikita Lipski Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index 4be21bf54749..a910f01838ab 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -555,6 +555,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev, return 0; } + if (acrtc->otg_inst == -1) + return 0; + irq_source = dal_irq_type + acrtc->otg_inst; st = (state == AMDGPU_IRQ_STATE_ENABLE); -- cgit v1.2.1 From 794550c6eaf791bfd2e8d70e11aa56fdd6361725 Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Mon, 7 May 2018 15:35:15 -0400 Subject: drm/amd/display: Read DPCD link caps up to and including DP_ADAPTER_CAP DP 1.4 compliance requires 16 bytes to be read when reading link caps, i.e. it requires DP_ADAPTER_CAP to be included. Included it for all DP versions because reading more than required won't fail. 
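For reference, the size change implied by this patch is just the span of DPCD addresses being read. The sketch below computes the old and new buffer sizes; the DPCD offsets are the standard DisplayPort addresses as defined in drm_dp_helper.h, quoted here from memory, so treat the exact values as assumptions.

/* Illustrates the receiver-cap read size before and after the change. */
#include <stdio.h>

#define DP_DPCD_REV                  0x000
#define DP_TRAINING_AUX_RD_INTERVAL  0x00e
#define DP_ADAPTER_CAP               0x00f

int main(void)
{
    unsigned old_len = DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1; /* 15 bytes */
    unsigned new_len = DP_ADAPTER_CAP - DP_DPCD_REV + 1;              /* 16 bytes */

    printf("old dpcd_data[]: %u bytes\n", old_len);
    printf("new dpcd_data[]: %u bytes (includes DP_ADAPTER_CAP)\n", new_len);
    return 0;
}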
Signed-off-by: Nikola Cornij Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 7d609c71394b..3fcb67cbc6cc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2278,7 +2278,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, static bool retrieve_link_cap(struct dc_link *link) { - uint8_t dpcd_data[DP_TRAINING_AUX_RD_INTERVAL - DP_DPCD_REV + 1]; + uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1]; union down_stream_port_count down_strm_port_count; union edp_configuration_cap edp_config_cap; -- cgit v1.2.1 From c733e40c74457ad6aa56cc8b3318e829b8274bef Mon Sep 17 00:00:00 2001 From: Nikola Cornij Date: Wed, 9 May 2018 13:11:35 -0400 Subject: drm/amd/display: Read DP_SINK_COUNT_ESI range on HPD for DP 1.4 DP 1.4 compliance now requires that registers at DP_SINK_COUNT_ESI range (0x2002-0x2003, 0x200c-0x200f) are read instead of DP_SINK_COUNT range (0x200-0x2005. Signed-off-by: Nikola Cornij Reviewed-by: Tony Cheng Acked-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 35 ++++++++++++++++++++---- 1 file changed, 30 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 3fcb67cbc6cc..7857cb42b3e6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1630,17 +1630,42 @@ static enum dc_status read_hpd_rx_irq_data( struct dc_link *link, union hpd_irq_data *irq_data) { + static enum dc_status retval; + /* The HW reads 16 bytes from 200h on HPD, * but if we get an AUX_DEFER, the HW cannot retry * and this causes the CTS tests 4.3.2.1 - 3.2.4 to * fail, so we now explicitly read 6 bytes which is * the req from the above mentioned test cases. + * + * For DP 1.4 we need to read those from 2002h range. */ - return core_link_read_dpcd( - link, - DP_SINK_COUNT, - irq_data->raw, - sizeof(union hpd_irq_data)); + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT, + irq_data->raw, + sizeof(union hpd_irq_data)); + else { + /* Read 2 bytes at this location,... */ + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT_ESI, + irq_data->raw, + 2); + + if (retval != DC_OK) + return retval; + + /* ... then read remaining 4 at the other location */ + retval = core_link_read_dpcd( + link, + DP_LANE0_1_STATUS_ESI, + &irq_data->raw[2], + 4); + } + + return retval; } static bool allow_hpd_rx_irq(const struct dc_link *link) -- cgit v1.2.1 From 20d4ac659c76034586a3ab79489b0940631a65de Mon Sep 17 00:00:00 2001 From: "Leo (Sunpeng) Li" Date: Tue, 29 May 2018 09:51:51 -0400 Subject: drm/amd/display: Fix BUG_ON during CRTC atomic check update MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For cases where the CRTC is inactive (DPMS off), where a modeset is not required, yet the CRTC is still in the atomic state, we should not attempt to update anything on it. Previously, we were relying on the modereset_required() helper to check the above condition. 
However, the function returns false immediately if a modeset is not required, ignoring the CRTC's enable/active state flags. The correct way to filter is by looking at these flags instead. Fixes: e277adc5a06c "drm/amd/display: Hookup color management functions" Bugzilla: https://bugs.freedesktop.org/106194 Signed-off-by: Leo (Sunpeng) Li Reviewed-by: Harry Wentland Tested-by: Michel Dänzer Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d913c0a029f2..0a06941204d7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4765,15 +4765,16 @@ next_crtc: * We want to do dc stream updates that do not require a * full modeset below. */ - if (!enable || !aconnector || modereset_required(new_crtc_state)) + if (!(enable && aconnector && new_crtc_state->enable && + new_crtc_state->active)) continue; /* * Given above conditions, the dc state cannot be NULL because: - * 1. We're attempting to enable a CRTC. Which has a... - * 2. Valid connector attached, and - * 3. User does not want to reset it (disable or mark inactive, - * which can happen on a CRTC that's already disabled). - * => It currently exists. + * 1. We're in the process of enabling CRTCs (just been added + * to the dc context, or already is on the context) + * 2. Has a valid connector attached, and + * 3. Is currently active and enabled. + * => The dc stream state currently exists. */ BUG_ON(dm_new_crtc_state->stream == NULL); -- cgit v1.2.1 From 01d98506eca24f1a6b67374ffb65c2e62a56e8e7 Mon Sep 17 00:00:00 2001 From: Emily Deng Date: Wed, 30 May 2018 10:04:25 +0800 Subject: drm/amdgpu: To get gds, gws and oa from adev->gds (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As now enabled per vm bo feature, the user mode driver won't supply the bo_list generally, for this case, the gdb_base, gds_size, gws_base, gws_size and oa_base, oa_size won't be set. 
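The fallback this patch introduces follows a simple select-then-fill pattern: take the GDS/GWS/OA objects from the bo_list when one was supplied, otherwise use the device-wide defaults, and only fill job fields for objects that actually exist. The sketch below is a reduced user-space model of that logic; the struct names are illustrative stand-ins for amdgpu_bo_list, amdgpu_device and amdgpu_job, not the real definitions.

/* User-space model of the GDS/GWS/OA fallback added in the diff below. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct bo { uint64_t gpu_offset; uint64_t size; };

struct bo_list { struct bo *gds_obj, *gws_obj, *oa_obj; };
struct device  { struct bo *gds_gfx_bo, *gws_gfx_bo, *oa_gfx_bo; };
struct job     { uint64_t gds_base, gds_size, gws_base, gws_size, oa_base, oa_size; };

static void fill_job_gds(struct job *job, const struct bo_list *list,
                         const struct device *dev)
{
    const struct bo *gds, *gws, *oa;

    if (list) {                    /* classic path: UMD handed us a BO list */
        gds = list->gds_obj;
        gws = list->gws_obj;
        oa  = list->oa_obj;
    } else {                       /* per-VM-BO path: no list, use device defaults */
        gds = dev->gds_gfx_bo;
        gws = dev->gws_gfx_bo;
        oa  = dev->oa_gfx_bo;
    }

    if (gds) { job->gds_base = gds->gpu_offset; job->gds_size = gds->size; }
    if (gws) { job->gws_base = gws->gpu_offset; job->gws_size = gws->size; }
    if (oa)  { job->oa_base  = oa->gpu_offset;  job->oa_size  = oa->size;  }
}

int main(void)
{
    struct bo gds = { 0x1000, 4096 };
    struct device dev = { &gds, NULL, NULL };
    struct job job = { 0 };

    fill_job_gds(&job, NULL, &dev);    /* no bo_list: the device GDS BO is used */
    printf("gds_base=0x%llx gds_size=%llu\n",
           (unsigned long long)job.gds_base, (unsigned long long)job.gds_size);
    return 0;
}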
v2: fix warning (Chunming) Signed-off-by: Emily Deng Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 38 ++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 15 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 9c1d491d742e..82312a7bc6ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -522,6 +522,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, struct amdgpu_bo_list_entry *e; struct list_head duplicates; unsigned i, tries = 10; + struct amdgpu_bo *gds; + struct amdgpu_bo *gws; + struct amdgpu_bo *oa; int r; INIT_LIST_HEAD(&p->validated); @@ -652,31 +655,36 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved, p->bytes_moved_vis); + if (p->bo_list) { - struct amdgpu_bo *gds = p->bo_list->gds_obj; - struct amdgpu_bo *gws = p->bo_list->gws_obj; - struct amdgpu_bo *oa = p->bo_list->oa_obj; struct amdgpu_vm *vm = &fpriv->vm; unsigned i; + gds = p->bo_list->gds_obj; + gws = p->bo_list->gws_obj; + oa = p->bo_list->oa_obj; for (i = 0; i < p->bo_list->num_entries; i++) { struct amdgpu_bo *bo = p->bo_list->array[i].robj; p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo); } + } else { + gds = p->adev->gds.gds_gfx_bo; + gws = p->adev->gds.gws_gfx_bo; + oa = p->adev->gds.oa_gfx_bo; + } - if (gds) { - p->job->gds_base = amdgpu_bo_gpu_offset(gds); - p->job->gds_size = amdgpu_bo_size(gds); - } - if (gws) { - p->job->gws_base = amdgpu_bo_gpu_offset(gws); - p->job->gws_size = amdgpu_bo_size(gws); - } - if (oa) { - p->job->oa_base = amdgpu_bo_gpu_offset(oa); - p->job->oa_size = amdgpu_bo_size(oa); - } + if (gds) { + p->job->gds_base = amdgpu_bo_gpu_offset(gds); + p->job->gds_size = amdgpu_bo_size(gds); + } + if (gws) { + p->job->gws_base = amdgpu_bo_gpu_offset(gws); + p->job->gws_size = amdgpu_bo_size(gws); + } + if (oa) { + p->job->oa_base = amdgpu_bo_gpu_offset(oa); + p->job->oa_size = amdgpu_bo_size(oa); } if (!r && p->uf_entry.robj) { -- cgit v1.2.1 From ee5309d5f3eba16d7901d29179d03d4336319fc0 Mon Sep 17 00:00:00 2001 From: Chunming Zhou Date: Wed, 30 May 2018 11:12:08 +0800 Subject: drm/amdgpu: gds bo must not be per-vm-bo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In per-vm-bo case, there could be no bo list. But gds bo created from user space must be passed to bo list. So adding a check to prevent to creat gds bo as per-vm-bo. 
Signed-off-by: Chunming Zhou Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 556406a44da3..5fb156a01774 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -236,6 +236,13 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, /* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { + if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { + /* if gds bo is created from user space, it must be + * passed to bo list + */ + DRM_ERROR("GDS bo cannot be per-vm-bo\n"); + return -EINVAL; + } flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS; if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS) size = size << AMDGPU_GDS_SHIFT; -- cgit v1.2.1 From ee7a99c79aa3b15e9b6157f8949a1ad8c170f17f Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 28 May 2018 08:53:03 +0800 Subject: drm/amdgpu: correct SMU11 SYSPLL0 clock id values The SMU11 SYSPLL0 clock ids were assigned wrong values. Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/atomfirmware.h | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index c6c1666ac120..092d800b703a 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -2026,17 +2026,15 @@ enum atom_smu11_syspll_id { SMU11_SYSPLL3_1_ID = 6, }; - enum atom_smu11_syspll0_clock_id { - SMU11_SYSPLL0_SOCCLK_ID = 0, // SOCCLK - SMU11_SYSPLL0_MP0CLK_ID = 1, // MP0CLK - SMU11_SYSPLL0_DCLK_ID = 2, // DCLK - SMU11_SYSPLL0_VCLK_ID = 3, // VCLK - SMU11_SYSPLL0_ECLK_ID = 4, // ECLK + SMU11_SYSPLL0_ECLK_ID = 0, // ECLK + SMU11_SYSPLL0_SOCCLK_ID = 1, // SOCCLK + SMU11_SYSPLL0_MP0CLK_ID = 2, // MP0CLK + SMU11_SYSPLL0_DCLK_ID = 3, // DCLK + SMU11_SYSPLL0_VCLK_ID = 4, // VCLK SMU11_SYSPLL0_DCEFCLK_ID = 5, // DCEFCLK }; - enum atom_smu11_syspll1_0_clock_id { SMU11_SYSPLL1_0_UCLKA_ID = 0, // UCLK_a }; -- cgit v1.2.1 From 2f6a18ebd08e3f41a8e01176821b7203232298c4 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 28 May 2018 08:55:09 +0800 Subject: drm/amd/powerplay: bug fixes for getsmuclockinfo The .syspll_id and .dfsdid fields are not initialized correctly, and a le32_to_cpu conversion is needed on the output.
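The endianness point is worth spelling out: the clock value comes back from the ATOM firmware table in little-endian byte order, so it must be converted to CPU order before being scaled down to 10 kHz units, otherwise big-endian hosts read garbage. The sketch below models this with a portable helper in place of the kernel's le32_to_cpu; the sample bytes are made up.

/* Why the le32_to_cpu matters for the firmware-returned frequency. */
#include <stdio.h>
#include <stdint.h>

static uint32_t le32_to_cpu_portable(const uint8_t b[4])
{
    /* assemble from explicit byte positions: endian-independent */
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
           ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
    /* 600 MHz in Hz (600000000 == 0x23C34600) laid out little-endian,
     * as the firmware would return it in smu_clock_freq_hz */
    const uint8_t raw[4] = { 0x00, 0x46, 0xc3, 0x23 };

    uint32_t freq_hz = le32_to_cpu_portable(raw);
    printf("frequency = %u (10 kHz units)\n", freq_hz / 10000); /* prints 60000 */
    return 0;
}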
Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c index c97b0e5ba43b..5325661fedff 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c @@ -496,7 +496,9 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI uint32_t ix; parameters.clk_id = id; + parameters.syspll_id = 0; parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; + parameters.dfsdid = 0; ix = GetIndexIntoMasterCmdTable(getsmuclockinfo); @@ -505,7 +507,7 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI return -EINVAL; output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)¶meters; - *frequency = output->atom_smu_outputclkfreq.smu_clock_freq_hz / 10000; + *frequency = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; return 0; } -- cgit v1.2.1 From 102e494001c70901a1de212469d3c8d48dbb301a Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 28 May 2018 09:22:09 +0800 Subject: drm/amdgpu: typo fix for vega20 cg flags The AMD_CG_SUPPORT_HDP_LS was wrongly written as AMD_CG_SUPPORT_BIF_LS. Signed-off-by: Evan Quan Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/soc15.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 68b4a22a8892..83f2717fcf81 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -685,6 +685,7 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_BIF_MGCG | AMD_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_HDP_MGCG | + AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_ROM_MGCG | AMD_CG_SUPPORT_VCE_MGCG | AMD_CG_SUPPORT_UVD_MGCG; -- cgit v1.2.1 From 762f52e9e4d237d8d378b5bb495f64073a9ba481 Mon Sep 17 00:00:00 2001 From: Mikita Lipski Date: Tue, 29 May 2018 17:44:36 -0400 Subject: drm/amd/pp: Connect display_clock_voltage_request to a function pointer Get rid of an empty dublicate of smu10_display_clock_voltage_request Add display_clock_voltage_request to smu10 functions struct so it can be called from outside the class and connect the pointer to the function. That way Display driver can finally apply clock voltage requests when needed. 
Signed-off-by: Mikita Lipski Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 64 +++++++++++------------ 1 file changed, 31 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index e160b0577e66..6a6367190bed 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -53,8 +53,37 @@ static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic; static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, - struct pp_display_clock_request *clock_req); + struct pp_display_clock_request *clock_req) +{ + struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); + enum amd_pp_clock_type clk_type = clock_req->clock_type; + uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; + PPSMC_Msg msg; + switch (clk_type) { + case amd_pp_dcf_clock: + if (clk_freq == smu10_data->dcf_actual_hard_min_freq) + return 0; + msg = PPSMC_MSG_SetHardMinDcefclkByFreq; + smu10_data->dcf_actual_hard_min_freq = clk_freq; + break; + case amd_pp_soc_clock: + msg = PPSMC_MSG_SetHardMinSocclkByFreq; + break; + case amd_pp_f_clock: + if (clk_freq == smu10_data->f_actual_hard_min_freq) + return 0; + smu10_data->f_actual_hard_min_freq = clk_freq; + msg = PPSMC_MSG_SetHardMinFclkByFreq; + break; + default: + pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); + return -EINVAL; + } + smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); + + return 0; +} static struct smu10_power_state *cast_smu10_ps(struct pp_hw_power_state *hw_ps) { @@ -1023,39 +1052,7 @@ static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, return 0; } -static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, - struct pp_display_clock_request *clock_req) -{ - struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend); - enum amd_pp_clock_type clk_type = clock_req->clock_type; - uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000; - PPSMC_Msg msg; - - switch (clk_type) { - case amd_pp_dcf_clock: - if (clk_freq == smu10_data->dcf_actual_hard_min_freq) - return 0; - msg = PPSMC_MSG_SetHardMinDcefclkByFreq; - smu10_data->dcf_actual_hard_min_freq = clk_freq; - break; - case amd_pp_soc_clock: - msg = PPSMC_MSG_SetHardMinSocclkByFreq; - break; - case amd_pp_f_clock: - if (clk_freq == smu10_data->f_actual_hard_min_freq) - return 0; - smu10_data->f_actual_hard_min_freq = clk_freq; - msg = PPSMC_MSG_SetHardMinFclkByFreq; - break; - default: - pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); - return -EINVAL; - } - smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq); - - return 0; -} static int smu10_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks) { @@ -1188,6 +1185,7 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = { .set_mmhub_powergating_by_smu = smu10_set_mmhub_powergating_by_smu, .smus_notify_pwe = smu10_smus_notify_pwe, .gfx_off_control = smu10_gfx_off_control, + .display_clock_voltage_request = smu10_display_clock_voltage_request, }; int smu10_init_function_pointers(struct pp_hwmgr *hwmgr) -- cgit v1.2.1 From 9dac0c3fb41056ae48b93e679c2a796c4dcfa8ed Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 30 May 2018 09:34:23 -0500 Subject: drm/amdgpu/display: check if ppfuncs exists before using it Fixes a crash on asics without powerplay yet (e.g., vega20). 
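The guard being added is the usual two-step NULL check on an optional ops table: test the table pointer first, then the specific callback, before calling through it. Below is a minimal stand-alone model, with illustrative types in place of amd_pm_funcs.

/* Minimal model of the NULL guard the next diff adds. */
#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct pm_funcs {
    int (*get_clock_by_type_with_latency)(void *handle, int type, void *out);
};

static bool get_clocks(const struct pm_funcs *pp_funcs, void *handle,
                       int type, void *out)
{
    /* Checking only the callback would dereference a NULL table and crash;
     * check the table first, then the callback. */
    if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
        return false;

    return pp_funcs->get_clock_by_type_with_latency(handle, type, out) == 0;
}

int main(void)
{
    /* ASIC without powerplay: no ops table at all */
    printf("powerplay absent -> %s\n",
           get_clocks(NULL, NULL, 0, NULL) ? "ok" : "skipped safely");
    return 0;
}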
Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c index d5e6b45fd6e6..5a3346124a01 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c @@ -343,7 +343,7 @@ bool dm_pp_get_clock_levels_by_type_with_latency( struct pp_clock_levels_with_latency pp_clks = { 0 }; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (!pp_funcs->get_clock_by_type_with_latency) + if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency) return false; if (pp_funcs->get_clock_by_type_with_latency(pp_handle, -- cgit v1.2.1 From 46defdd6fff70edf6bd21848ee75d927c36e4153 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 30 May 2018 16:52:22 +0800 Subject: drm/amd/pp: Allow underclocking when od table is empty in vbios if max od engine clock limit and memory clock limit are not set in vbios. driver will allow underclocking instand of disable od feature completely. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c | 6 ------ drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c | 6 ------ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 5 ++++- drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 6 ++++++ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 6 ------ 5 files changed, 10 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index f0d48b183d22..35bd9870ab10 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -870,12 +870,6 @@ static int init_over_drive_limits( hwmgr->platform_descriptor.maxOverdriveVDDC = 0; hwmgr->platform_descriptor.overdriveVDDCStep = 0; - if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 \ - || hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { - hwmgr->od_enabled = false; - pr_debug("OverDrive feature not support by VBIOS\n"); - } - return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index ce64dfabd34b..925e17104f90 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -1074,12 +1074,6 @@ static int init_overdrive_limits(struct pp_hwmgr *hwmgr, powerplay_table, (const ATOM_FIRMWARE_INFO_V2_1 *)fw_info); - if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 - && hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { - hwmgr->od_enabled = false; - pr_debug("OverDrive feature not support by VBIOS\n"); - } - return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 45e9b8cb169d..b763c542bd6e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -791,7 +791,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) data->dpm_table.sclk_table.count++; } } - + if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) + 
hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk; /* Initialize Mclk DPM table based on allow Mclk values */ data->dpm_table.mclk_table.count = 0; for (i = 0; i < dep_mclk_table->count; i++) { @@ -806,6 +807,8 @@ static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr) } } + if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) + hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk; return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index d156b7bb92ae..f70dbc8ccfba 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -1311,6 +1311,9 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) vega10_setup_default_single_dpm_table(hwmgr, dpm_table, dep_gfx_table); + if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0) + hwmgr->platform_descriptor.overdriveLimit.engineClock = + dpm_table->dpm_levels[dpm_table->count-1].value; vega10_init_dpm_state(&(dpm_table->dpm_state)); /* Initialize Mclk DPM table based on allow Mclk values */ @@ -1319,6 +1322,9 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) vega10_setup_default_single_dpm_table(hwmgr, dpm_table, dep_mclk_table); + if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) + hwmgr->platform_descriptor.overdriveLimit.memoryClock = + dpm_table->dpm_levels[dpm_table->count-1].value; vega10_init_dpm_state(&(dpm_table->dpm_state)); data->dpm_table.eclk_table.count = 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c index 0768d259c07c..16b1a9cf6cf0 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -267,12 +267,6 @@ static int init_over_drive_limits( hwmgr->platform_descriptor.maxOverdriveVDDC = 0; hwmgr->platform_descriptor.overdriveVDDCStep = 0; - if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0 || - hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0) { - hwmgr->od_enabled = false; - pr_debug("OverDrive feature not support by VBIOS\n"); - } - return 0; } -- cgit v1.2.1 From ac26b0f3fc41b942f03ec9fd0392e3aa1886800a Mon Sep 17 00:00:00 2001 From: Feifei Xu Date: Thu, 24 May 2018 15:36:57 +0800 Subject: drm/gfx9: Update gc goldensetting for vega20. Update mmCB_DCC_CONFIG register goldensetting. 
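A golden-setting entry such as SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080) is applied as a masked read-modify-write: the first constant selects the register fields being overridden and the second supplies their new values. The sketch below models that application in user space; the exact helper in soc15.c may differ in detail, and the starting register value is hypothetical.

/* Simplified model of applying one golden-setting entry. */
#include <stdio.h>
#include <stdint.h>

static uint32_t apply_golden(uint32_t reg, uint32_t and_mask, uint32_t or_value)
{
    reg &= ~and_mask;              /* clear only the overridden fields  */
    reg |= (or_value & and_mask);  /* program their new (golden) values */
    return reg;
}

int main(void)
{
    uint32_t cb_dcc_config = 0x0f0000ff;   /* hypothetical power-on value */

    cb_dcc_config = apply_golden(cb_dcc_config, 0x0f000080, 0x04000080);
    printf("mmCB_DCC_CONFIG -> 0x%08x\n", cb_dcc_config); /* 0x040000ff */
    return 0;
}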
Signed-off-by: Feifei Xu Reviewed-by: Hawking Zhang Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d7530fdfaad5..4f7a72dd3734 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -111,6 +111,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = static const struct soc15_reg_golden golden_settings_gc_9_0_vg20[] = { + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_DCC_CONFIG, 0x0f000080, 0x04000080), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_2, 0x0f000000, 0x0a000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_3, 0x30000000, 0x10000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0xf3e777ff, 0x22014042), -- cgit v1.2.1 From 7ba01f9e12bb3f088f617cf69b589ea37bd5d6ed Mon Sep 17 00:00:00 2001 From: Shaoyun Liu Date: Wed, 14 Mar 2018 14:44:58 -0400 Subject: drm/amdgpu: Fix NULL pointer when load kfd driver with PP block is disabled When PP block is disabled, return a fix value(100M) for mclk and sclk on bare-metal mode. This will cover the emulation mode as well. Signed-off-by: Shaoyun Liu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bd36ee9f7e6d..60fc4413449b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -336,15 +336,12 @@ void get_local_mem_info(struct kgd_dev *kgd, mem_info->local_mem_size_public, mem_info->local_mem_size_private); - if (amdgpu_emu_mode == 1) { - mem_info->mem_clk_max = 100; - return; - } - if (amdgpu_sriov_vf(adev)) mem_info->mem_clk_max = adev->clock.default_mclk / 100; - else + else if (adev->powerplay.pp_funcs) mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100; + else + mem_info->mem_clk_max = 100; } uint64_t get_gpu_clock_counter(struct kgd_dev *kgd) @@ -361,13 +358,12 @@ uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd) struct amdgpu_device *adev = (struct amdgpu_device *)kgd; /* the sclk is in quantas of 10kHz */ - if (amdgpu_emu_mode == 1) - return 100; - if (amdgpu_sriov_vf(adev)) return adev->clock.default_sclk / 100; - - return amdgpu_dpm_get_sclk(adev, false) / 100; + else if (adev->powerplay.pp_funcs) + return amdgpu_dpm_get_sclk(adev, false) / 100; + else + return 100; } void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) -- cgit v1.2.1 From 036286e28e8ece7637cc62934f7dbc89e6656940 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 30 May 2018 17:41:44 +0100 Subject: drm/amdgpu/df: fix potential array out-of-bounds read The comparison with the number of elements in array df_v3_7_channel_number is off-by-one and can produce an array out-of-bounds read if fb_channel_number is equal to the number of elements of the array. Fix this by changing the comparison to >= instead of >. 
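The off-by-one is easy to reproduce in miniature: an index equal to ARRAY_SIZE() passes a '>' check yet is one element past the end of the array, so the guard has to be '>='. Below is a self-contained illustration; the table contents are made up and only stand in for df_v3_6_channel_number.

/* Demonstrates the off-by-one the patch below fixes. */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const int df_channel_number[] = { 1, 2, 0, 4, 0, 8, 0, 16, 32 };

static int get_hbm_channel_number(int fb_channel_number)
{
    /* With '>' an index equal to ARRAY_SIZE() (9 here) slips through and
     * reads df_channel_number[9], one element past the end. */
    if (fb_channel_number >= (int)ARRAY_SIZE(df_channel_number))
        fb_channel_number = 0;

    return df_channel_number[fb_channel_number];
}

int main(void)
{
    printf("in-range index 3   -> %d channels\n", get_hbm_channel_number(3));
    printf("out-of-range index -> %d channels (clamped to entry 0)\n",
           get_hbm_channel_number((int)ARRAY_SIZE(df_channel_number)));
    return 0;
}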
Detected by CoverityScan, CID#1469489 ("Out-of-bounds read") Fixes: 13b581502d51 ("drm/amdgpu/df: implement df v3_6 callback functions (v2)") Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 60608b3df881..d5ebe566809b 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -64,7 +64,7 @@ static u32 df_v3_6_get_hbm_channel_number(struct amdgpu_device *adev) int fb_channel_number; fb_channel_number = adev->df_funcs->get_fb_channel_number(adev); - if (fb_channel_number > ARRAY_SIZE(df_v3_6_channel_number)) + if (fb_channel_number >= ARRAY_SIZE(df_v3_6_channel_number)) fb_channel_number = 0; return df_v3_6_channel_number[fb_channel_number]; -- cgit v1.2.1 From 161c68eb502e90a5cf25913c577e23217590e43f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 24 May 2018 14:37:39 -0500 Subject: Revert "drm/amdgpu: Add an ATPX quirk for hybrid laptop" This reverts commit 13b40935cf64f59b93cf1c716a2033488e5a228c. This was a workaround for a bug in the HDA driver that prevented the HDA audio chip from going into runtime pm which prevented the GPU from going into runtime pm. Bug: https://bugs.freedesktop.org/show_bug.cgi?id=106597 Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 1bcb2b247335..daa06e7c5bb7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -569,7 +569,6 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = { { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, - { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX }, { 0, 0, 0, 0, 0 }, }; -- cgit v1.2.1 From a0b2ac29415bb44d1c212184c1385a1abe68db5c Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Wed, 23 May 2018 11:18:43 +0800 Subject: drm/amdgpu: fix the missed vcn fw version report MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It missed vcn.fw_version setting when init vcn microcode, and it will be used to report vcn ucode version via amdgpu_firmware_info sysfs interface. 
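The ucode_version word the fix records is the same 32-bit field the existing code already unpacks into family/major/minor, using the shifts visible in the diff below. Here is a stand-alone sketch of that unpacking; the sample header value is invented.

/* Unpacking the VCN firmware header version word (byte-swap already done). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t ucode_version = 0x01020310;  /* hypothetical header value */

    uint32_t fw_version    = ucode_version;             /* reported via sysfs */
    uint32_t version_major = (ucode_version >> 24) & 0xff;
    uint32_t version_minor = (ucode_version >> 8) & 0xff;
    uint32_t family_id     = ucode_version & 0xff;

    printf("fw_version=0x%08x major=%u minor=%u family=0x%02x\n",
           fw_version, version_major, version_minor, family_id);
    return 0;
}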
Signed-off-by: Huang Rui Reviewed-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 6fd606f90cb2..127e87b470ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -82,6 +82,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) } hdr = (const struct common_firmware_header *)adev->vcn.fw->data; + adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); family_id = le32_to_cpu(hdr->ucode_version) & 0xff; version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff; version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff; -- cgit v1.2.1 From 235293901c11705f94744c08582e1ff339ee29b2 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Sat, 12 May 2018 12:31:12 +0800 Subject: drm/amdgpu: add checking for sos version The sos ucode version will be changed to align with the value of mmMP0_SMN_C2PMSG_58. Then we add a checking for this. Meanwhile, we have to be compatibility backwards. So it adds serveral recent legacy versions as the white list for the version checking. Signed-off-by: Huang Rui Acked-by: Alex Deucher Reviewed-by: Junwei Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 0c768e388ace..727071fee6f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -47,6 +47,8 @@ MODULE_FIRMWARE("amdgpu/vega20_asd.bin"); #define smnMP1_FIRMWARE_FLAGS 0x3010028 +static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554}; + static int psp_v3_1_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) { @@ -210,12 +212,31 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) return ret; } +static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver) +{ + int i; + + if (ver == adev->psp.sos_fw_version) + return true; + + /* + * Double check if the latest four legacy versions. + * If yes, it is still the right version. + */ + for (i = 0; i < sizeof(sos_old_versions) / sizeof(uint32_t); i++) { + if (sos_old_versions[i] == adev->psp.sos_fw_version) + return true; + } + + return false; +} + static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) { int ret; unsigned int psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; + uint32_t sol_reg, ver; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. 
@@ -248,6 +269,10 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, true); + ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); + if (!psp_v3_1_match_version(adev, ver)) + DRM_WARN("SOS version doesn't match\n"); + return ret; } -- cgit v1.2.1 From 387f49e5467244b7bcb4cad0946a5d0fcade5f92 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Tue, 5 Jun 2018 17:31:51 +0800 Subject: drm/amdgpu: fix clear_all and replace handling in the VM (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: assign bo_va as well We need to put the lose ends on the invalid list because it is possible that we need to split up huge pages for them. Cc: stable@vger.kernel.org Signed-off-by: Christian König Signed-off-by: Junwei Zhang (v2) Reviewed-by: David Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index ccba88cc8c54..b0eb2f537392 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2123,7 +2123,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, before->last = saddr - 1; before->offset = tmp->offset; before->flags = tmp->flags; - list_add(&before->list, &tmp->list); + before->bo_va = tmp->bo_va; + list_add(&before->list, &tmp->bo_va->invalids); } /* Remember mapping split at the end */ @@ -2133,7 +2134,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, after->offset = tmp->offset; after->offset += after->start - tmp->start; after->flags = tmp->flags; - list_add(&after->list, &tmp->list); + after->bo_va = tmp->bo_va; + list_add(&after->list, &tmp->bo_va->invalids); } list_del(&tmp->list); -- cgit v1.2.1 From 06b18f61ee78f8c69417c3a5e4f21ed678662315 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 1 Jun 2018 14:41:04 +0800 Subject: drm/amdgpu: fix CG enabling hang with gfxoff enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After defer the execution of clockgating enabling, at that time, gfx already enter into "off" state. Howerver, clockgating enabling will use MMIO to access the gfx registers, then get the gfx hung. So here we should move the gfx powergating and gfxoff enabling behavior at the end of initialization behind clockgating. 
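The ordering constraint is the whole point of the patch: clockgating programming still needs MMIO access to GFX registers, so GFX powergating and gfxoff may only be enabled after clockgating has been set up. The sketch below models the constraint with a toy state machine; it is not the amdgpu IP-block interface.

/* Toy model of the late-init ordering: CG first, gfxoff last. */
#include <stdio.h>
#include <stdbool.h>

struct gpu_model {
    bool gfx_off;        /* once true, GFX MMIO registers are inaccessible */
    bool cg_programmed;
};

static bool program_clockgating(struct gpu_model *gpu)
{
    if (gpu->gfx_off) {
        printf("MMIO write while GFX is off -> hang\n");
        return false;
    }
    gpu->cg_programmed = true;
    printf("clockgating programmed over MMIO\n");
    return true;
}

static void enable_gfxoff(struct gpu_model *gpu)
{
    gpu->gfx_off = true;
    printf("gfxoff enabled\n");
}

int main(void)
{
    struct gpu_model gpu = { 0 };

    /* Correct late-init order: clockgating first, powergating/gfxoff last. */
    if (program_clockgating(&gpu))
        enable_gfxoff(&gpu);

    return 0;
}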
Signed-off-by: Huang Rui Reviewed-by: Hawking Zhang Acked-by: Christian König Cc: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 ++++++++++++ drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 5 ----- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 2 +- drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | 4 ++-- 4 files changed, 15 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 290e279abf0d..3317d1536f4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1730,6 +1730,18 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev) } } } + + if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) { + /* enable gfx powergating */ + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_GATE); + /* enable gfxoff */ + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_SMC, + AMD_PG_STATE_GATE); + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 4f7a72dd3734..95f2773dc11d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -3405,11 +3405,6 @@ static int gfx_v9_0_late_init(void *handle) if (r) return r; - r = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_GATE); - if (r) - return r; - return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index b493369e6d0f..d0e6e2dd6bc6 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -245,7 +245,7 @@ static int pp_set_powergating_state(void *handle, } if (hwmgr->hwmgr_func->enable_per_cu_power_gating == NULL) { - pr_info("%s was not implemented.\n", __func__); + pr_debug("%s was not implemented.\n", __func__); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 6a6367190bed..d4bc83e81389 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -313,7 +313,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr) static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { - return smu10_disable_gfx_off(hwmgr); + return 0; } static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) @@ -328,7 +328,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr) static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { - return smu10_enable_gfx_off(hwmgr); + return 0; } static int smu10_gfx_off_control(struct pp_hwmgr *hwmgr, bool enable) -- cgit v1.2.1 From 7584498c1c67bba4d1f5a0f7bf48c9a50c3ff5c8 Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Fri, 18 May 2018 10:39:16 +0800 Subject: drm/amd/powerplay: fix missed hwmgr check warning before call gfx_off_control handler Patch 9667849bbb8d: "drm/amd/powerplay: add control gfxoff enabling in late init" from Mar 13, 2018, leads to the following static checker warning: drivers/gpu/drm/amd/amdgpu/../powerplay/amd_powerplay.c:194 pp_late_init() error: we previously assumed 'hwmgr' could be null (see line 185) drivers/gpu/drm/amd/amdgpu/../powerplay/amd_powerplay.c This patch fixes the warning to add hwmgr checking. 
Reported-by: Dan Carpenter Signed-off-by: Huang Rui Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index d0e6e2dd6bc6..0969b65003ed 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -191,7 +191,8 @@ static int pp_late_init(void *handle) if (adev->pm.smu_prv_buffer_size != 0) pp_reserve_vram_for_smu(adev); - if (hwmgr->hwmgr_func->gfx_off_control && + if (hwmgr && hwmgr->hwmgr_func && + hwmgr->hwmgr_func->gfx_off_control && (hwmgr->feature_mask & PP_GFXOFF_MASK)) { ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true); if (ret) -- cgit v1.2.1 From 97028037a38ae40c0e06789b71038d3a6045a413 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Mon, 4 Jun 2018 15:35:03 -0400 Subject: drm/amdgpu: Grab/put runtime PM references in atomic_commit_tail() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So, unfortunately I recently made the discovery that in the upstream kernel, the only reason that amdgpu is not currently suffering from issues with runtime PM putting the GPU into suspend while it's driving displays is due to the fact that on most prime systems, we have sound devices associated with the GPU that hold their own runtime PM ref for the GPU. What this means however, is that in the event that there isn't any kind of sound device active (which can easily be reproduced by building a kernel with sound drivers disabled), the GPU will fall asleep even when there's displays active. This appears to be in part due to the fact that amdgpu has not actually ever relied on it's rpm_idle() function to be the only thing keeping it running, and normally grabs it's own power references whenever there are displays active (as can be seen with the original pre-DC codepath in amdgpu_display_crtc_set_config() in amdgpu_display.c). This means it's very likely that this bug was introduced during the switch over the DC. So to fix this, we start grabbing runtime PM references every time we enable a previously disabled CRTC in atomic_commit_tail(). This appears to be the correct solution, as it matches up with what i915 does in i915/intel_runtime_pm.c. The one sideaffect of this is that we ignore the variable that the pre-DC code used to use for tracking when it needed runtime PM refs, adev->have_disp_power_ref. This is mainly because there's no way for a driver to tell whether or not all of it's CRTCs are enabled or disabled when we've begun committing an atomic state, as there may be CRTC commits happening in parallel that aren't contained within the atomic state being committed. So, it's safer to just get/put a reference for each CRTC being enabled or disabled in the new atomic state. Signed-off-by: Lyude Paul Acked-by: Christian König . 
Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 0a06941204d7..5e9e6772f754 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -4278,6 +4279,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); + pm_runtime_get_noresume(dev->dev); + acrtc->enabled = true; acrtc->hw_mode = new_crtc_state->mode; crtc->hwmode = new_crtc_state->mode; @@ -4466,6 +4469,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_wait_for_flip_done(dev, state); drm_atomic_helper_cleanup_planes(dev, state); + + /* Finally, drop a runtime PM reference for each newly disabled CRTC, + * so we can put the GPU into runtime suspend if we're not driving any + * displays anymore + */ + pm_runtime_mark_last_busy(dev->dev); + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (old_crtc_state->active && !new_crtc_state->active) + pm_runtime_put_autosuspend(dev->dev); + } } -- cgit v1.2.1 From c3dade5ef72d9141e28fb86ebd46a9b3f3f4e030 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 6 Jun 2018 11:54:45 +0800 Subject: drm/amd/powerplay: fix wrong clock adjust sequence The clocks should be adjusted after display configuration changed. Otherwise, the socclk and memclk may be forced on an unnecessary higher level. Signed-off-by: Evan Quan Reviewed-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd') diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c index 0af13c154328..323990b77ead 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c @@ -265,19 +265,18 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip, if (skip) return 0; - if (!hwmgr->ps) - /* - * for vega12/vega20 which does not support power state manager - * DAL clock limits should also be honoured - */ - phm_apply_clock_adjust_rules(hwmgr); - phm_pre_display_configuration_changed(hwmgr); phm_display_configuration_changed(hwmgr); if (hwmgr->ps) power_state_management(hwmgr, new_ps); + else + /* + * for vega12/vega20 which does not support power state manager + * DAL clock limits should also be honoured + */ + phm_apply_clock_adjust_rules(hwmgr); phm_notify_smc_display_config_after_ps_adjustment(hwmgr); -- cgit v1.2.1 From c4ff91dd40e2253ab6dd028011469c2c694e1e19 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 6 Jun 2018 13:18:31 +0100 Subject: drm/amd/pp: initialize result to before or'ing in data The current use of result is or'ing in values and checking for a non-zero result, however, result is not initialized to zero so it potentially contains garbage to start with. Fix this by initializing it to the first return from the call to vega10_program_didt_config_registers. 
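The bug class is simply accumulating into an uninitialized variable with '|='. Below is a stand-alone illustration of the corrected pattern, where the first call is a plain assignment and later calls are OR-ed in.

/* Corrected accumulation pattern for combining step results. */
#include <stdio.h>

static int program_step(int step)
{
    return step == 2 ? -1 : 0;     /* pretend step 2 fails */
}

int main(void)
{
    int result;                    /* if the first line below used '|=',
                                    * result would start from garbage */

    result = program_step(1);      /* first call: plain assignment */
    result |= program_step(2);     /* later calls accumulate error status */
    result |= program_step(3);

    printf("combined result: %d (non-zero means some step failed)\n", result);
    return 0;
}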
From 4b3c641b5f3a1d67c7bec60fb033233504e636ee Mon Sep 17 00:00:00 2001
From: Pratik Vishwakarma
Date: Thu, 7 Jun 2018 11:48:40 +0530
Subject: drm/amd/display: Fix stale buffer object (bo) use

Fixes stale buffer object (bo) usage for the cursor plane.

The cursor plane's bo operations are handled in DC code. Currently,
atomic_commit() does not handle bo operations for the cursor plane; as a
result, the bo assigned to the cursor plane in
dm_plane_helper_prepare_fb() is not coherent with the updates made to
the same bo in DC code. This mismatch leads to bo corruption and hence
crashes during S3 entry. This patch cleans up the code, which was added
as a hack for the 4.9 version only.

Reviewed-by: Andrey Grodzovsky
Signed-off-by: Pratik Vishwakarma
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 11 -----------
 1 file changed, 11 deletions(-)

(limited to 'drivers/gpu/drm/amd')

diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5e9e6772f754..d7d1245c1050 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3081,17 +3081,6 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 		}
 	}
 
-	/* It's a hack for s3 since in 4.9 kernel filter out cursor buffer
-	 * prepare and cleanup in drm_atomic_helper_prepare_planes
-	 * and drm_atomic_helper_cleanup_planes because fb doens't in s3.
-	 * IN 4.10 kernel this code should be removed and amdgpu_device_suspend
-	 * code touching fram buffers should be avoided for DC.
-	 */
-	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
-		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_state->crtc);
-
-		acrtc->cursor_bo = obj;
-	}
 	return 0;
 }
--
cgit v1.2.1
From f8a5de447f1511917487b43dce96639c29b41219 Mon Sep 17 00:00:00 2001
From: Rex Zhu
Date: Tue, 12 Jun 2018 14:26:00 +0800
Subject: drm/amd/pp: Fix OD feature enable failed on Vega10 workstation cards

As the hardware requires, the soc clock must be larger than the mclk, so
we set the max soc clock to the OD max memory clock. But on workstation
cards the vbios does not support the OD feature, and the OD max memory
clock is equal to 0. In this case the driver can still support
underclocking, and it sets the OD max memory clock to the value of the
highest memory dpm level. So the OD max memory clock should be less than
the highest soc clock, and the driver should not change the soc clock.

Caused by commit ca57b9b0a156
("drm/amd/pp: Allow underclocking when od table is empty in vbios")

Reviewed-by: Evan Quan
Signed-off-by: Rex Zhu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/amd')

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f70dbc8ccfba..05e680d55dbb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -321,8 +321,12 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
 	odn_table->min_vddc = dep_table[0]->entries[0].vddc;
 
 	i = od_table[2]->count - 1;
-	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock;
-	od_table[2]->entries[i].vddc = odn_table->max_vddc;
+	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
+					hwmgr->platform_descriptor.overdriveLimit.memoryClock :
+					od_table[2]->entries[i].clk;
+	od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
+					odn_table->max_vddc :
+					od_table[2]->entries[i].vddc;
 
 	return 0;
 }
@@ -1325,6 +1329,7 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
 		hwmgr->platform_descriptor.overdriveLimit.memoryClock =
 				dpm_table->dpm_levels[dpm_table->count-1].value;
+
 	vega10_init_dpm_state(&(dpm_table->dpm_state));
 
 	data->dpm_table.eclk_table.count = 0;
--
cgit v1.2.1
From b0f6b8090e05a24263207a399b6c48a94034f1e8 Mon Sep 17 00:00:00 2001
From: Shaoyun Liu
Date: Tue, 12 Jun 2018 13:35:44 -0400
Subject: drm/amd/include: Update df 3.6 mask and shift definition

The register fields have been changed in df 3.6; update to the correct
settings.

Signed-off-by: Shaoyun Liu
Acked-by: Alex Deucher
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'drivers/gpu/drm/amd')

diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
index 88f7c69df6b9..06fac509e987 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -36,13 +36,13 @@
 /* DF_CS_AON0_DramBaseAddress0 */
 #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal__SHIFT		0x0
 #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT		0x1
-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT		0x4
-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT		0x8
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT		0x2
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT		0x9
 #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr__SHIFT		0xc
 #define DF_CS_UMC_AON0_DramBaseAddress0__AddrRngVal_MASK		0x00000001L
 #define DF_CS_UMC_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK		0x00000002L
-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x000000F0L
-#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000700L
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK		0x0000003CL
+#define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK		0x00000E00L
 #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK		0xFFFFF000L
 
 #endif
--
cgit v1.2.1
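
Shift and mask values always have to agree with each other and with the hardware layout: the mask selects the field's bits and the shift moves the field down to bit 0. As a rough illustration (the decode helper and include path below are hypothetical, but the macro names come from the header patched above), the updated df 3.6 definitions would be used like this:

#include <linux/types.h>
#include <linux/printk.h>
#include "asic_reg/df/df_3_6_sh_mask.h"

static void decode_dram_base0(u32 regval)
{
	/* IntLvNumChan now lives at bits 5:2 (mask 0x3C, shift 2). */
	u32 chan = (regval & DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
		   DF_CS_UMC_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	/* IntLvAddrSel now lives at bits 11:9 (mask 0xE00, shift 9). */
	u32 sel = (regval & DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK) >>
		  DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT;

	pr_debug("IntLvNumChan=%u IntLvAddrSel=%u\n", chan, sel);
}
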
From cb5ed37f1f9976a5f9d5f677ac9423642e30d10f Mon Sep 17 00:00:00 2001
From: Evan Quan
Date: Tue, 29 May 2018 16:31:05 +0800
Subject: drm/amdgpu: fix parsing indirect register list v2

WARN_ON possible buffer overflow and avoid unnecessary dereference.

v2: change BUG_ON to WARN_ON

Signed-off-by: Evan Quan
Reviewed-by: Huang Rui
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

(limited to 'drivers/gpu/drm/amd')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 95f2773dc11d..a69153435ea7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1838,13 +1838,15 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
 				    int indirect_offset,
 				    int list_size,
 				    int *unique_indirect_regs,
-				    int *unique_indirect_reg_count,
+				    int unique_indirect_reg_count,
 				    int *indirect_start_offsets,
-				    int *indirect_start_offsets_count)
+				    int *indirect_start_offsets_count,
+				    int max_start_offsets_count)
 {
 	int idx;
 
 	for (; indirect_offset < list_size; indirect_offset++) {
+		WARN_ON(*indirect_start_offsets_count >= max_start_offsets_count);
 		indirect_start_offsets[*indirect_start_offsets_count] = indirect_offset;
 		*indirect_start_offsets_count = *indirect_start_offsets_count + 1;
@@ -1852,14 +1854,14 @@ static void gfx_v9_1_parse_ind_reg_list(int *register_list_format,
 		indirect_offset += 2;
 
 		/* look for the matching indice */
-		for (idx = 0; idx < *unique_indirect_reg_count; idx++) {
+		for (idx = 0; idx < unique_indirect_reg_count; idx++) {
 			if (unique_indirect_regs[idx] ==
 				register_list_format[indirect_offset] ||
 				!unique_indirect_regs[idx])
 				break;
 		}
 
-		BUG_ON(idx >= *unique_indirect_reg_count);
+		BUG_ON(idx >= unique_indirect_reg_count);
 
 		if (!unique_indirect_regs[idx])
 			unique_indirect_regs[idx] =
 				register_list_format[indirect_offset];
@@ -1894,9 +1896,10 @@ static int gfx_v9_1_init_rlc_save_restore_list(struct amdgpu_device *adev)
 			adev->gfx.rlc.reg_list_format_direct_reg_list_length,
 			adev->gfx.rlc.reg_list_format_size_bytes >> 2,
 			unique_indirect_regs,
-			&unique_indirect_reg_count,
+			unique_indirect_reg_count,
 			indirect_start_offsets,
-			&indirect_start_offsets_count);
+			&indirect_start_offsets_count,
+			ARRAY_SIZE(indirect_start_offsets));
 
 	/* enable auto inc in case it is disabled */
 	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
--
cgit v1.2.1
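
The guard-before-write idea above can be shown in isolation: the caller passes the capacity of the destination array, and the parser refuses to write past it. This is only a sketch with hypothetical names; the patch above keeps the WARN_ON but does not break out of the loop.

#include <linux/bug.h>

/* Sketch: record offsets into a fixed-size array without overflowing it. */
static void collect_offsets(const int *list, int list_size,
			    int *offsets, int *count, int max_count)
{
	int i;

	for (i = 0; i < list_size; i++) {
		if (WARN_ON(*count >= max_count))
			break;			/* never write past the array */
		offsets[(*count)++] = i;
	}
}

A caller would pass ARRAY_SIZE() of the destination array as max_count, exactly as gfx_v9_1_init_rlc_save_restore_list() now does with indirect_start_offsets.
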
From 333c8d3ef238f21516659b5221532060bae8a128 Mon Sep 17 00:00:00 2001
From: Evan Quan
Date: Tue, 12 Jun 2018 17:01:23 +0800
Subject: drm/amd/powerplay: remove uncessary extra gfxoff control call

Gfxoff is already enabled in amdgpu_device_ip_set_powergating_state.
So, no need to enable it again in pp_late_init.

Signed-off-by: Evan Quan
Reviewed-by: Huang Rui
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 ---------
 1 file changed, 9 deletions(-)

(limited to 'drivers/gpu/drm/amd')

diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 0969b65003ed..d567be49c31b 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -180,7 +180,6 @@ static int pp_late_init(void *handle)
 {
 	struct amdgpu_device *adev = handle;
 	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-	int ret;
 
 	if (hwmgr && hwmgr->pm_en) {
 		mutex_lock(&hwmgr->smu_lock);
@@ -191,14 +190,6 @@ static int pp_late_init(void *handle)
 	if (adev->pm.smu_prv_buffer_size != 0)
 		pp_reserve_vram_for_smu(adev);
 
-	if (hwmgr && hwmgr->hwmgr_func &&
-	    hwmgr->hwmgr_func->gfx_off_control &&
-	    (hwmgr->feature_mask & PP_GFXOFF_MASK)) {
-		ret = hwmgr->hwmgr_func->gfx_off_control(hwmgr, true);
-		if (ret)
-			pr_err("gfx off enabling failed!\n");
-	}
-
 	return 0;
 }
--
cgit v1.2.1


From 5c16f36f6f003b4415237acca59384a074cd8030 Mon Sep 17 00:00:00 2001
From: Kenneth Feng
Date: Tue, 12 Jun 2018 15:07:37 +0800
Subject: drm/amd/powerplay: Set higher SCLK&MCLK frequency than dpm7 in OD (v2)

Fix the issue that SCLK&MCLK can't be set higher than dpm7 when OD is
enabled in SMU7.

v2: fix warning (Alex)

Signed-off-by: Kenneth Feng
Acked-by: Rex Zhu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/amd')

diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index b763c542bd6e..f8e866ceda02 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3755,14 +3755,17 @@ static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
 static int smu7_generate_dpm_level_enable_mask(
 		struct pp_hwmgr *hwmgr, const void *input)
 {
-	int result;
+	int result = 0;
 	const struct phm_set_power_state_input *states =
 			(const struct phm_set_power_state_input *)input;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 	const struct smu7_power_state *smu7_ps =
 			cast_const_phw_smu7_power_state(states->pnew_state);
 
-	result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+	/*skip the trim if od is enabled*/
+	if (!hwmgr->od_enabled)
+		result = smu7_trim_dpm_states(hwmgr, smu7_ps);
+
 	if (result)
 		return result;
--
cgit v1.2.1