Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h  |  56
1 file changed, 39 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 18c72c0b478d..fbe17bf73a00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -53,17 +53,23 @@ struct amdgpu_bo_list_entry;
/* LOG2 number of continuous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
-#define AMDGPU_PTE_VALID (1 << 0)
-#define AMDGPU_PTE_SYSTEM (1 << 1)
-#define AMDGPU_PTE_SNOOPED (1 << 2)
+#define AMDGPU_PTE_VALID (1ULL << 0)
+#define AMDGPU_PTE_SYSTEM (1ULL << 1)
+#define AMDGPU_PTE_SNOOPED (1ULL << 2)
/* VI only */
-#define AMDGPU_PTE_EXECUTABLE (1 << 4)
+#define AMDGPU_PTE_EXECUTABLE (1ULL << 4)
-#define AMDGPU_PTE_READABLE (1 << 5)
-#define AMDGPU_PTE_WRITEABLE (1 << 6)
+#define AMDGPU_PTE_READABLE (1ULL << 5)
+#define AMDGPU_PTE_WRITEABLE (1ULL << 6)
-#define AMDGPU_PTE_FRAG(x) ((x & 0x1f) << 7)
+#define AMDGPU_PTE_FRAG(x) ((x & 0x1fULL) << 7)
+
+#define AMDGPU_PTE_PRT (1ULL << 63)
+
+/* VEGA10 only */
+#define AMDGPU_PTE_MTYPE(a) ((uint64_t)a << 57)
+#define AMDGPU_PTE_MTYPE_MASK AMDGPU_PTE_MTYPE(3ULL)
/* How to programm VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
@@ -73,6 +79,10 @@ struct amdgpu_bo_list_entry;
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
+
+ /* array of page tables, one for each directory entry */
+ struct amdgpu_vm_pt *entries;
+ unsigned last_entry_used;
};
struct amdgpu_vm {
@@ -92,14 +102,10 @@ struct amdgpu_vm {
struct list_head freed;
/* contains the page directory */
- struct amdgpu_bo *page_directory;
- unsigned max_pde_used;
- struct dma_fence *page_directory_fence;
+ struct amdgpu_vm_pt root;
+ struct dma_fence *last_dir_update;
uint64_t last_eviction_counter;
- /* array of page tables, one for each page directory entry */
- struct amdgpu_vm_pt *page_tables;
-
/* for id and flush management per ring */
struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];
@@ -147,7 +153,8 @@ struct amdgpu_vm_manager {
u64 fence_context;
unsigned seqno[AMDGPU_MAX_RINGS];
- uint32_t max_pfn;
+ uint64_t max_pfn;
+ uint32_t num_level;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
@@ -159,6 +166,10 @@ struct amdgpu_vm_manager {
atomic_t vm_pte_next_ring;
/* client id counter */
atomic64_t client_counter;
+
+ /* partial resident texture handling */
+ spinlock_t prt_lock;
+ atomic_t num_prt_users;
};
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
@@ -173,15 +184,19 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
+int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint64_t saddr, uint64_t size);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct dma_fence *fence,
struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+int amdgpu_vm_update_directories(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+ struct amdgpu_vm *vm,
+ struct dma_fence **fence);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
@@ -198,9 +213,16 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr, uint64_t offset,
uint64_t size, uint64_t flags);
+int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
+ struct amdgpu_bo_va *bo_va,
+ uint64_t addr, uint64_t offset,
+ uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
uint64_t addr);
+int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
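
The declarations above are shown verbatim from the patch. As a rough, hypothetical sketch only (the helper name example_map_range and the call order are assumptions, not part of this change), the new 64-bit PTE flags and the added entry points might be combined like this, assuming the caller has already reserved and validated the BOs involved:

#include "amdgpu.h"

/*
 * Hypothetical usage sketch, not part of the patch. Assumes the caller
 * already holds the required reservations/locks for vm and bo_va.
 */
static int example_map_range(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t saddr, uint64_t size)
{
	/* PTE flags are now 64 bit; on VEGA10 a memory type could be ORed in
	 * via AMDGPU_PTE_MTYPE(), and bit 63 (AMDGPU_PTE_PRT) marks a PRT
	 * mapping. */
	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
			 AMDGPU_PTE_WRITEABLE;
	struct dma_fence *fence = NULL;
	int r;

	/* allocate the page tables covering [saddr, saddr + size) up front */
	r = amdgpu_vm_alloc_pts(adev, vm, saddr, size);
	if (r)
		return r;

	/* replace any overlapping mappings instead of failing on them */
	r = amdgpu_vm_bo_replace_map(adev, bo_va, saddr, 0, size, flags);
	if (r)
		return r;

	/* walk and write all directory levels, not just a single PD */
	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		return r;

	/* tear down mappings released by the replace; the completion fence
	 * is now returned to the caller */
	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r)
		return r;

	/* amdgpu_vm_bo_update() would still be needed to write the PTEs
	 * for bo_va itself */
	dma_fence_put(fence);
	return 0;
}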