Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_gtt.c | 76
1 files changed, 53 insertions, 23 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 1f7e6b9df45d..52bc6c3dfe04 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -140,8 +140,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
 #endif
 	/* Early VLV doesn't have this */
-	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
-	    dev->pdev->revision < 0xb) {
+	if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) {
 		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
 		return 0;
 	}
@@ -770,10 +769,10 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length, scratch_pte);
 	} else {
-		uint64_t templ4, pml4e;
+		uint64_t pml4e;
 		struct i915_page_directory_pointer *pdp;
-		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
 			gen8_ppgtt_clear_pte_range(vm, pdp, start, length, scratch_pte);
 		}
@@ -839,10 +838,10 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 					      cache_level);
 	} else {
 		struct i915_page_directory_pointer *pdp;
-		uint64_t templ4, pml4e;
+		uint64_t pml4e;
 		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
-		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
 			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, start, cache_level);
 		}
@@ -1020,10 +1019,9 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
 {
 	struct drm_device *dev = vm->dev;
 	struct i915_page_table *pt;
-	uint64_t temp;
 	uint32_t pde;
-	gen8_for_each_pde(pt, pd, start, length, temp, pde) {
+	gen8_for_each_pde(pt, pd, start, length, pde) {
 		/* Don't reallocate page tables */
 		if (test_bit(pde, pd->used_pdes)) {
 			/* Scratch is never allocated this way */
@@ -1082,13 +1080,12 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
 {
 	struct drm_device *dev = vm->dev;
 	struct i915_page_directory *pd;
-	uint64_t temp;
 	uint32_t pdpe;
 	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 	WARN_ON(!bitmap_empty(new_pds, pdpes));
-	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		if (test_bit(pdpe, pdp->used_pdpes))
 			continue;
@@ -1136,12 +1133,11 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
 {
 	struct drm_device *dev = vm->dev;
 	struct i915_page_directory_pointer *pdp;
-	uint64_t temp;
 	uint32_t pml4e;
 	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
-	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		if (!test_bit(pml4e, pml4->used_pml4es)) {
 			pdp = alloc_pdp(dev);
 			if (IS_ERR(pdp))
@@ -1225,7 +1221,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 	struct i915_page_directory *pd;
 	const uint64_t orig_start = start;
 	const uint64_t orig_length = length;
-	uint64_t temp;
 	uint32_t pdpe;
 	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
 	int ret;
@@ -1252,7 +1247,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 	}
 	/* For every page directory referenced, allocate page tables */
-	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length, new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
 		if (ret)
@@ -1264,7 +1259,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 	/* Allocations have completed successfully, so set the bitmaps, and do
 	 * the mappings. */
-	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		gen8_pde_t *const page_directory = kmap_px(pd);
 		struct i915_page_table *pt;
 		uint64_t pd_len = length;
@@ -1274,7 +1269,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 		/* Every pd should be allocated, we just did that above. */
 		WARN_ON(!pd);
-		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
 			/* Same reasoning as pd */
 			WARN_ON(!pt);
 			WARN_ON(!pd_len);
@@ -1311,6 +1306,8 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
 err_out:
 	while (pdpe--) {
+		unsigned long temp;
+
 		for_each_set_bit(temp, new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES), I915_PDES)
 			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
@@ -1333,7 +1330,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
 	struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_page_directory_pointer *pdp;
-	uint64_t temp, pml4e;
+	uint64_t pml4e;
 	int ret = 0;
 	/* Do the pml4 allocations first, so we don't need to track the newly
@@ -1352,7 +1349,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
 		 "The allocation has spanned more than 512GB. "
 		 "It is highly likely this is incorrect.");
-	gen8_for_each_pml4e(pdp, pml4, start, length, temp, pml4e) {
+	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		WARN_ON(!pdp);
 		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
@@ -1392,10 +1389,9 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
 			  struct seq_file *m)
 {
 	struct i915_page_directory *pd;
-	uint64_t temp;
 	uint32_t pdpe;
-	gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
+	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		struct i915_page_table *pt;
 		uint64_t pd_len = length;
 		uint64_t pd_start = start;
@@ -1405,7 +1401,7 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
 			continue;
 		seq_printf(m, "\tPDPE #%d\n", pdpe);
-		gen8_for_each_pde(pt, pd, pd_start, pd_len, temp, pde) {
+		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
 			uint32_t pte;
 			gen8_pte_t *pt_vaddr;
@@ -1455,11 +1451,11 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
 		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
 	} else {
-		uint64_t templ4, pml4e;
+		uint64_t pml4e;
 		struct i915_pml4 *pml4 = &ppgtt->pml4;
 		struct i915_page_directory_pointer *pdp;
-		gen8_for_each_pml4e(pdp, pml4, start, length, templ4, pml4e) {
+		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 			if (!test_bit(pml4e, pml4->used_pml4es))
 				continue;
@@ -2355,6 +2351,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0; /* shut up gcc */
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_dma_address(sg_iter.sg) +
@@ -2381,6 +2380,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 /*
@@ -2401,6 +2402,9 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
 		addr = sg_page_iter_dma_address(&sg_iter);
@@ -2425,6 +2429,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
@@ -2439,6 +2445,9 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
 	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
 	int i;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
@@ -2451,6 +2460,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 	for (i = 0; i < num_entries; i++)
 		gen8_set_pte(&gtt_base[i], scratch_pte);
 	readl(gtt_base);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
@@ -2465,6 +2476,9 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
 	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
 	int i;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
@@ -2477,6 +2491,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2484,11 +2500,17 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 			     uint64_t start,
 			     enum i915_cache_level cache_level, u32 unused)
 {
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+
 }
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -2496,9 +2518,16 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
 			       uint64_t length,
 			       bool unused)
 {
+	struct drm_i915_private *dev_priv = vm->dev->dev_private;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
+	int rpm_atomic_seq;
+
+	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
+
 	intel_gtt_clear_range(first_entry, num_entries);
+
+	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 static int ggtt_bind_vma(struct i915_vma *vma,
@@ -2699,6 +2728,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 			return ret;
 		}
 		vma->bound |= GLOBAL_BIND;
+		__i915_vma_set_map_and_fenceable(vma);
 		list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
 	}
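
Most of the hunks above simply drop the extra temp/templ4 argument from the gen8_for_each_pde(), gen8_for_each_pdpe() and gen8_for_each_pml4e() walkers, so callers no longer declare a scratch variable they never read. The macro definitions live in i915_gem_gtt.h and are not part of this diff; the snippet below is only a rough sketch of the shape such an iterator can take once the per-step advance is derived from start/length alone. Everything prefixed with example_/EXAMPLE_ is invented for illustration.

/*
 * Illustrative sketch only -- not the real gen8_for_each_pdpe() from
 * i915_gem_gtt.h.  It shows an iterator that computes its index and the
 * per-iteration chunk size from (start, length), so the caller needs no
 * separate "temp" accumulator.  EXAMPLE_PDPE_SHIFT, example_pdpe_index()
 * and the 512-entry table size are assumptions made for the example.
 */
#define EXAMPLE_PDPE_SHIFT	30	/* assumed: 1GB of VA per PDP entry */
#define example_pdpe_index(addr)	(((addr) >> EXAMPLE_PDPE_SHIFT) & 0x1ff)

#define example_for_each_pdpe(pd, pdp, start, length, iter)		\
	for ((iter) = example_pdpe_index(start);			\
	     (length) > 0 && (iter) < 512 &&				\
		((pd) = (pdp)->page_directory[(iter)], true);		\
	     ({ u64 step = ALIGN((start) + 1, 1ULL << EXAMPLE_PDPE_SHIFT) - (start); \
		step = min_t(u64, step, (length));			\
		(start) += step; (length) -= step; }), ++(iter))

With that shape, a caller only keeps the index variable (uint32_t pdpe) and the pd cursor, which is exactly what the updated hunks are left with.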
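
The second group of hunks brackets every GGTT insert/clear path with an assert_rpm_atomic_begin()/assert_rpm_atomic_end() pair, so the driver can check that the PTE writes happen inside a single runtime-PM atomic section rather than racing a suspend/resume of the device. Below is a minimal usage sketch of that pairing; only the two assert helpers, gen8_set_pte(), readl() and gen8_pte_t come from the patch itself, while the wrapper function and its arguments are hypothetical.

/*
 * Hypothetical helper showing the bracketing pattern added by this diff.
 * The sequence number returned by assert_rpm_atomic_begin() is handed
 * back to assert_rpm_atomic_end(), which (as used here) is expected to
 * complain if a runtime-PM transition occurred while the PTE was written.
 */
static void example_write_ggtt_pte(struct drm_i915_private *dev_priv,
				   gen8_pte_t __iomem *gtt_base,
				   unsigned int index, gen8_pte_t pte)
{
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	gen8_set_pte(&gtt_base[index], pte);	/* MMIO write into the GSM */
	readl(gtt_base);			/* posting read, as in the diff */

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}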