author | Matthew Auld <matthew.auld@intel.com> | 2017-10-09 12:00:24 +0100
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2017-10-09 17:07:29 +0100
commit | 84e8978e62fea661787a216e7fe9abac8f1e056e (patch)
tree | 4f62a5dc8622833977cd8079c9faf41b9f0179ab /drivers/gpu/drm/i915
parent | 43ae70d97c5195f48d903df31ecac4c5397b2f1e (diff)
drm/i915: s/sg_mask/sg_page_sizes/
It's a little unclear what the sg_mask actually is, so prefer the more
meaningful name of sg_page_sizes.
Suggested-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009110024.29114-1-matthew.auld@intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
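
For readers skimming the patch: the value being renamed is a bitmask accumulated by OR-ing the length of every coalesced scatterlist segment. Because segment lengths are sums of the page sizes backing them, the set bits of the result give an upper bound on which page sizes appear in the table. A minimal userspace sketch of that accumulation (the segment lengths and all names here are hypothetical, not taken from the patch):

  #include <stdio.h>

  int main(void)
  {
          /* hypothetical coalesced segment lengths: 2M, 64K and 4K */
          const unsigned int seg_len[] = { 2u << 20, 64u << 10, 4u << 10 };
          unsigned int sg_page_sizes = 0;
          unsigned int i;

          /* mirrors "sg_page_sizes |= sg->length" per segment */
          for (i = 0; i < sizeof(seg_len) / sizeof(seg_len[0]); i++)
                  sg_page_sizes |= seg_len[i];

          /* prints 0x211000: bits 12 (4K), 16 (64K) and 21 (2M) set */
          printf("sg_page_sizes = %#x\n", sg_page_sizes);
          return 0;
  }

Note the mask is derived from segment lengths, not exact page sizes: a segment made of eight contiguous 4K pages contributes 32K (bit 15), which is why the kernel later intersects this value with the page sizes the hardware actually supports.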
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 28
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_dmabuf.c | 6
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_internal.c | 8
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_userptr.c | 6
-rw-r--r-- | drivers/gpu/drm/i915/selftests/huge_pages.c | 18
-rw-r--r-- | drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 8
7 files changed, 38 insertions, 38 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 5d322cf490c4..770305bdeabb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3537,7 +3537,7 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                  struct sg_table *pages,
-                                 unsigned int sg_mask);
+                                 unsigned int sg_page_sizes);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
 static inline int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b43fae4b83e6..e829e8c900e8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2333,7 +2333,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         struct page *page;
         unsigned long last_pfn = 0;     /* suppress gcc warning */
         unsigned int max_segment = i915_sg_segment_size();
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         gfp_t noreclaim;
         int ret;
 
@@ -2365,7 +2365,7 @@ rebuild_st:
 
         sg = st->sgl;
         st->nents = 0;
-        sg_mask = 0;
+        sg_page_sizes = 0;
         for (i = 0; i < page_count; i++) {
                 const unsigned int shrink[] = {
                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
@@ -2419,7 +2419,7 @@ rebuild_st:
                     sg->length >= max_segment ||
                     page_to_pfn(page) != last_pfn + 1) {
                         if (i) {
-                                sg_mask |= sg->length;
+                                sg_page_sizes |= sg->length;
                                 sg = sg_next(sg);
                         }
                         st->nents++;
@@ -2433,7 +2433,7 @@ rebuild_st:
                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
         }
         if (sg) { /* loop terminated early; short sg table */
-                sg_mask |= sg->length;
+                sg_page_sizes |= sg->length;
                 sg_mark_end(sg);
         }
 
@@ -2464,7 +2464,7 @@ rebuild_st:
         if (i915_gem_object_needs_bit17_swizzle(obj))
                 i915_gem_object_do_bit_17_swizzle(obj, st);
 
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
         return 0;
 
@@ -2492,7 +2492,7 @@ err_pages:
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                  struct sg_table *pages,
-                                 unsigned int sg_mask)
+                                 unsigned int sg_page_sizes)
 {
         struct drm_i915_private *i915 = to_i915(obj->base.dev);
         unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -2512,16 +2512,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                 obj->mm.quirked = true;
         }
 
-        GEM_BUG_ON(!sg_mask);
-        obj->mm.page_sizes.phys = sg_mask;
+        GEM_BUG_ON(!sg_page_sizes);
+        obj->mm.page_sizes.phys = sg_page_sizes;
 
         /*
-         * Calculate the supported page-sizes which fit into the given sg_mask.
-         * This will give us the page-sizes which we may be able to use
-         * opportunistically when later inserting into the GTT. For example if
-         * phys=2G, then in theory we should be able to use 1G, 2M, 64K or 4K
-         * pages, although in practice this will depend on a number of other
-         * factors.
+         * Calculate the supported page-sizes which fit into the given
+         * sg_page_sizes. This will give us the page-sizes which we may be able
+         * to use opportunistically when later inserting into the GTT. For
+         * example if phys=2G, then in theory we should be able to use 1G, 2M,
+         * 64K or 4K pages, although in practice this will depend on a number of
+         * other factors.
          */
         obj->mm.page_sizes.sg = 0;
         for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index e542a9d80077..864439a214c8 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -259,16 +259,16 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
         struct sg_table *pages;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
 
         pages = dma_buf_map_attachment(obj->base.import_attach,
                                        DMA_BIDIRECTIONAL);
         if (IS_ERR(pages))
                 return PTR_ERR(pages);
 
-        sg_mask = i915_sg_page_sizes(pages->sgl);
+        sg_page_sizes = i915_sg_page_sizes(pages->sgl);
 
-        __i915_gem_object_set_pages(obj, pages, sg_mask);
+        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
         return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index bdc23c4c8783..ee83ec838ee7 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -49,7 +49,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
         struct drm_i915_private *i915 = to_i915(obj->base.dev);
         struct sg_table *st;
         struct scatterlist *sg;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         unsigned int npages;
         int max_order;
         gfp_t gfp;
@@ -88,7 +88,7 @@ create_st:
 
         sg = st->sgl;
         st->nents = 0;
-        sg_mask = 0;
+        sg_page_sizes = 0;
 
         do {
                 int order = min(fls(npages) - 1, max_order);
@@ -106,7 +106,7 @@ create_st:
                 } while (1);
 
                 sg_set_page(sg, page, PAGE_SIZE << order, 0);
-                sg_mask |= PAGE_SIZE << order;
+                sg_page_sizes |= PAGE_SIZE << order;
                 st->nents++;
 
                 npages -= 1 << order;
@@ -135,7 +135,7 @@ create_st:
          */
         obj->mm.madv = I915_MADV_DONTNEED;
 
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
         return 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 41e16e19c3f3..c36a84b070b6 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -405,7 +405,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
 {
         unsigned int max_segment = i915_sg_segment_size();
         struct sg_table *st;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         int ret;
 
         st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -435,9 +435,9 @@ alloc_table:
                 return ERR_PTR(ret);
         }
 
-        sg_mask = i915_sg_page_sizes(st->sgl);
+        sg_page_sizes = i915_sg_page_sizes(st->sgl);
 
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
         return st;
 }
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index b8495882e5b0..b8b9d0822199 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -68,7 +68,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
         unsigned int page_mask = obj->mm.page_mask;
         struct sg_table *st;
         struct scatterlist *sg;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         u64 rem;
 
         st = kmalloc(sizeof(*st), GFP);
@@ -83,7 +83,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
         rem = obj->base.size;
         sg = st->sgl;
         st->nents = 0;
-        sg_mask = 0;
+        sg_page_sizes = 0;
 
         /*
          * Our goal here is simple, we want to greedily fill the object from
@@ -104,7 +104,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
                                 goto err;
 
                         sg_set_page(sg, page, page_size, 0);
-                        sg_mask |= page_size;
+                        sg_page_sizes |= page_size;
                         st->nents++;
 
                         rem -= page_size;
@@ -124,8 +124,8 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
 
         obj->mm.madv = I915_MADV_DONTNEED;
 
-        GEM_BUG_ON(sg_mask != obj->mm.page_mask);
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
         return 0;
 
@@ -192,7 +192,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
         const u64 max_len = rounddown_pow_of_two(UINT_MAX);
         struct sg_table *st;
         struct scatterlist *sg;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         u64 rem;
 
         st = kmalloc(sizeof(*st), GFP);
@@ -208,7 +208,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
         rem = obj->base.size;
         sg = st->sgl;
         st->nents = 0;
-        sg_mask = 0;
+        sg_page_sizes = 0;
         do {
                 unsigned int page_size = get_largest_page_size(i915, rem);
                 unsigned int len = min(page_size * div_u64(rem, page_size),
@@ -221,7 +221,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
                 sg_dma_len(sg) = len;
                 sg_dma_address(sg) = page_size;
 
-                sg_mask |= len;
+                sg_page_sizes |= len;
 
                 st->nents++;
 
@@ -236,7 +236,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 
         obj->mm.madv = I915_MADV_DONTNEED;
 
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
         return 0;
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 883bc19e3aaf..9da0c9f99916 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -45,7 +45,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 #define PFN_BIAS 0x1000
         struct sg_table *pages;
         struct scatterlist *sg;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         typeof(obj->base.size) rem;
 
         pages = kmalloc(sizeof(*pages), GFP);
@@ -58,7 +58,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
                 return -ENOMEM;
         }
 
-        sg_mask = 0;
+        sg_page_sizes = 0;
         rem = obj->base.size;
         for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                 unsigned long len = min_t(typeof(rem), rem, BIT(31));
@@ -67,7 +67,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
                 sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                 sg_dma_address(sg) = page_to_phys(sg_page(sg));
                 sg_dma_len(sg) = len;
-                sg_mask |= len;
+                sg_page_sizes |= len;
 
                 rem -= len;
         }
@@ -75,7 +75,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 
         obj->mm.madv = I915_MADV_DONTNEED;
 
-        __i915_gem_object_set_pages(obj, pages, sg_mask);
+        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
 
         return 0;
 #undef GFP
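
The __i915_gem_object_set_pages() hunk above then narrows the accumulated mask (stored in obj->mm.page_sizes.phys) down to the page sizes the device supports: a supported size counts as usable when the mask has any bit set at or above it, i.e. at least one segment is that large. A standalone approximation of that for_each_set_bit() loop, with illustrative constants (in the kernel the supported mask comes from INTEL_INFO(i915)->page_sizes):

  #include <stdio.h>

  #define SZ_4K  (1u << 12)
  #define SZ_64K (1u << 16)
  #define SZ_2M  (1u << 21)
  #define SZ_1G  (1u << 30)

  int main(void)
  {
          /* page sizes a hypothetical device supports */
          const unsigned int supported = SZ_4K | SZ_64K | SZ_2M | SZ_1G;
          /* accumulated segment-length mask; phys=2G as in the comment */
          const unsigned int phys = 1u << 31;
          unsigned int sg = 0;
          unsigned int bit;

          /*
           * A supported size is usable when phys has any bit set at or
           * above it; this mirrors "if (phys & ~0u << i) sg |= BIT(i)"
           * from the hunk above.
           */
          for (bit = 0; bit < 32; bit++) {
                  if (!(supported & (1u << bit)))
                          continue;
                  if (phys & (~0u << bit))
                          sg |= 1u << bit;
          }

          /* prints 0x40211000: 1G, 2M, 64K and 4K all usable in theory */
          printf("page_sizes.sg = %#x\n", sg);
          return 0;
  }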