 drivers/gpu/drm/drm_mm.c                | 60
 drivers/gpu/drm/etnaviv/etnaviv_mmu.c   |  2
 drivers/gpu/drm/i915/i915_gem_evict.c   |  3
 drivers/gpu/drm/selftests/test-drm_mm.c | 10
 include/drm/drm_mm.h                    | 22
 5 files changed, 71 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 1b5613bcb35e..189ab84c5a59 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -718,10 +718,10 @@ EXPORT_SYMBOL(drm_mm_replace_node);
* @color: opaque tag value to use for the allocation
* @start: start of the allowed range for the allocation
* @end: end of the allowed range for the allocation
+ * @flags: flags to specify how the allocation will be performed afterwards
*
* This simply sets up the scanning routines with the parameters for the desired
- * hole. Note that there's no need to specify allocation flags, since they only
- * change the place a node is allocated from within a suitable hole.
+ * hole.
*
* Warning:
* As long as the scan list is non-empty, no other operations than
@@ -733,7 +733,8 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
u64 alignment,
unsigned long color,
u64 start,
- u64 end)
+ u64 end,
+ unsigned int flags)
{
DRM_MM_BUG_ON(start >= end);
DRM_MM_BUG_ON(!size || size > end - start);
@@ -744,6 +745,7 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
scan->color = color;
scan->alignment = alignment;
scan->size = size;
+ scan->flags = flags;
DRM_MM_BUG_ON(end <= start);
scan->range_start = start;
@@ -778,7 +780,7 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
DRM_MM_BUG_ON(node->mm != mm);
DRM_MM_BUG_ON(!node->allocated);
DRM_MM_BUG_ON(node->scanned_block);
- node->scanned_block = 1;
+ node->scanned_block = true;
mm->scan_active++;
hole = list_prev_entry(node, node_list);
@@ -800,15 +802,53 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
adj_start = max(col_start, scan->range_start);
adj_end = min(col_end, scan->range_end);
+ if (adj_end <= adj_start || adj_end - adj_start < scan->size)
+ return false;
+
+ if (scan->flags == DRM_MM_CREATE_TOP)
+ adj_start = adj_end - scan->size;
+
+ if (scan->alignment) {
+ u64 rem;
+
+ div64_u64_rem(adj_start, scan->alignment, &rem);
+ if (rem) {
+ adj_start -= rem;
+ if (scan->flags != DRM_MM_CREATE_TOP)
+ adj_start += scan->alignment;
+ if (adj_start < max(col_start, scan->range_start) ||
+ min(col_end, scan->range_end) - adj_start < scan->size)
+ return false;
+
+ if (adj_end <= adj_start ||
+ adj_end - adj_start < scan->size)
+ return false;
+ }
+ }
- if (check_free_hole(adj_start, adj_end,
- scan->size, scan->alignment)) {
+ if (mm->color_adjust) {
+ /* If allocations need adjusting due to neighbouring colours,
+ * we do not have enough information to decide if we need
+ * to evict nodes on either side of [adj_start, adj_end].
+ * What almost works is
+ * hit_start = adj_start + (hole_start - col_start);
+ * hit_end = adj_start + scan->size + (hole_end - col_end);
+ * but because the decision is only made on the final hole,
+ * we may underestimate the required adjustments for an
+ * interior allocation.
+ */
scan->hit_start = hole_start;
scan->hit_end = hole_end;
- return true;
+ } else {
+ scan->hit_start = adj_start;
+ scan->hit_end = adj_start + scan->size;
}
- return false;
+ DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
+ DRM_MM_BUG_ON(scan->hit_start < hole_start);
+ DRM_MM_BUG_ON(scan->hit_end > hole_end);
+
+ return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);
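For illustration, here is how the new hit computation behaves with mm->color_adjust unset (numbers chosen arbitrarily for this worked example, not taken from the patch):

    hole = [0x1000, 0x9000), scan->size = 0x2000, scan->alignment = 0x3000

    flags == 0 (bottom-up):
        rem = 0x1000 % 0x3000 = 0x1000
        adj_start = 0x1000 - 0x1000 + 0x3000 = 0x3000   (rounded up)
        hit = [0x3000, 0x5000)

    flags == DRM_MM_CREATE_TOP:
        adj_start = 0x9000 - 0x2000 = 0x7000
        rem = 0x7000 % 0x3000 = 0x1000
        adj_start = 0x7000 - 0x1000 = 0x6000            (rounded down)
        hit = [0x6000, 0x8000)

In both cases the hit is a size-sized, alignment-aligned span, placed at the bottom or top of the hole according to the flags.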
@@ -836,7 +876,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
DRM_MM_BUG_ON(node->mm != scan->mm);
DRM_MM_BUG_ON(!node->scanned_block);
- node->scanned_block = 0;
+ node->scanned_block = false;
DRM_MM_BUG_ON(!node->mm->scan_active);
node->mm->scan_active--;
@@ -846,7 +886,7 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
prev_node->hole_follows = node->scanned_preceeds_hole;
list_add(&node->node_list, &prev_node->node_list);
- return (drm_mm_hole_node_end(node) > scan->hit_start &&
+ return (node->start + node->size > scan->hit_start &&
node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);
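For reference, the scan API above is driven in an add/check/rollback loop. The following is a minimal sketch of a caller using the new flags argument; struct example_node, lru and evict_list are illustrative placeholders, not identifiers from this patch:

    struct example_node {
            struct drm_mm_node node;        /* allocated from the drm_mm */
            struct list_head lru_link;
            struct list_head evict_link;
    };

    struct drm_mm_scan scan;
    struct example_node *e, *next;
    LIST_HEAD(evict_list);
    bool found = false;

    drm_mm_scan_init_with_range(&scan, &mm, size, alignment, 0,
                                start, end, DRM_MM_CREATE_TOP);

    list_for_each_entry(e, &lru, lru_link) {
            /* list_add() prepends, so walking evict_list afterwards
             * visits blocks in reverse order of addition. */
            list_add(&e->evict_link, &evict_list);
            if (drm_mm_scan_add_block(&scan, &e->node)) {
                    found = true;
                    break;
            }
    }

    /* Every scanned block must be removed again before the drm_mm may
     * be used normally; a true return flags a node overlapping the
     * chosen hole, i.e. one that must be evicted to make room. */
    list_for_each_entry_safe(e, next, &evict_list, evict_link) {
            if (!drm_mm_scan_remove_block(&scan, &e->node))
                    list_del(&e->evict_link);
    }

    /* On success (found), evict what remains on evict_list, e.g. with
     * drm_mm_remove_node(), then insert the new node into the hole. */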
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 379ea3a96f0a..ae2733a609ba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -135,7 +135,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
}
/* Try to retire some entries */
- drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0);
+ drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0);
found = 0;
INIT_LIST_HEAD(&list);
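Etnaviv passes 0 for the new final argument, keeping its existing bottom-up scan over the whole address space. Given the drm_mm_scan_init() wrapper added to include/drm/drm_mm.h below, the call is equivalent to:

    drm_mm_scan_init_with_range(&scan, &mmu->mm, size, 0, 0,
                                0, U64_MAX, 0);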
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a6d5bab6f237..fa90a0c47976 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -128,7 +128,8 @@ i915_gem_evict_something(struct i915_address_space *vm,
*/
drm_mm_scan_init_with_range(&scan, &vm->mm,
min_size, alignment, cache_level,
- start, end);
+ start, end,
+ flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0);
if (flags & PIN_NONBLOCK)
phases[1] = NULL;
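Here i915 maps its PIN_HIGH bias onto DRM_MM_CREATE_TOP, so an eviction scan for a top-down allocation searches holes from the top of the range, matching where the node will later be created. Spelled out as an equivalent sketch (not the patch text):

    unsigned int scan_flags = 0;

    if (flags & PIN_HIGH)
            scan_flags = DRM_MM_CREATE_TOP;

    drm_mm_scan_init_with_range(&scan, &vm->mm,
                                min_size, alignment, cache_level,
                                start, end, scan_flags);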
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 997f2bc93b9b..1bbfc24342c5 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1199,7 +1199,7 @@ static bool evict_nothing(struct drm_mm *mm,
struct drm_mm_node *node;
unsigned int n;
- drm_mm_scan_init(&scan, mm, 1, 0, 0);
+ drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
for (n = 0; n < total_size; n++) {
e = &nodes[n];
list_add(&e->link, &evict_list);
@@ -1246,7 +1246,7 @@ static bool evict_everything(struct drm_mm *mm,
unsigned int n;
int err;
- drm_mm_scan_init(&scan, mm, total_size, 0, 0);
+ drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
for (n = 0; n < total_size; n++) {
e = &nodes[n];
list_add(&e->link, &evict_list);
@@ -1296,7 +1296,8 @@ static int evict_something(struct drm_mm *mm,
drm_mm_scan_init_with_range(&scan, mm,
size, alignment, 0,
- range_start, range_end);
+ range_start, range_end,
+ mode->create_flags);
if (!evict_nodes(&scan,
nodes, order, count,
&evict_list))
@@ -1874,7 +1875,8 @@ static int evict_color(struct drm_mm *mm,
drm_mm_scan_init_with_range(&scan, mm,
size, alignment, color,
- range_start, range_end);
+ range_start, range_end,
+ mode->create_flags);
if (!evict_nodes(&scan,
nodes, order, count,
&evict_list))
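The selftests forward mode->create_flags so each eviction scan is exercised with the same placement flags as the subsequent insertion. For orientation, the insert_modes table in test-drm_mm.c pairs search and create flags roughly as follows (abridged from memory; the file itself is authoritative):

    static const struct insert_mode {
            const char *name;
            unsigned int search_flags;
            unsigned int create_flags;
    } insert_modes[] = {
            { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
            { "bottom-up", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
            { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
            {}
    };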
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index bae0f10da8e3..606336fc229a 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -120,6 +120,7 @@ struct drm_mm_scan {
struct drm_mm_node *prev_scanned_node;
unsigned long color;
+ unsigned int flags;
};
/**
@@ -388,11 +389,9 @@ __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
struct drm_mm *mm,
- u64 size,
- u64 alignment,
- unsigned long color,
- u64 start,
- u64 end);
+ u64 size, u64 alignment, unsigned long color,
+ u64 start, u64 end,
+ unsigned int flags);
/**
* drm_mm_scan_init - initialize lru scanning
@@ -401,10 +400,10 @@ void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
* @size: size of the allocation
* @alignment: alignment of the allocation
* @color: opaque tag value to use for the allocation
+ * @flags: flags to specify how the allocation will be performed afterwards
*
* This simply sets up the scanning routines with the parameters for the desired
- * hole. Note that there's no need to specify allocation flags, since they only
- * change the place a node is allocated from within a suitable hole.
+ * hole.
*
* Warning:
* As long as the scan list is non-empty, no other operations than
@@ -414,10 +413,13 @@ static inline void drm_mm_scan_init(struct drm_mm_scan *scan,
struct drm_mm *mm,
u64 size,
u64 alignment,
- unsigned long color)
+ unsigned long color,
+ unsigned int flags)
{
- drm_mm_scan_init_with_range(scan, mm, size, alignment, color,
- 0, U64_MAX);
+ drm_mm_scan_init_with_range(scan, mm,
+ size, alignment, color,
+ 0, U64_MAX,
+ flags);
}
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,