Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 139
1 file changed, 90 insertions, 49 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 49b3ebc0e7a6..4a43ef5dba31 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -461,6 +461,54 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 	return ret;
 }
 
+#define __EXEC_OBJECT_HAS_FENCE (1<<31)
+
+static int
+pin_and_fence_object(struct drm_i915_gem_object *obj,
+		     struct intel_ring_buffer *ring)
+{
+	struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
+	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+	bool need_fence, need_mappable;
+	int ret;
+
+	need_fence =
+		has_fenced_gpu_access &&
+		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		obj->tiling_mode != I915_TILING_NONE;
+	need_mappable =
+		entry->relocation_count ? true : need_fence;
+
+	ret = i915_gem_object_pin(obj, entry->alignment, need_mappable);
+	if (ret)
+		return ret;
+
+	if (has_fenced_gpu_access) {
+		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
+			if (obj->tiling_mode) {
+				ret = i915_gem_object_get_fence(obj, ring);
+				if (ret)
+					goto err_unpin;
+
+				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
+				i915_gem_object_pin_fence(obj);
+			} else {
+				ret = i915_gem_object_put_fence(obj);
+				if (ret)
+					goto err_unpin;
+			}
+		}
+		obj->pending_fenced_gpu_access = need_fence;
+	}
+
+	entry->offset = obj->gtt_offset;
+	return 0;
+
+err_unpin:
+	i915_gem_object_unpin(obj);
+	return ret;
+}
+
 static int
 i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			    struct drm_file *file,
@@ -518,6 +566,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	list_for_each_entry(obj, objects, exec_list) {
 		struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
 		bool need_fence, need_mappable;
+
 		if (!obj->gtt_space)
 			continue;
 
@@ -532,58 +581,47 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 		    (need_mappable && !obj->map_and_fenceable))
 			ret = i915_gem_object_unbind(obj);
 		else
-			ret = i915_gem_object_pin(obj,
-						  entry->alignment,
-						  need_mappable);
+			ret = pin_and_fence_object(obj, ring);
 		if (ret)
 			goto err;
-
-		entry++;
 	}
 
 	/* Bind fresh objects */
 	list_for_each_entry(obj, objects, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
-		bool need_fence;
-
-		need_fence =
-			has_fenced_gpu_access &&
-			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-			obj->tiling_mode != I915_TILING_NONE;
-
-		if (!obj->gtt_space) {
-			bool need_mappable =
-				entry->relocation_count ? true : need_fence;
-
-			ret = i915_gem_object_pin(obj,
-						  entry->alignment,
-						  need_mappable);
-			if (ret)
-				break;
-		}
+		if (obj->gtt_space)
+			continue;
 
-		if (has_fenced_gpu_access) {
-			if (need_fence) {
-				ret = i915_gem_object_get_fence(obj, ring);
-				if (ret)
-					break;
-			} else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
-				   obj->tiling_mode == I915_TILING_NONE) {
-				/* XXX pipelined! */
-				ret = i915_gem_object_put_fence(obj);
-				if (ret)
-					break;
-			}
-			obj->pending_fenced_gpu_access = need_fence;
+		ret = pin_and_fence_object(obj, ring);
+		if (ret) {
+			int ret_ignore;
+
+			/* This can potentially raise a harmless
+			 * -EINVAL if we failed to bind in the above
+			 * call. It cannot raise -EINTR since we know
+			 * that the bo is freshly bound and so will
+			 * not need to be flushed or waited upon.
+			 */
+			ret_ignore = i915_gem_object_unbind(obj);
+			(void)ret_ignore;
+			WARN_ON(obj->gtt_space);
+			break;
 		}
-
-		entry->offset = obj->gtt_offset;
 	}
 
 	/* Decrement pin count for bound objects */
 	list_for_each_entry(obj, objects, exec_list) {
-		if (obj->gtt_space)
-			i915_gem_object_unpin(obj);
+		struct drm_i915_gem_exec_object2 *entry;
+
+		if (!obj->gtt_space)
+			continue;
+
+		entry = obj->exec_entry;
+		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+			i915_gem_object_unpin_fence(obj);
+			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+		}
+
+		i915_gem_object_unpin(obj);
 	}
 
 	if (ret != -ENOSPC || retry > 1)
@@ -600,16 +638,19 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 	} while (1);
 
 err:
-	obj = list_entry(obj->exec_list.prev,
-			 struct drm_i915_gem_object,
-			 exec_list);
-	while (objects != &obj->exec_list) {
-		if (obj->gtt_space)
-			i915_gem_object_unpin(obj);
+	list_for_each_entry_continue_reverse(obj, objects, exec_list) {
+		struct drm_i915_gem_exec_object2 *entry;
+
+		if (!obj->gtt_space)
+			continue;
+
+		entry = obj->exec_entry;
+		if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
+			i915_gem_object_unpin_fence(obj);
+			entry->flags &= ~__EXEC_OBJECT_HAS_FENCE;
+		}
 
-		obj = list_entry(obj->exec_list.prev,
-				 struct drm_i915_gem_object,
-				 exec_list);
+		i915_gem_object_unpin(obj);
 	}
 
 	return ret;
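A note on the structure this diff introduces: the private __EXEC_OBJECT_HAS_FENCE bit lets every path that drops an object's pin also tell whether a fence pin was taken and must be dropped too, so the err: unwind releases exactly what was acquired and nothing more. Below is a minimal userspace sketch of the same acquire-then-unwind-in-reverse pattern; acquire() and release() are hypothetical stand-ins for pin_and_fence_object() and the unpin calls, not functions from the driver.

#include <stdio.h>

/* Hypothetical stand-ins for pin_and_fence_object() and the unpin
 * calls in the diff; only the unwind structure is the point here. */
static int acquire(int i)  { return i == 3 ? -1 : 0; } /* fail on the 4th */
static void release(int i) { printf("release %d\n", i); }

int main(void)
{
	int n = 5, ret = 0, i;

	for (i = 0; i < n; i++) {
		ret = acquire(i);
		if (ret)
			break;	/* i names the entry that failed */
	}

	if (ret) {
		/* Unwind in reverse, touching only entries that were
		 * successfully acquired; entry i acquired nothing, so
		 * it is skipped. */
		while (i--)
			release(i);
	}

	return ret ? 1 : 0;
}

In the kernel code, list_for_each_entry_continue_reverse() plays the role of the while (i--) walk: it resumes from the object whose pin_and_fence_object() call failed and iterates back toward the head of the list, skipping the failed entry itself.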