Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c         6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h         3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        21
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_fence.c  12
4 files changed, 28 insertions, 14 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 885d33f341f3..99e4e044e958 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2278,10 +2278,8 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
vlv_check_no_gt_access(dev_priv);
- if (rpm_resume) {
+ if (rpm_resume)
intel_init_clock_gating(dev);
- i915_gem_restore_fences(dev);
- }
return ret;
}
@@ -2307,7 +2305,7 @@ static int intel_runtime_suspend(struct device *kdev)
* We are safe here against re-faults, since the fault handler takes
* an RPM reference.
*/
- i915_gem_release_all_mmaps(dev_priv);
+ i915_gem_runtime_suspend(dev_priv);
intel_guc_suspend(dev);
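Aside, for illustration only (a sketch, not part of the patch): the comment above relies on every GTT fault path holding a runtime-PM reference, so a re-fault cannot race the mmap revocation done here. A minimal sketch of that pattern, using the real intel_runtime_pm_get()/intel_runtime_pm_put() helpers around a hypothetical access_through_gtt() placeholder:

/* Sketch only: keep the device awake for the duration of a GTT access so
 * that runtime suspend (and the mmap revocation above) cannot run
 * concurrently. access_through_gtt() is a made-up placeholder.
 */
static int do_gtt_access(struct drm_i915_private *dev_priv,
                         struct drm_i915_gem_object *obj)
{
        int ret;

        intel_runtime_pm_get(dev_priv);    /* blocks runtime suspend */
        ret = access_through_gtt(obj);     /* hypothetical fenced access */
        intel_runtime_pm_put(dev_priv);    /* re-enables runtime suspend */

        return ret;
}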
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ad1a1fbc5864..dd3acabb7edb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3130,9 +3130,10 @@ void i915_vma_destroy(struct i915_vma *vma);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
+
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline int __sg_page_count(struct scatterlist *sg)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 38053434da8a..58d4ad2026ea 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1952,10 +1952,10 @@ out:
intel_runtime_pm_put(i915);
}
-void
-i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *on;
+ int i;
/*
* Only called during RPM suspend. All users of the userfault_list
@@ -1970,6 +1970,23 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);
}
+
+ /* The fence will be lost when the device powers down. If any were
+ * in use by hardware (i.e. they are pinned), we should not be powering
+ * down! All other fences will be reacquired by the user upon waking.
+ */
+ for (i = 0; i < dev_priv->num_fence_regs; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+
+ if (WARN_ON(reg->pin_count))
+ continue;
+
+ if (!reg->vma)
+ continue;
+
+ GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+ reg->dirty = true;
+ }
}
/**
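Aside, for illustration only (a sketch, not part of the patch): the reg->dirty flag set in the loop above is what lets fences be restored lazily rather than eagerly on runtime resume. A rough sketch of the consuming side, assuming a fence_write()-style low-level helper as in i915_gem_fence.c, not the exact upstream loop:

/* Sketch: a dirty fence register is simply rewritten from the software
 * tracking the next time the fences are restored; fence_write() is the
 * assumed helper that programs the hardware register.
 */
for (i = 0; i < dev_priv->num_fence_regs; i++) {
        struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

        if (!reg->dirty)
                continue;

        fence_write(reg, reg->vma);
        reg->dirty = false;
}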
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 67013179b8ed..3c5a8082cac3 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -343,6 +343,9 @@ i915_vma_get_fence(struct i915_vma *vma)
struct drm_i915_fence_reg *fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ /* Note that we revoke fences on runtime suspend. Therefore the user
+ * must keep the device awake whilst using the fence.
+ */
assert_rpm_wakelock_held(to_i915(vma->vm->dev));
/* Just update our place in the LRU if our fence is getting reused. */
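Aside, for illustration only (a sketch, not part of the patch): per the new comment, a caller must hold an RPM wakeref for as long as it uses a fence-backed mapping. i915_vma_get_fence() and the runtime-PM helpers are the real APIs; the access itself is a placeholder:

/* Sketch: acquire and use a fence only while holding an RPM wakeref so
 * the runtime-suspend cancellation cannot revoke it mid-use.
 */
intel_runtime_pm_get(dev_priv);

ret = i915_vma_get_fence(vma);
if (ret == 0) {
        /* ... tiled access through the fenced mapping goes here ... */
}

intel_runtime_pm_put(dev_priv);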
@@ -368,19 +371,14 @@ i915_vma_get_fence(struct i915_vma *vma)
* @dev: DRM device
*
* Restore the hw fence state to match the software tracking again, to be called
- * after a gpu reset and on resume.
+ * after a gpu reset and on resume. Note that on runtime suspend we only cancel
+ * the fences, to be reacquired by the user later.
*/
void i915_gem_restore_fences(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
int i;
- /* Note that this may be called outside of struct_mutex, by
- * runtime suspend/resume. The barrier we require is enforced by
- * rpm itself - all access to fences/GTT are only within an rpm
- * wakeref, and to acquire that wakeref you must pass through here.
- */
-
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
struct i915_vma *vma = reg->vma;
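Aside, for illustration only (a sketch, not part of the patch): full resume and GPU reset still restore the registers eagerly via i915_gem_restore_fences(); a rough sketch of such a caller, where every name other than i915_gem_restore_fences() is illustrative:

/* Sketch of a full (non-runtime) resume path: once power and the GTT are
 * back, the hardware fence registers are reloaded from software tracking.
 */
static int example_drm_resume(struct drm_device *dev)
{
        i915_gem_restore_fences(dev);

        /* ... remainder of resume: GEM, display, interrupts, etc. ... */
        return 0;
}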