| author | Chris Wilson <chris@chris-wilson.co.uk> | 2015-10-01 12:34:45 +0100 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-10-06 14:15:29 +0200 |
| commit | 68d6c840595849c0d29f6c52bc75b44ded66b41f (patch) | |
| tree | c53a80755328b103bdf4c79c60d4f5a7e037f318 /drivers/gpu/drm | |
| parent | 24dfd0736c9fc01d096e5760c656032b5a07e962 (diff) | |
drm/i915: Only update the current userptr worker
The userptr worker allows for a slight race condition whereupon there
may be two or more threads calling get_user_pages for the same object.
Once we have the array of pages, we serialise the update of the object.
However, the worker should overwrite the obj->userptr.work pointer only
if it is the active one. Currently we clear it even for a secondary
worker, with the effect that we may rarely force a second lookup.
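
The invariant the patch enforces is that only the worker still referenced by obj->userptr.work may publish its pinned pages and record its status; a superseded worker drops its result without touching the pointer. The userspace C sketch below illustrates that "only the active worker publishes" pattern with pthreads. It is a simplified analogy, not i915 code: the names active_work, result and worker are illustrative only.

```c
/*
 * Minimal userspace sketch (assumption: not kernel code) of the pattern
 * the patch enforces: several workers may race to produce a result for
 * the same object, but only the worker that is still recorded as the
 * active one may publish it.
 */
#include <pthread.h>
#include <stdio.h>

struct work { int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct work *active_work;   /* plays the role of obj->userptr.work   */
static int result = -1;            /* plays the role of the stored page set */

static void *worker(void *arg)
{
	struct work *w = arg;
	int pages = w->id * 100;   /* stand-in for a get_user_pages() result */

	pthread_mutex_lock(&lock);
	if (active_work == w) {
		/* Only the worker that is still the active one publishes
		 * and clears the slot (analogous to writing ERR_PTR(ret)). */
		result = pages;
		active_work = NULL;
	}
	/* A superseded worker simply drops its result. */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct work stale = { 1 }, newest = { 2 };
	pthread_t t1, t2;

	active_work = &newest;     /* the object points at the newer worker */
	pthread_create(&t1, NULL, worker, &stale);
	pthread_create(&t2, NULL, worker, &newest);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("result = %d\n", result);   /* always 200, never the stale 100 */
	return 0;
}
```

Build with `cc -pthread`; regardless of scheduling, only the thread matching active_work ever writes result, which is the behaviour the patch gives the userptr worker.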
v2: Rebase and rename a variable to avoid 80cols
v3: Mention v2
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem_userptr.c | 32 |
1 file changed, 16 insertions, 16 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index d11901d590ac..800a5394aa1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -571,25 +571,25 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	struct get_pages_work *work = container_of(_work, typeof(*work), work);
 	struct drm_i915_gem_object *obj = work->obj;
 	struct drm_device *dev = obj->base.dev;
-	const int num_pages = obj->base.size >> PAGE_SHIFT;
+	const int npages = obj->base.size >> PAGE_SHIFT;
 	struct page **pvec;
 	int pinned, ret;
 
 	ret = -ENOMEM;
 	pinned = 0;
 
-	pvec = kmalloc(num_pages*sizeof(struct page *),
+	pvec = kmalloc(npages*sizeof(struct page *),
 		       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
 	if (pvec == NULL)
-		pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
+		pvec = drm_malloc_ab(npages, sizeof(struct page *));
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
 		down_read(&mm->mmap_sem);
-		while (pinned < num_pages) {
+		while (pinned < npages) {
 			ret = get_user_pages(work->task, mm,
 					     obj->userptr.ptr + pinned * PAGE_SIZE,
-					     num_pages - pinned,
+					     npages - pinned,
 					     !obj->userptr.read_only, 0,
 					     pvec + pinned, NULL);
 			if (ret < 0)
@@ -601,20 +601,20 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	if (obj->userptr.work != &work->work) {
-		ret = 0;
-	} else if (pinned == num_pages) {
-		ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
-		if (ret == 0) {
-			list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
-			obj->get_page.sg = obj->pages->sgl;
-			obj->get_page.last = 0;
-
-			pinned = 0;
+	if (obj->userptr.work == &work->work) {
+		if (pinned == npages) {
+			ret = __i915_gem_userptr_set_pages(obj, pvec, npages);
+			if (ret == 0) {
+				list_add_tail(&obj->global_list,
+					      &to_i915(dev)->mm.unbound_list);
+				obj->get_page.sg = obj->pages->sgl;
+				obj->get_page.last = 0;
+				pinned = 0;
+			}
 		}
+		obj->userptr.work = ERR_PTR(ret);
 	}
 
-	obj->userptr.work = ERR_PTR(ret);
 	obj->userptr.workers--;
 	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);