author     Dave Airlie <airlied@redhat.com>    2016-12-23 05:28:02 +1000
committer  Dave Airlie <airlied@redhat.com>    2016-12-23 05:28:02 +1000
commit     4a401ceeef7bf3bc55f5e913cbf19d6038cf83c6 (patch)
tree       752e305ef740477c196102bc947cce48fdd74c86 /drivers/gpu/drm/i915/i915_gem.c
parent     d043835d08b297a41275e4dd499a33dd57243dee (diff)
parent     6ba0566cf2afcdb17bff882e3a95cbbcb22c4a83 (diff)
Merge tag 'drm-intel-next-fixes-2016-12-22' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes
First set of i915 fixes for code in next.

* tag 'drm-intel-next-fixes-2016-12-22' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: skip the first 4k of stolen memory on everything >= gen8
  drm/i915: Fallback to single PAGE_SIZE segments for DMA remapping
  drm/i915: Fix use after free in logical_render_ring_init
  drm/i915: disable PSR by default on HSW/BDW
  drm/i915: Fix setting of boost freq tunable
  drm/i915: tune down the fast link training vs boot fail
  drm/i915: Reorder phys backing storage release
  drm/i915/gen9: Fix PCODE polling during SAGV disabling
  drm/i915/gen9: Fix PCODE polling during CDCLK change notification
  drm/i915/dsi: Fix chv_exec_gpio disabling the GPIOs it is setting
  drm/i915/dsi: Fix swapping of MIPI_SEQ_DEASSERT_RESET / MIPI_SEQ_ASSERT_RESET
  drm/i915/dsi: Do not clear DPOUNIT_CLOCK_GATE_DISABLE from vlv_init_display_clock_gating
  drm/i915: drop the struct_mutex when wedged or trying to reset
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 79
1 file changed, 56 insertions(+), 23 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 412f3513f269..4a31b7a891ec 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -174,21 +174,35 @@ static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
+ drm_dma_handle_t *phys;
struct sg_table *st;
struct scatterlist *sg;
+ char *vaddr;
int i;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return ERR_PTR(-EINVAL);
+ /* Always aligning to the object size, allows a single allocation
+ * to handle all possible callers, and given typical object sizes,
+ * the alignment of the buddy allocation will naturally match.
+ */
+ phys = drm_pci_alloc(obj->base.dev,
+ obj->base.size,
+ roundup_pow_of_two(obj->base.size));
+ if (!phys)
+ return ERR_PTR(-ENOMEM);
+
+ vaddr = phys->vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
char *src;
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page))
- return ERR_CAST(page);
+ if (IS_ERR(page)) {
+ st = ERR_CAST(page);
+ goto err_phys;
+ }
src = kmap_atomic(page);
memcpy(vaddr, src, PAGE_SIZE);
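
The replacement allocation above aligns to the object size rounded up to a power of two, so a single drm_pci_alloc() call can serve every caller regardless of the alignment each used to request. A minimal sketch of that policy, assuming the v4.10-era drm_pci_alloc()/drm_dma_handle_t API (the helper name is illustrative, not part of the patch):

#include <drm/drmP.h>      /* drm_pci_alloc(), drm_dma_handle_t (pre-4.15 umbrella header) */
#include <linux/log2.h>    /* roundup_pow_of_two() */

static drm_dma_handle_t *phys_backing_alloc(struct drm_device *dev, size_t size)
{
	/* The buddy allocator hands back blocks naturally aligned to
	 * their power-of-two size, so aligning to the rounded-up object
	 * size rarely over-aligns in practice. */
	return drm_pci_alloc(dev, size, roundup_pow_of_two(size));
}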
@@ -202,21 +216,29 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (st == NULL)
- return ERR_PTR(-ENOMEM);
+ if (!st) {
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
+ }
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
- return ERR_PTR(-ENOMEM);
+ st = ERR_PTR(-ENOMEM);
+ goto err_phys;
}
sg = st->sgl;
sg->offset = 0;
sg->length = obj->base.size;
- sg_dma_address(sg) = obj->phys_handle->busaddr;
+ sg_dma_address(sg) = phys->busaddr;
sg_dma_len(sg) = obj->base.size;
+ obj->phys_handle = phys;
+ return st;
+
+err_phys:
+ drm_pci_free(obj->base.dev, phys);
return st;
}
@@ -272,12 +294,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
sg_free_table(pages);
kfree(pages);
+
+ drm_pci_free(obj->base.dev, obj->phys_handle);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
- drm_pci_free(obj->base.dev, obj->phys_handle);
i915_gem_object_unpin_pages(obj);
}
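
Moving drm_pci_free() from release into put_pages pairs the backing store's lifetime with the get_pages/put_pages cycle, so the handle is freed exactly when the pages it describes are dropped rather than at object destruction. This hangs off the object-ops table; a sketch of the wiring as i915 had it around v4.10 (field layout from that era, treat as approximate):

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,  /* allocates obj->phys_handle */
	.put_pages = i915_gem_object_put_pages_phys,  /* now also frees obj->phys_handle */
	.release   = i915_gem_object_release_phys,    /* only unpins; no double free */
};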
@@ -538,15 +561,13 @@ int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
int align)
{
- drm_dma_handle_t *phys;
int ret;
- if (obj->phys_handle) {
- if ((unsigned long)obj->phys_handle->vaddr & (align -1))
- return -EBUSY;
+ if (align > obj->base.size)
+ return -EINVAL;
+ if (obj->ops == &i915_gem_phys_ops)
return 0;
- }
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
@@ -562,12 +583,6 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->mm.pages)
return -EBUSY;
- /* create a new object */
- phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
- if (!phys)
- return -ENOMEM;
-
- obj->phys_handle = phys;
obj->ops = &i915_gem_phys_ops;
return i915_gem_object_pin_pages(obj);
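
After this hunk, attach_phys no longer allocates anything itself: it validates the request, flips the ops, and defers the actual drm_pci_alloc() to the first pin_pages. A hypothetical caller sketch (the helper name and alignment value are illustrative only):

/* Hypothetical usage: switch a small object to contiguous physical
 * backing, e.g. for a legacy cursor that must be physically addressed. */
static int pin_phys_example(struct drm_i915_gem_object *obj)
{
	/* An align larger than the object now fails with -EINVAL, and a
	 * second call on an already-phys object returns 0 early. */
	return i915_gem_object_attach_phys(obj, PAGE_SIZE);
}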
@@ -2326,7 +2341,8 @@ static struct sg_table *
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- int page_count, i;
+ const unsigned long page_count = obj->base.size / PAGE_SIZE;
+ unsigned long i;
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
@@ -2352,7 +2368,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
if (st == NULL)
return ERR_PTR(-ENOMEM);
- page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
kfree(st);
return ERR_PTR(-ENOMEM);
@@ -2411,8 +2427,25 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
i915_sg_trim(st);
ret = i915_gem_gtt_prepare_pages(obj, st);
- if (ret)
- goto err_pages;
+ if (ret) {
+ /* DMA remapping failed? One possible cause is that
+ * it could not reserve enough large entries, asking
+ * for PAGE_SIZE chunks instead may be helpful.
+ */
+ if (max_segment > PAGE_SIZE) {
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
+ sg_free_table(st);
+
+ max_segment = PAGE_SIZE;
+ goto rebuild_st;
+ } else {
+ dev_warn(&dev_priv->drm.pdev->dev,
+ "Failed to DMA remap %lu pages\n",
+ page_count);
+ goto err_pages;
+ }
+ }
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
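
This last hunk is the "Fallback to single PAGE_SIZE segments for DMA remapping" fix from the pull: a coalesced scatterlist can ask the remapper for larger contiguous entries than it will reserve, so on failure the table is rebuilt with one page per segment and the mapping retried once. A condensed sketch of the pattern, with the page-gathering loop elided (the real code also drops its page references before freeing the table):

static int map_pages_with_fallback(struct drm_i915_gem_object *obj,
				   struct sg_table *st)
{
	unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
	int ret;

rebuild_st:
	/* ... (re)populate st here, merging adjacent pages into
	 * segments of at most max_segment bytes ... */

	ret = i915_gem_gtt_prepare_pages(obj, st);  /* DMA-maps the table */
	if (ret && max_segment > PAGE_SIZE) {
		sg_free_table(st);
		max_segment = PAGE_SIZE;  /* retry with single-page segments */
		goto rebuild_st;
	}
	return ret;
}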