Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c               3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c               3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h               3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              52
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c    4
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c               1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h               1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c           5
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c        308
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c          5
10 files changed, 264 insertions, 121 deletions
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0239e9974bf2..2b79588541e7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2182,9 +2182,8 @@ int i915_driver_unload(struct drm_device *dev)
/* Flush any outstanding unpin_work. */
flush_workqueue(dev_priv->wq);
- i915_gem_free_all_phys_object(dev);
-
mutex_lock(&dev->struct_mutex);
+ i915_gem_free_all_phys_object(dev);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 0defd4270594..609358faaa90 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -579,6 +579,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
} else switch (INTEL_INFO(dev)->gen) {
case 6:
ret = gen6_do_reset(dev, flags);
+ /* If reset with a user forcewake, try to restore */
+ if (atomic_read(&dev_priv->forcewake_count))
+ __gen6_gt_force_wake_get(dev_priv);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
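The hunk above re-asserts forcewake after a gen6 reset only if userspace still holds a reference. A minimal userspace sketch of that reference-counted re-acquire pattern (forcewake_count, force_wake_get and do_reset are illustrative stand-ins, not the driver's symbols):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int forcewake_count;	/* stand-in for dev_priv->forcewake_count */

static void force_wake_get(void)
{
	puts("hw: force wake asserted");
}

static void do_reset(void)
{
	puts("hw: reset (force-wake bit lost)");
	/* Reset clobbers the hardware bit but not our bookkeeping, so a
	 * non-zero count tells us a re-assert is needed. */
	if (atomic_load(&forcewake_count))
		force_wake_get();
}

int main(void)
{
	atomic_fetch_add(&forcewake_count, 1);	/* user grabs forcewake */
	do_reset();				/* wake survives the reset */
	return 0;
}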
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f63ee162f124..eddabf68e97a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -211,6 +211,9 @@ struct drm_i915_display_funcs {
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
+ int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
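This new queue_flip hook replaces the per-generation switch that used to run at flip time (see the intel_display.c changes below) with a pointer chosen once at init. A self-contained sketch of the dispatch pattern, with a simplified signature and illustrative names:

#include <errno.h>
#include <stdio.h>

struct display_funcs {
	int (*queue_flip)(int plane);	/* simplified stand-in signature */
};

static int gen2_queue_flip(int plane)
{
	printf("gen2 flip, plane %d\n", plane);
	return 0;
}

static int gen6_queue_flip(int plane)
{
	printf("gen6 flip, plane %d\n", plane);
	return 0;
}

static int default_queue_flip(int plane)
{
	(void)plane;
	return -ENODEV;			/* unsupported generation */
}

static void init_display(struct display_funcs *f, int gen)
{
	f->queue_flip = default_queue_flip;	/* safe default, overridden below */
	switch (gen) {
	case 2: f->queue_flip = gen2_queue_flip; break;
	case 6: f->queue_flip = gen6_queue_flip; break;
	}
}

int main(void)
{
	struct display_funcs funcs;

	init_display(&funcs, 6);
	return funcs.queue_flip(0) ? 1 : 0;	/* one indirect call, no switch */
}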
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94c84d744100..5c0d1247f453 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
@@ -359,8 +360,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -463,8 +463,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -797,8 +796,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
@@ -907,8 +905,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
@@ -1219,11 +1216,11 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
- }
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto unlock;
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+ }
if (obj->tiling_mode == I915_TILING_NONE)
ret = i915_gem_object_put_fence(obj);
@@ -1558,12 +1555,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
inode = obj->base.filp->f_path.dentry->d_inode;
mapping = inode->i_mapping;
+ gfpmask |= mapping_gfp_mask(mapping);
+
for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
+ page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
if (IS_ERR(page))
goto err_pages;
@@ -1701,13 +1696,10 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
/* Our goal here is to return as much of the memory as
* is possible back to the system as we are called from OOM.
* To do this we must instruct the shmfs to drop all of its
- * backing pages, *now*. Here we mirror the actions taken
- * when by shmem_delete_inode() to release the backing store.
+ * backing pages, *now*.
*/
inode = obj->base.filp->f_path.dentry->d_inode;
- truncate_inode_pages(inode->i_mapping, 0);
- if (inode->i_op->truncate_range)
- inode->i_op->truncate_range(inode, 0, (loff_t)-1);
+ shmem_truncate_range(inode, 0, (loff_t)-1);
obj->madv = __I915_MADV_PURGED;
}
@@ -2080,8 +2072,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
if (!ier) {
DRM_ERROR("something (likely vbetool) disabled "
"interrupts, re-enabling\n");
- i915_driver_irq_preinstall(ring->dev);
- i915_driver_irq_postinstall(ring->dev);
+ ring->dev->driver->irq_preinstall(ring->dev);
+ ring->dev->driver->irq_postinstall(ring->dev);
}
trace_i915_gem_request_wait_begin(ring, seqno);
@@ -2926,8 +2918,6 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
*/
wmb();
- i915_gem_release_mmap(obj);
-
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3567,6 +3557,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
+ struct address_space *mapping;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
@@ -3577,6 +3568,9 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
return NULL;
}
+ mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
+
i915_gem_info_add_obj(dev_priv, size);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
@@ -3952,8 +3946,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- struct page *page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ struct page *page = shmem_read_mapping_page(mapping, i);
if (!IS_ERR(page)) {
char *dst = kmap_atomic(page);
memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
@@ -4014,8 +4007,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
struct page *page;
char *dst, *src;
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
return PTR_ERR(page);
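The common thread in these i915_gem.c conversions: rather than passing GFP flags at every page lookup, the allocation policy is recorded once on the object's mapping, and shmem_read_mapping_page() picks it up from there. A kernel-style sketch of the idiom (an excerpt shaped like the patch, not a standalone-buildable unit):

/* At object creation: record the allocation policy on the mapping once. */
mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

/* At page-in time: no flags at the call site; shmem consults the mapping. */
page = shmem_read_mapping_page(mapping, index);
if (IS_ERR(page))
	return PTR_ERR(page);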
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 20a4cc5b818f..4934cf84c320 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -187,10 +187,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
- /* blow away mappings if mapped through GTT */
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
- i915_gem_release_mmap(obj);
-
if (obj->base.pending_write_domain)
cd->flips |= atomic_read(&obj->pending_flip);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9e34a1abeb61..ae2b49969b99 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1749,6 +1749,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
* happens.
*/
I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_HWSTAM, ~GEN6_BSD_USER_INTERRUPT);
}
/* XXX hotplug from PCH */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 2f967af8e62e..5d5def756c9e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -531,6 +531,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE 0
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
+#define GEN6_BSD_HWSTAM 0x12098
#define GEN6_BSD_IMR 0x120a8
#define GEN6_BSD_USER_INTERRUPT (1 << 12)
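HWSTAM-style registers mask status reporting per bit, so writing the complement of a single event bit leaves only that event unmasked. A small runnable check of the bit arithmetic, reusing the value defined above (GEN6_BSD_USER_INTERRUPT is bit 12):

#include <stdint.h>
#include <stdio.h>

#define GEN6_BSD_USER_INTERRUPT (1u << 12)

int main(void)
{
	/* Everything masked except the BSD user interrupt. */
	uint32_t hwstam = ~GEN6_BSD_USER_INTERRUPT;

	printf("HWSTAM = 0x%08x\n", (unsigned)hwstam);	/* 0xffffefff */
	printf("bit 12 clear (unmasked)? %s\n",
	       (hwstam & GEN6_BSD_USER_INTERRUPT) ? "no" : "yes");
	return 0;
}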
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a94d2b5264..e8152d23d5b6 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -678,6 +678,7 @@ void i915_save_display(struct drm_device *dev)
}
/* VGA state */
+ mutex_lock(&dev->struct_mutex);
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
@@ -687,6 +688,7 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
+ mutex_unlock(&dev->struct_mutex);
}
void i915_restore_display(struct drm_device *dev)
@@ -780,6 +782,8 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+
+ mutex_lock(&dev->struct_mutex);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -787,6 +791,7 @@ void i915_restore_display(struct drm_device *dev)
udelay(150);
i915_restore_vga(dev);
+ mutex_unlock(&dev->struct_mutex);
}
int i915_save_state(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 81a9059b6a94..21b6f93fe919 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4687,6 +4687,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
+ intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -5217,8 +5218,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
- if (!HAS_PCH_SPLIT(dev))
- intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@@ -6262,6 +6261,197 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
+static int intel_gen2_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ /* Can't queue multiple flips, so wait for the previous
+ * one to finish before executing the next.
+ */
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
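The offset computed above turns the CRTC's (x, y) pan position into a byte offset: y full rows of pitch bytes, plus x pixels at bits_per_pixel/8 bytes each. A runnable worked example with assumed values (1920-wide framebuffer at 32 bpp, so pitch is 7680 bytes):

#include <stdio.h>

int main(void)
{
	unsigned long pitch = 7680, bpp = 32;	/* assumed: 1920 px * 4 bytes */
	unsigned long x = 8, y = 4;		/* scanout origin within the fb */

	unsigned long offset = y * pitch + x * bpp / 8;
	printf("scanout offset = %lu bytes\n", offset);	/* 4*7680 + 8*4 = 30752 */
	return 0;
}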
+
+static int intel_gen3_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ unsigned long offset;
+ u32 flip_mask;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ /* Offset into the new buffer for cases of shared fbs between CRTCs */
+ offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ goto out;
+
+ if (intel_crtc->plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+ OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
+ OUT_RING(MI_NOOP);
+ OUT_RING(MI_DISPLAY_FLIP_I915 |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset + offset);
+ OUT_RING(MI_NOOP);
+
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen4_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ /* i965+ uses the linear or tiled offsets from the
+ * Display Registers (which do not change across a page-flip)
+ * so we need only reprogram the base address.
+ */
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
+
+ /* XXX Enabling the panel-fitter across page-flip is so far
+ * untested on non-native modes, so ignore it for now.
+ * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
+ */
+ pf = 0;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+static int intel_gen6_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ uint32_t pf, pipesrc;
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
+ if (ret)
+ goto out;
+
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto out;
+
+ OUT_RING(MI_DISPLAY_FLIP |
+ MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
+
+ pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
+ pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
+ OUT_RING(pf | pipesrc);
+ ADVANCE_LP_RING();
+out:
+ return ret;
+}
+
+/*
+ * On gen7 we currently use the blit ring because (in early silicon at least)
+ * the render ring doesn't give us interrupts for page flip completion, which
+ * means clients will hang after the first flip is queued. Fortunately the
+ * blit ring generates interrupts properly, so use it instead.
+ */
+static int intel_gen7_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+ int ret;
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
+ if (ret)
+ goto out;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto out;
+
+ intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
+ intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (obj->gtt_offset));
+ intel_ring_emit(ring, (MI_NOOP));
+ intel_ring_advance(ring);
+out:
+ return ret;
+}
+
+static int intel_default_queue_flip(struct drm_device *dev,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj)
+{
+ return -ENODEV;
+}
+
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event)
@@ -6272,9 +6462,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- unsigned long flags, offset;
- int pipe = intel_crtc->pipe;
- u32 pf, pipesrc;
+ unsigned long flags;
int ret;
work = kzalloc(sizeof *work, GFP_KERNEL);
@@ -6303,9 +6491,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
- if (ret)
- goto cleanup_work;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(&work->old_fb_obj->base);
@@ -6317,91 +6502,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
- if (IS_GEN3(dev) || IS_GEN2(dev)) {
- u32 flip_mask;
-
- /* Can't queue multiple flips, so wait for the previous
- * one to finish before executing the next.
- */
- ret = BEGIN_LP_RING(2);
- if (ret)
- goto cleanup_objs;
-
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- OUT_RING(MI_WAIT_FOR_EVENT | flip_mask);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
- }
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
- /* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
-
- ret = BEGIN_LP_RING(4);
- if (ret)
- goto cleanup_objs;
-
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
- switch (INTEL_INFO(dev)->gen) {
- case 2:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 3:
- OUT_RING(MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset + offset);
- OUT_RING(MI_NOOP);
- break;
-
- case 4:
- case 5:
- /* i965+ uses the linear or tiled offsets from the
- * Display Registers (which do not change across a page-flip)
- * so we need only reprogram the base address.
- */
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
- OUT_RING(obj->gtt_offset | obj->tiling_mode);
-
- /* XXX Enabling the panel-fitter across page-flip is so far
- * untested on non-native modes, so ignore it for now.
- * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
-
- case 6:
- case 7:
- OUT_RING(MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj->tiling_mode);
- OUT_RING(obj->gtt_offset);
-
- pf = I915_READ(PF_CTL(pipe)) & PF_ENABLE;
- pipesrc = I915_READ(PIPESRC(pipe)) & 0x0fff0fff;
- OUT_RING(pf | pipesrc);
- break;
- }
- ADVANCE_LP_RING();
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ if (ret)
+ goto cleanup_pending;
mutex_unlock(&dev->struct_mutex);
@@ -6409,10 +6521,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
+cleanup_pending:
+ atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
-cleanup_work:
mutex_unlock(&dev->struct_mutex);
spin_lock_irqsave(&dev->event_lock, flags);
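The new cleanup_pending label exists because pending_flip is bumped before queue_flip() runs and must be decremented on every failure path. A compact userspace sketch of that publish-then-unwind pattern (queue_flip and page_flip here are illustrative):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pending_flip;

static int queue_flip(int fail)
{
	return fail ? -EIO : 0;
}

static int page_flip(int plane, int fail)
{
	int ret;

	/* Publish the pending flip before queueing it... */
	atomic_fetch_add(&pending_flip, 1 << plane);

	ret = queue_flip(fail);
	if (ret)
		goto cleanup_pending;
	return 0;

cleanup_pending:
	/* ...and unwind the count if queueing never happened. */
	atomic_fetch_sub(&pending_flip, 1 << plane);
	return ret;
}

int main(void)
{
	page_flip(0, 1);	/* failure path stays balanced */
	printf("pending_flip = %d\n", atomic_load(&pending_flip));	/* 0 */
	return 0;
}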
@@ -7657,6 +7770,31 @@ static void intel_init_display(struct drm_device *dev)
else
dev_priv->display.get_fifo_size = i830_get_fifo_size;
}
+
+ /* Default just returns -ENODEV to indicate unsupported */
+ dev_priv->display.queue_flip = intel_default_queue_flip;
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ dev_priv->display.queue_flip = intel_gen2_queue_flip;
+ break;
+
+ case 3:
+ dev_priv->display.queue_flip = intel_gen3_queue_flip;
+ break;
+
+ case 4:
+ case 5:
+ dev_priv->display.queue_flip = intel_gen4_queue_flip;
+ break;
+
+ case 6:
+ dev_priv->display.queue_flip = intel_gen6_queue_flip;
+ break;
+ case 7:
+ dev_priv->display.queue_flip = intel_gen7_queue_flip;
+ break;
+ }
}
/*
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c006982e..56a8e2aea19c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1416,6 +1416,8 @@ void intel_setup_overlay(struct drm_device *dev)
goto out_free;
overlay->reg_bo = reg_bo;
+ mutex_lock(&dev->struct_mutex);
+
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
@@ -1440,6 +1442,8 @@ void intel_setup_overlay(struct drm_device *dev)
}
}
+ mutex_unlock(&dev->struct_mutex);
+
/* init all values */
overlay->color_key = 0x0101fe;
overlay->brightness = -19;
@@ -1464,6 +1468,7 @@ out_unpin_bo:
i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
+ mutex_unlock(&dev->struct_mutex);
out_free:
kfree(overlay);
return;
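Like the suspend/resume hunks earlier, this overlay change keeps struct_mutex balanced by adding an unlock to the error labels as well as the success path. A generic runnable sketch of that goto-unwind locking idiom (setup and the mutex name are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

static int setup(int fail)
{
	char *buf = malloc(64);

	if (!buf)
		goto out;

	pthread_mutex_lock(&struct_mutex);
	if (fail)
		goto out_unlock;	/* an error raised while locked... */
	pthread_mutex_unlock(&struct_mutex);

	free(buf);
	return 0;

out_unlock:
	pthread_mutex_unlock(&struct_mutex);	/* ...still drops it exactly once */
	free(buf);
out:
	return -1;
}

int main(void)
{
	printf("setup: %d\n", setup(1));	/* -1, mutex left unlocked */
	return 0;
}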