Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 91
1 file changed, 45 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d6..f05cceac5a52 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
 	 * register values. */
-	I915_WRITE_START(ring, obj->gtt_offset);
+	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
 	I915_WRITE_CTL(ring,
 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
 	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
-		     I915_READ_START(ring) == obj->gtt_offset &&
+		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
 		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
@@ -501,11 +501,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 	if (ret)
 		goto err_unref;
 
-	pc->gtt_offset = obj->gtt_offset;
+	pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
 	pc->cpu_page = kmap(sg_page(obj->pages->sgl));
 	if (pc->cpu_page == NULL) {
 		ret = -ENOMEM;
@@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
-		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
+	if (ring->irq_refcount++ == 0)
+		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
@@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
-		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
-	}
+	if (--ring->irq_refcount == 0)
+		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
@@ -873,7 +867,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -891,7 +885,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
@@ -910,7 +904,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
@@ -928,7 +922,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
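
Note: the ilk_enable_gt_irq()/ilk_disable_gt_irq() helpers adopted above live in
i915_irq.c and are outside this diff. Assuming the obvious factoring of the GTIMR
read-modify-write that each ring previously open-coded under dev_priv->irq_lock,
they would look roughly like the following sketch (names from the patch context;
exact bodies may differ in the actual tree):

/* Sketch only -- real helpers are in i915_irq.c. */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	/* Callers must already hold dev_priv->irq_lock. */
	assert_spin_locked(&dev_priv->irq_lock);

	/* Clear the bits being updated, then set the ones to be unmasked. */
	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

With the mask bookkeeping centralized there, the per-ring get/put functions only
manage the refcount, which is also why irq_refcount can drop the .gt/.pm split.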
@@ -968,6 +962,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
+
+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }
 
 static int
@@ -1021,16 +1027,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	gen6_gt_force_wake_get(dev_priv);
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount.gt++ == 0) {
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~(ring->irq_enable_mask |
 					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
 		else
 			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
+		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1045,15 +1049,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	unsigned long flags;
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount.gt == 0) {
+	if (--ring->irq_refcount == 0) {
 		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring,
 				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
 		else
 			I915_WRITE_IMR(ring, ~0);
-		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-		POSTING_READ(GTIMR);
+		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1070,14 +1072,12 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	if (ring->irq_refcount.pm++ == 0) {
-		u32 pm_imr = I915_READ(GEN6_PMIMR);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (ring->irq_refcount++ == 0) {
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
-		POSTING_READ(GEN6_PMIMR);
+		snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -1092,14 +1092,12 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
 	if (!dev->irq_enabled)
 		return;
 
-	spin_lock_irqsave(&dev_priv->rps.lock, flags);
-	if (--ring->irq_refcount.pm == 0) {
-		u32 pm_imr = I915_READ(GEN6_PMIMR);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
+	if (--ring->irq_refcount == 0) {
 		I915_WRITE_IMR(ring, ~0);
-		I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
-		POSTING_READ(GEN6_PMIMR);
+		snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
 	}
-	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static int
@@ -1144,7 +1142,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 		intel_ring_advance(ring);
 	} else {
 		struct drm_i915_gem_object *obj = ring->private;
-		u32 cs_offset = obj->gtt_offset;
+		u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
 
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
@@ -1224,12 +1222,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_object_pin(obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 	if (ret != 0) {
 		goto err_unref;
 	}
 
-	ring->status_page.gfx_addr = obj->gtt_offset;
+	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
 	if (ring->status_page.page_addr == NULL) {
 		ret = -ENOMEM;
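
Note: the snb_enable_pm_irq()/snb_disable_pm_irq() helpers that replace the
open-coded GEN6_PMIMR accesses in hsw_vebox_get_irq()/hsw_vebox_put_irq() are
likewise defined in i915_irq.c, not in this file. Assuming they mirror the GT
helpers for the PM interrupt mask register, a rough sketch (the real bodies may
cache the mask in dev_priv instead of reading the register back):

/* Sketch only: mirrors ilk_*_gt_irq() for GEN6_PMIMR. */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t pmimr;

	/* This is the point of the locking change above: PM interrupt
	 * masking now nests under dev_priv->irq_lock, not rps.lock. */
	assert_spin_locked(&dev_priv->irq_lock);

	pmimr = I915_READ(GEN6_PMIMR);
	pmimr &= ~interrupt_mask;
	pmimr |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GEN6_PMIMR, pmimr);
	POSTING_READ(GEN6_PMIMR);
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}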
@@ -1307,7 +1305,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	ring->obj = obj;
 
-	ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
 	if (ret)
 		goto err_unref;
 
@@ -1316,7 +1314,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 		goto err_unpin;
 
 	ring->virtual_start =
-		ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
 			   ring->size);
 	if (ring->virtual_start == NULL) {
 		DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1594,6 +1592,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 	if (INTEL_INFO(ring->dev)->gen >= 6) {
 		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
 		I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+		if (HAS_VEBOX(ring->dev))
+			I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
 	}
 
 	ring->set_seqno(ring, seqno);
@@ -1828,7 +1828,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			return -ENOMEM;
 		}
 
-		ret = i915_gem_object_pin(obj, 0, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
 		if (ret != 0) {
 			drm_gem_object_unreference(&obj->base);
 			DRM_ERROR("Failed to ping batch bo\n");
@@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
-		PM_VEBOX_CS_ERROR_INTERRUPT;
+	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
 	ring->irq_get = hsw_vebox_get_irq;
 	ring->irq_put = hsw_vebox_put_irq;
 	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
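
Note: the pattern driving most of this diff is the obj->gtt_offset field being
replaced by accessor functions ahead of the multi-address-space rework, and
i915_gem_object_pin() by an explicitly GGTT-named wrapper. The accessors are
defined in i915_drv.h, outside this diff; at this stage they are presumably thin
wrappers, something like the following sketch (field names and bodies depend on
how far the tree is into the VMA conversion):

/* Sketch only: real definitions live in i915_drv.h. */
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	/* Assumes the drm_mm_node for the global-GTT binding is embedded
	 * in the object as gtt_space; older trees used a pointer here. */
	return obj->gtt_space.start;
}

static inline int
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, uint32_t alignment,
		      bool map_and_fenceable, bool nonblocking)
{
	/* Same arguments as the old i915_gem_object_pin(); the rename makes
	 * the "pin into the global GTT" assumption explicit at call sites. */
	return i915_gem_object_pin(obj, alignment, map_and_fenceable,
				   nonblocking);
}

Keeping callers behind these accessors means that when per-VM offsets land, only
the helpers have to change, not the dozens of call sites converted above.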