Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/i915/i915_dma.c      | 11
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.c      | 27
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h      |  1
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c      | 72
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c |  1
5 files changed, 83 insertions, 29 deletions
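One of the smaller changes below extends the goto-based unwind chain in i915_driver_load (i915_dma.c): a failed io_mapping_create_wc() now bails out with -EIO, and a new out_iomapfree label releases the GTT mapping when a later setup step such as i915_init_phys_hws() fails. The following is a minimal, self-contained sketch of that unwind pattern only — device_load, fail_late, and the malloc/free stand-ins are invented for illustration and are not the driver code:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in resources: "regs" plays the role of the ioremapped MMIO
 * BAR, "gtt_mapping" the WC io_mapping.  Names and error codes are
 * illustrative only. */
static int device_load(int fail_late)
{
	char *regs, *gtt_mapping;
	int ret;

	regs = malloc(4096);		/* ~ ioremap() of the register BAR */
	if (regs == NULL)
		return -1;

	gtt_mapping = malloc(4096);	/* ~ io_mapping_create_wc() */
	if (gtt_mapping == NULL) {
		ret = -1;		/* -EIO in the real driver */
		goto out_rmmap;
	}

	if (fail_late) {		/* ~ a later init step failing */
		ret = -1;
		goto out_iomapfree;	/* new label: also drop the mapping */
	}

	/* The real driver keeps both mappings for its lifetime; this
	 * sketch frees them only because it returns immediately. */
	free(gtt_mapping);
	free(regs);
	return 0;

out_iomapfree:
	free(gtt_mapping);		/* ~ io_mapping_free() */
out_rmmap:
	free(regs);			/* ~ iounmap() */
	return ret;
}

int main(void)
{
	if (device_load(1) == 0)	/* exercise the unwind path */
		return EXIT_FAILURE;
	return device_load(0) ? EXIT_FAILURE : EXIT_SUCCESS;
}

The point of the ordering is that each label undoes exactly the resources acquired before the failing step, so every failure site jumps to the first label that still applies.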
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 81f1cff56fd5..6949c2d58f1d 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	dev_priv->ring.map.flags = 0;
 	dev_priv->ring.map.mtrr = 0;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
 
 	if (dev_priv->ring.map.handle == NULL) {
 		i915_dma_cleanup(dev);
@@ -1090,6 +1090,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->mm.gtt_mapping =
 		io_mapping_create_wc(dev->agp->base,
 				     dev->agp->agp_info.aper_size * 1024*1024);
+	if (dev_priv->mm.gtt_mapping == NULL) {
+		ret = -EIO;
+		goto out_rmmap;
+	}
+
 	/* Set up a WC MTRR for non-PAT systems.  This is more common than
 	 * one would think, because the kernel disables PAT on first
 	 * generation Core chips because WC PAT gets overridden by a UC
@@ -1122,7 +1127,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ret = i915_init_phys_hws(dev);
 		if (ret != 0)
-			goto out_rmmap;
+			goto out_iomapfree;
 	}
 
 	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1161,6 +1166,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	return 0;
 
+out_iomapfree:
+	io_mapping_free(dev_priv->mm.gtt_mapping);
 out_rmmap:
 	iounmap(dev_priv->regs);
 free_priv:
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a31cbdbc3c54..b293ef0bae71 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,6 +27,7 @@
  *
  */
 
+#include <linux/device.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -66,6 +67,14 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 	i915_save_state(dev);
 
+	/* If KMS is active, we do the leavevt stuff here */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		if (i915_gem_idle(dev))
+			dev_err(&dev->pdev->dev,
+				"GEM idle failed, resume may fail\n");
+		drm_irq_uninstall(dev);
+	}
+
 	intel_opregion_free(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
@@ -79,6 +88,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 static int i915_resume(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret = 0;
+
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
@@ -89,7 +101,20 @@ static int i915_resume(struct drm_device *dev)
 
 	intel_opregion_init(dev);
 
-	return 0;
+	/* KMS EnterVT equivalent */
+	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		mutex_lock(&dev->struct_mutex);
+		dev_priv->mm.suspended = 0;
+
+		ret = i915_gem_init_ringbuffer(dev);
+		if (ret != 0)
+			ret = -1;
+		mutex_unlock(&dev->struct_mutex);
+
+		drm_irq_install(dev);
+	}
+
+	return ret;
 }
 
 static struct vm_operations_struct i915_gem_vm_ops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 135a08f615cd..17fa40858d26 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -618,6 +618,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
+int i915_gem_idle(struct drm_device *dev);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ac534c9a2f81..e9882d0c2473 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
 
 #define I915_GEM_GPU_DOMAINS	(~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
 
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain);
 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -215,7 +211,7 @@ fast_user_write(struct io_mapping *mapping,
 
 	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
 	unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
-						      user_data, length);
+						      user_data, length, length);
 	io_mapping_unmap_atomic(vaddr_atomic);
 	if (unwritten)
 		return -EFAULT;
@@ -1055,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
+	if (!dev_priv->hw_status_page)
+		return;
+
 	seqno = i915_get_gem_seqno(dev);
 
 	while (!list_empty(&dev_priv->mm.request_list)) {
@@ -2021,30 +2020,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  *		drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  uint32_t read_domains,
-				  uint32_t write_domain)
+i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
 	struct drm_device		*dev = obj->dev;
 	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
 	uint32_t			invalidate_domains = 0;
 	uint32_t			flush_domains = 0;
 
-	BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
-	BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
+	BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
 
 #if WATCH_BUF
 	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
 		 __func__, obj,
-		 obj->read_domains, read_domains,
-		 obj->write_domain, write_domain);
+		 obj->read_domains, obj->pending_read_domains,
+		 obj->write_domain, obj->pending_write_domain);
 #endif
 
 	/*
 	 * If the object isn't moving to a new write domain,
 	 * let the object stay in multiple read domains
 	 */
-	if (write_domain == 0)
-		read_domains |= obj->read_domains;
+	if (obj->pending_write_domain == 0)
+		obj->pending_read_domains |= obj->read_domains;
 	else
 		obj_priv->dirty = 1;
@@ -2054,15 +2051,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 	 * any read domains which differ from the old
 	 * write domain
 	 */
-	if (obj->write_domain && obj->write_domain != read_domains) {
+	if (obj->write_domain &&
+	    obj->write_domain != obj->pending_read_domains) {
 		flush_domains |= obj->write_domain;
-		invalidate_domains |= read_domains & ~obj->write_domain;
+		invalidate_domains |=
+			obj->pending_read_domains & ~obj->write_domain;
 	}
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= read_domains & ~obj->read_domains;
+	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
 	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
 #if WATCH_BUF
 		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2071,9 +2070,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
 		i915_gem_clflush_object(obj);
 	}
 
-	if ((write_domain | flush_domains) != 0)
-		obj->write_domain = write_domain;
-	obj->read_domains = read_domains;
+	/* The actual obj->write_domain will be updated with
+	 * pending_write_domain after we emit the accumulated flush for all
+	 * of our domain changes in execbuffers (which clears objects'
+	 * write_domains).  So if we have a current write domain that we
+	 * aren't changing, set pending_write_domain to that.
+	 */
+	if (flush_domains == 0 && obj->pending_write_domain == 0)
+		obj->pending_write_domain = obj->write_domain;
+	obj->read_domains = obj->pending_read_domains;
 
 	dev->invalidate_domains |= invalidate_domains;
 	dev->flush_domains |= flush_domains;
@@ -2583,9 +2588,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		struct drm_gem_object *obj = object_list[i];
 
 		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj,
-						  obj->pending_read_domains,
-						  obj->pending_write_domain);
+		i915_gem_object_set_to_gpu_domain(obj);
 	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2604,6 +2607,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		(void)i915_add_request(dev, dev->flush_domains);
 	}
 
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+
+		obj->write_domain = obj->pending_write_domain;
+	}
+
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 #if WATCH_COHERENCY
@@ -2866,6 +2875,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		return -EBADF;
 	}
 
+	/* Update the active list for the hardware's current position.
+	 * Otherwise this only updates on a delayed timer or when irqs are
+	 * actually unmasked, and our working set ends up being larger than
+	 * required.
+	 */
+	i915_gem_retire_requests(dev);
+
 	obj_priv = obj->driver_private;
 	/* Don't count being on the flushing list against the object being
	 * done.  Otherwise, a buffer left on the flushing list but not getting
@@ -2967,7 +2983,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
 	return 0;
 }
 
-static int
+int
 i915_gem_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3130,16 +3146,20 @@ static void
 i915_gem_cleanup_hws(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj = dev_priv->hws_obj;
-	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
 
 	if (dev_priv->hws_obj == NULL)
 		return;
 
+	obj = dev_priv->hws_obj;
+	obj_priv = obj->driver_private;
+
 	kunmap(obj_priv->page_list[0]);
 	i915_gem_object_unpin(obj);
 	drm_gem_object_unreference(obj);
 	dev_priv->hws_obj = NULL;
+
 	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
 	dev_priv->hw_status_page = NULL;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4d2baf7b00be..65b635ce28c8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1008,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		temp = CURSOR_MODE_DISABLE;
 		addr = 0;
 		bo = NULL;
+		mutex_lock(&dev->struct_mutex);
 		goto finish;
 	}
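The i915_gem.c hunks above move i915_gem_object_set_to_gpu_domain() over to the per-object pending_read_domains/pending_write_domain fields and defer updating obj->write_domain until after execbuffer has emitted the accumulated flush. The following stand-alone model mirrors that two-pass bookkeeping; struct obj, struct dev, and the DOMAIN_* values are simplified stand-ins, and the clflush, dirty-marking, and request handling from the real code are omitted:

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the GEM domain bookkeeping; the field names
 * mirror the driver, everything else is illustrative. */
#define DOMAIN_CPU	(1u << 0)
#define DOMAIN_GTT	(1u << 1)
#define DOMAIN_RENDER	(1u << 2)
#define DOMAIN_SAMPLER	(1u << 3)

struct obj {
	uint32_t read_domains, write_domain;
	uint32_t pending_read_domains, pending_write_domain;
};

struct dev {
	uint32_t invalidate_domains, flush_domains;
};

/* Pass 1: fold each object's pending domains into the device-wide
 * invalidate/flush masks (cf. i915_gem_object_set_to_gpu_domain). */
static void set_to_gpu_domain(struct dev *dev, struct obj *obj)
{
	uint32_t invalidate = 0, flush = 0;

	/* No new write domain: the object may stay readable everywhere. */
	if (obj->pending_write_domain == 0)
		obj->pending_read_domains |= obj->read_domains;

	/* Flush an old write domain that the new readers don't share. */
	if (obj->write_domain &&
	    obj->write_domain != obj->pending_read_domains) {
		flush |= obj->write_domain;
		invalidate |= obj->pending_read_domains & ~obj->write_domain;
	}

	/* Invalidate caches for any genuinely new read domains. */
	invalidate |= obj->pending_read_domains & ~obj->read_domains;

	/* Keep an unchanged write domain alive across the flush that
	 * execbuffer will emit (it clears objects' write_domains). */
	if (flush == 0 && obj->pending_write_domain == 0)
		obj->pending_write_domain = obj->write_domain;

	obj->read_domains = obj->pending_read_domains;
	dev->invalidate_domains |= invalidate;
	dev->flush_domains |= flush;
}

int main(void)
{
	struct dev dev = {0};
	struct obj objs[2] = {
		{ .read_domains = DOMAIN_CPU, .write_domain = DOMAIN_CPU,
		  .pending_read_domains = DOMAIN_SAMPLER },
		{ .read_domains = DOMAIN_GTT,
		  .pending_read_domains = DOMAIN_RENDER,
		  .pending_write_domain = DOMAIN_RENDER },
	};
	int i;

	for (i = 0; i < 2; i++)
		set_to_gpu_domain(&dev, &objs[i]);

	/* ... a single flush for dev.flush_domains would be emitted here ... */

	/* Pass 2: only now commit each object's new write domain. */
	for (i = 0; i < 2; i++)
		objs[i].write_domain = objs[i].pending_write_domain;

	printf("invalidate %#x flush %#x\n",
	       (unsigned)dev.invalidate_domains, (unsigned)dev.flush_domains);
	return 0;
}

The second pass exists for ordering: the single flush that execbuffer emits clears every object's write_domain, so the new write domain can only be committed once that flush has been queued, which is exactly what the added loop in the execbuffer hunk does.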