Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  33
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            | 366
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c      |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 130
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        |  36
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c          |  52
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       |  37
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c            |  58
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c         |   9
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c       |  12
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c         |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c            |  40
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c        |   2
14 files changed, 454 insertions(+), 339 deletions(-)
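The thread running through the i915_drv.h, i915_gem.c, i915_gem_evict.c and i915_gem_execbuffer.c hunks below is the new PIN_OFFSET_BIAS flag: the pin flags word widens to 64 bits so that a page-aligned minimum GTT offset can be carried in the same word as the low PIN_* bits, and the binder recovers it by masking. A minimal standalone C sketch of that encoding, reusing the constants this series defines (the main() driver and the printf are illustrative only, not part of the kernel code):

#include <stdint.h>
#include <stdio.h>

/* Flag bits and mask as added to i915_drv.h by this series. */
#define PIN_MAPPABLE    0x1ULL
#define PIN_NONBLOCK    0x2ULL
#define PIN_GLOBAL      0x4ULL
#define PIN_OFFSET_BIAS 0x8ULL
#define PIN_OFFSET_MASK (~4095ULL)

/* Minimum batch offset used by the i915_gem_execbuffer.c hunk below. */
#define BATCH_OFFSET_BIAS (256 * 1024)

int main(void)
{
	/* A pin request: mappable, and at or above 256 KiB in the GTT. */
	uint64_t flags = PIN_MAPPABLE | PIN_OFFSET_BIAS | BATCH_OFFSET_BIAS;

	/* i915_gem_object_bind_to_vm() recovers the range start the same
	 * way: mask off the low flag bits when PIN_OFFSET_BIAS is set. */
	uint64_t start = (flags & PIN_OFFSET_BIAS) ? (flags & PIN_OFFSET_MASK) : 0;

	printf("search range starts at 0x%llx\n", (unsigned long long)start);
	return 0;
}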
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 4e70de6ed468..b9159ade5e85 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1836,7 +1836,6 @@ int i915_driver_unload(struct drm_device *dev) flush_workqueue(dev_priv->wq); mutex_lock(&dev->struct_mutex); - i915_gem_free_all_phys_object(dev); i915_gem_cleanup_ringbuffer(dev); i915_gem_context_fini(dev); WARN_ON(dev_priv->mm.aliasing_ppgtt); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8f68678f361f..8e78703e45cf 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -251,18 +251,6 @@ struct intel_ddi_plls { #define WATCH_LISTS 0 #define WATCH_GTT 0 -#define I915_GEM_PHYS_CURSOR_0 1 -#define I915_GEM_PHYS_CURSOR_1 2 -#define I915_GEM_PHYS_OVERLAY_REGS 3 -#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) - -struct drm_i915_gem_phys_object { - int id; - struct page **page_list; - drm_dma_handle_t *handle; - struct drm_i915_gem_object *cur_obj; -}; - struct opregion_header; struct opregion_acpi; struct opregion_swsci; @@ -1106,9 +1094,6 @@ struct i915_gem_mm { /** Bit 6 swizzling required for Y tiling */ uint32_t bit_6_swizzle_y; - /* storage for physical objects */ - struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; - /* accounting, useful for userland debugging */ spinlock_t object_stat_lock; size_t object_memory; @@ -1712,7 +1697,7 @@ struct drm_i915_gem_object { struct drm_file *pin_filp; /** for phy allocated objects */ - struct drm_i915_gem_phys_object *phys_obj; + drm_dma_handle_t *phys_handle; union { struct i915_gem_userptr { @@ -1916,6 +1901,9 @@ struct drm_i915_cmd_table { #define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ ((dev)->pdev->device & 0x00F0) == 0x0020) +/* ULX machines are also considered ULT. 
*/ +#define IS_HSW_ULX(dev) ((dev)->pdev->device == 0x0A0E || \ + (dev)->pdev->device == 0x0A1E) #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) /* @@ -2172,10 +2160,12 @@ void i915_gem_vma_destroy(struct i915_vma *vma); #define PIN_MAPPABLE 0x1 #define PIN_NONBLOCK 0x2 #define PIN_GLOBAL 0x4 +#define PIN_OFFSET_BIAS 0x8 +#define PIN_OFFSET_MASK (~4095) int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, - unsigned flags); + uint64_t flags); int __must_check i915_vma_unbind(struct i915_vma *vma); int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); @@ -2297,13 +2287,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, struct intel_engine_cs *pipelined); void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); -int i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj, - int id, +int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); -void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj); -void i915_gem_free_all_phys_object(struct drm_device *dev); int i915_gem_open(struct drm_device *dev, struct drm_file *file); void i915_gem_release(struct drm_device *dev, struct drm_file *file); @@ -2430,6 +2415,8 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment, unsigned cache_level, + unsigned long start, + unsigned long end, unsigned flags); int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); int i915_gem_evict_everything(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 87e9b349ebef..bbcd35abf247 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -47,11 +47,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, static void i915_gem_object_retire(struct drm_i915_gem_object *obj); -static int i915_gem_phys_pwrite(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file); - static void i915_gem_write_fence(struct drm_device *dev, int reg, struct drm_i915_gem_object *obj); static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, @@ -214,6 +209,128 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, return 0; } +static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj) +{ + drm_dma_handle_t *phys = obj->phys_handle; + + if (!phys) + return; + + if (obj->madv == I915_MADV_WILLNEED) { + struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + char *vaddr = phys->vaddr; + int i; + + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page = shmem_read_mapping_page(mapping, i); + if (!IS_ERR(page)) { + char *dst = kmap_atomic(page); + memcpy(dst, vaddr, PAGE_SIZE); + drm_clflush_virt_range(dst, PAGE_SIZE); + kunmap_atomic(dst); + + set_page_dirty(page); + mark_page_accessed(page); + page_cache_release(page); + } + vaddr += PAGE_SIZE; + } + i915_gem_chipset_flush(obj->base.dev); + } + +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); +#endif + drm_pci_free(obj->base.dev, phys); + obj->phys_handle = NULL; +} + +int +i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, + int align) +{ + drm_dma_handle_t *phys; + struct address_space 
*mapping; + char *vaddr; + int i; + + if (obj->phys_handle) { + if ((unsigned long)obj->phys_handle->vaddr & (align -1)) + return -EBUSY; + + return 0; + } + + if (obj->madv != I915_MADV_WILLNEED) + return -EFAULT; + + if (obj->base.filp == NULL) + return -EINVAL; + + /* create a new object */ + phys = drm_pci_alloc(obj->base.dev, obj->base.size, align); + if (!phys) + return -ENOMEM; + + vaddr = phys->vaddr; +#ifdef CONFIG_X86 + set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE); +#endif + mapping = file_inode(obj->base.filp)->i_mapping; + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page; + char *src; + + page = shmem_read_mapping_page(mapping, i); + if (IS_ERR(page)) { +#ifdef CONFIG_X86 + set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE); +#endif + drm_pci_free(obj->base.dev, phys); + return PTR_ERR(page); + } + + src = kmap_atomic(page); + memcpy(vaddr, src, PAGE_SIZE); + kunmap_atomic(src); + + mark_page_accessed(page); + page_cache_release(page); + + vaddr += PAGE_SIZE; + } + + obj->phys_handle = phys; + return 0; +} + +static int +i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + struct drm_device *dev = obj->base.dev; + void *vaddr = obj->phys_handle->vaddr + args->offset; + char __user *user_data = to_user_ptr(args->data_ptr); + + if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { + unsigned long unwritten; + + /* The physical object once assigned is fixed for the lifetime + * of the obj, so we can safely drop the lock and continue + * to access vaddr. + */ + mutex_unlock(&dev->struct_mutex); + unwritten = copy_from_user(vaddr, user_data, args->size); + mutex_lock(&dev->struct_mutex); + if (unwritten) + return -EFAULT; + } + + i915_gem_chipset_flush(dev); + return 0; +} + void *i915_gem_object_alloc(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -930,8 +1047,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * pread/pwrite currently are reading and writing from the CPU * perspective, requiring manual detiling by the client. */ - if (obj->phys_obj) { - ret = i915_gem_phys_pwrite(dev, obj, args, file); + if (obj->phys_handle) { + ret = i915_gem_phys_pwrite(obj, args, file); goto out; } @@ -3257,12 +3374,14 @@ static struct i915_vma * i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, unsigned alignment, - unsigned flags) + uint64_t flags) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 size, fence_size, fence_alignment, unfenced_alignment; - size_t gtt_max = + unsigned long start = + flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; + unsigned long end = flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; struct i915_vma *vma; int ret; @@ -3291,11 +3410,11 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, /* If the object is bigger than the entire aperture, reject it early * before evicting everything in a vain attempt to find space. */ - if (obj->base.size > gtt_max) { - DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n", + if (obj->base.size > end) { + DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n", obj->base.size, flags & PIN_MAPPABLE ? 
"mappable" : "total", - gtt_max); + end); return ERR_PTR(-E2BIG); } @@ -3312,12 +3431,15 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, search_free: ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, size, alignment, - obj->cache_level, 0, gtt_max, + obj->cache_level, + start, end, DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT); if (ret) { ret = i915_gem_evict_something(dev, vm, size, alignment, - obj->cache_level, flags); + obj->cache_level, + start, end, + flags); if (ret == 0) goto search_free; @@ -3892,11 +4014,30 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) return ret; } +static bool +i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) +{ + struct drm_i915_gem_object *obj = vma->obj; + + if (alignment && + vma->node.start & (alignment - 1)) + return true; + + if (flags & PIN_MAPPABLE && !obj->map_and_fenceable) + return true; + + if (flags & PIN_OFFSET_BIAS && + vma->node.start < (flags & PIN_OFFSET_MASK)) + return true; + + return false; +} + int i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm, uint32_t alignment, - unsigned flags) + uint64_t flags) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct i915_vma *vma; @@ -3913,15 +4054,13 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; - if ((alignment && - vma->node.start & (alignment - 1)) || - (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) { + if (i915_vma_misplaced(vma, alignment, flags)) { WARN(vma->pin_count, "bo is already pinned with incorrect alignment:" " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", i915_gem_obj_offset(obj, vm), alignment, - flags & PIN_MAPPABLE, + !!(flags & PIN_MAPPABLE), obj->map_and_fenceable); ret = i915_vma_unbind(vma); if (ret) @@ -4281,9 +4420,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) trace_i915_gem_object_destroy(obj); - if (obj->phys_obj) - i915_gem_detach_phys_object(dev, obj); - list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) { int ret; @@ -4301,6 +4437,8 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) } } + i915_gem_object_detach_phys(obj); + /* Stolen objects don't hold a ref, but do hold pin count. Fix that up * before progressing. */ if (obj->stolen) @@ -4792,190 +4930,6 @@ i915_gem_load(struct drm_device *dev) register_oom_notifier(&dev_priv->mm.oom_notifier); } -/* - * Create a physically contiguous memory object for this object - * e.g. 
for cursor + overlay regs - */ -static int i915_gem_init_phys_object(struct drm_device *dev, - int id, int size, int align) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_phys_object *phys_obj; - int ret; - - if (dev_priv->mm.phys_objs[id - 1] || !size) - return 0; - - phys_obj = kzalloc(sizeof(*phys_obj), GFP_KERNEL); - if (!phys_obj) - return -ENOMEM; - - phys_obj->id = id; - - phys_obj->handle = drm_pci_alloc(dev, size, align); - if (!phys_obj->handle) { - ret = -ENOMEM; - goto kfree_obj; - } -#ifdef CONFIG_X86 - set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); -#endif - - dev_priv->mm.phys_objs[id - 1] = phys_obj; - - return 0; -kfree_obj: - kfree(phys_obj); - return ret; -} - -static void i915_gem_free_phys_object(struct drm_device *dev, int id) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_phys_object *phys_obj; - - if (!dev_priv->mm.phys_objs[id - 1]) - return; - - phys_obj = dev_priv->mm.phys_objs[id - 1]; - if (phys_obj->cur_obj) { - i915_gem_detach_phys_object(dev, phys_obj->cur_obj); - } - -#ifdef CONFIG_X86 - set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE); -#endif - drm_pci_free(dev, phys_obj->handle); - kfree(phys_obj); - dev_priv->mm.phys_objs[id - 1] = NULL; -} - -void i915_gem_free_all_phys_object(struct drm_device *dev) -{ - int i; - - for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++) - i915_gem_free_phys_object(dev, i); -} - -void i915_gem_detach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj) -{ - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; - char *vaddr; - int i; - int page_count; - - if (!obj->phys_obj) - return; - vaddr = obj->phys_obj->handle->vaddr; - - page_count = obj->base.size / PAGE_SIZE; - for (i = 0; i < page_count; i++) { - struct page *page = shmem_read_mapping_page(mapping, i); - if (!IS_ERR(page)) { - char *dst = kmap_atomic(page); - memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE); - kunmap_atomic(dst); - - drm_clflush_pages(&page, 1); - - set_page_dirty(page); - mark_page_accessed(page); - page_cache_release(page); - } - } - i915_gem_chipset_flush(dev); - - obj->phys_obj->cur_obj = NULL; - obj->phys_obj = NULL; -} - -int -i915_gem_attach_phys_object(struct drm_device *dev, - struct drm_i915_gem_object *obj, - int id, - int align) -{ - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; - struct drm_i915_private *dev_priv = dev->dev_private; - int ret = 0; - int page_count; - int i; - - if (id > I915_MAX_PHYS_OBJECT) - return -EINVAL; - - if (obj->phys_obj) { - if (obj->phys_obj->id == id) - return 0; - i915_gem_detach_phys_object(dev, obj); - } - - /* create a new object */ - if (!dev_priv->mm.phys_objs[id - 1]) { - ret = i915_gem_init_phys_object(dev, id, - obj->base.size, align); - if (ret) { - DRM_ERROR("failed to init phys object %d size: %zu\n", - id, obj->base.size); - return ret; - } - } - - /* bind to the object */ - obj->phys_obj = dev_priv->mm.phys_objs[id - 1]; - obj->phys_obj->cur_obj = obj; - - page_count = obj->base.size / PAGE_SIZE; - - for (i = 0; i < page_count; i++) { - struct page *page; - char *dst, *src; - - page = shmem_read_mapping_page(mapping, i); - if (IS_ERR(page)) - return PTR_ERR(page); - - src = kmap_atomic(page); - dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE); - memcpy(dst, src, PAGE_SIZE); - kunmap_atomic(src); - - mark_page_accessed(page); - page_cache_release(page); - } - - return 0; 
-} - -static int -i915_gem_phys_pwrite(struct drm_device *dev, - struct drm_i915_gem_object *obj, - struct drm_i915_gem_pwrite *args, - struct drm_file *file_priv) -{ - void *vaddr = obj->phys_obj->handle->vaddr + args->offset; - char __user *user_data = to_user_ptr(args->data_ptr); - - if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { - unsigned long unwritten; - - /* The physical object once assigned is fixed for the lifetime - * of the obj, so we can safely drop the lock and continue - * to access vaddr. - */ - mutex_unlock(&dev->struct_mutex); - unwritten = copy_from_user(vaddr, user_data, args->size); - mutex_lock(&dev->struct_mutex); - if (unwritten) - return -EFAULT; - } - - i915_gem_chipset_flush(dev); - return 0; -} - void i915_gem_release(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 75fca63dc8c1..bbf4b12d842e 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -68,9 +68,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) int i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, int min_size, unsigned alignment, unsigned cache_level, + unsigned long start, unsigned long end, unsigned flags) { - struct drm_i915_private *dev_priv = dev->dev_private; struct list_head eviction_list, unwind_list; struct i915_vma *vma; int ret = 0; @@ -102,11 +102,10 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, */ INIT_LIST_HEAD(&unwind_list); - if (flags & PIN_MAPPABLE) { - BUG_ON(!i915_is_ggtt(vm)); + if (start != 0 || end != vm->total) { drm_mm_init_scan_with_range(&vm->mm, min_size, - alignment, cache_level, 0, - dev_priv->gtt.mappable_end); + alignment, cache_level, + start, end); } else drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 008e208e9a3a..3a30133f93e8 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -35,6 +35,9 @@ #define __EXEC_OBJECT_HAS_PIN (1<<31) #define __EXEC_OBJECT_HAS_FENCE (1<<30) +#define __EXEC_OBJECT_NEEDS_BIAS (1<<28) + +#define BATCH_OFFSET_BIAS (256*1024) struct eb_vmas { struct list_head vmas; @@ -548,7 +551,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; bool need_fence; - unsigned flags; + uint64_t flags; int ret; flags = 0; @@ -562,6 +565,8 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, if (entry->flags & EXEC_OBJECT_NEEDS_GTT) flags |= PIN_GLOBAL; + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) + flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); if (ret) @@ -595,6 +600,36 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, return 0; } +static bool +eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access) +{ + struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; + struct drm_i915_gem_object *obj = vma->obj; + bool need_fence, need_mappable; + + need_fence = + has_fenced_gpu_access && + entry->flags & EXEC_OBJECT_NEEDS_FENCE && + obj->tiling_mode != I915_TILING_NONE; + need_mappable = need_fence || need_reloc_mappable(vma); + + WARN_ON((need_mappable || need_fence) && + 
!i915_is_ggtt(vma->vm)); + + if (entry->alignment && + vma->node.start & (entry->alignment - 1)) + return true; + + if (need_mappable && !obj->map_and_fenceable) + return true; + + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && + vma->node.start < BATCH_OFFSET_BIAS) + return true; + + return false; +} + static int i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, struct list_head *vmas, @@ -658,26 +693,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, /* Unbind any ill-fitting objects or pin. */ list_for_each_entry(vma, vmas, exec_list) { - struct drm_i915_gem_exec_object2 *entry = vma->exec_entry; - bool need_fence, need_mappable; - - obj = vma->obj; - if (!drm_mm_node_allocated(&vma->node)) continue; - need_fence = - has_fenced_gpu_access && - entry->flags & EXEC_OBJECT_NEEDS_FENCE && - obj->tiling_mode != I915_TILING_NONE; - need_mappable = need_fence || need_reloc_mappable(vma); - - WARN_ON((need_mappable || need_fence) && - !i915_is_ggtt(vma->vm)); - - if ((entry->alignment && - vma->node.start & (entry->alignment - 1)) || - (need_mappable && !obj->map_and_fenceable)) + if (eb_vma_misplaced(vma, has_fenced_gpu_access)) ret = i915_vma_unbind(vma); else ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); @@ -778,9 +797,9 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, * relocations were valid. */ for (j = 0; j < exec[i].relocation_count; j++) { - if (copy_to_user(&user_relocs[j].presumed_offset, - &invalid_offset, - sizeof(invalid_offset))) { + if (__copy_to_user(&user_relocs[j].presumed_offset, + &invalid_offset, + sizeof(invalid_offset))) { ret = -EFAULT; mutex_lock(&dev->struct_mutex); goto err; @@ -1040,6 +1059,25 @@ static int gen8_dispatch_bsd_ring(struct drm_device *dev, } } +static struct drm_i915_gem_object * +eb_get_batch(struct eb_vmas *eb) +{ + struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list); + + /* + * SNA is doing fancy tricks with compressing batch buffers, which leads + * to negative relocation deltas. Usually that works out ok since the + * relocate address is still positive, except when the batch is placed + * very low in the GTT. Ensure this doesn't happen. + * + * Note that actual hangs have only been observed on gen7, but for + * paranoia do it everywhere. + */ + vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS; + + return vma->obj; +} + static int i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_file *file, @@ -1220,7 +1258,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; /* take note of the batch buffer before we might reorder the lists */ - batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj; + batch_obj = eb_get_batch(eb); /* Move the objects en-masse into the GTT, evicting if necessary. */ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; @@ -1422,18 +1460,21 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list); if (!ret) { + struct drm_i915_gem_exec_object __user *user_exec_list = + to_user_ptr(args->buffers_ptr); + /* Copy the new buffer offsets back to the user's exec list. */ - for (i = 0; i < args->buffer_count; i++) - exec_list[i].offset = exec2_list[i].offset; - /* ... 
and back out to userspace */ - ret = copy_to_user(to_user_ptr(args->buffers_ptr), - exec_list, - sizeof(*exec_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_DEBUG("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + for (i = 0; i < args->buffer_count; i++) { + ret = __copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset)); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user (%d)\n", + args->buffer_count, ret); + break; + } } } @@ -1484,14 +1525,21 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list); if (!ret) { /* Copy the new buffer offsets back to the user's exec list. */ - ret = copy_to_user(to_user_ptr(args->buffers_ptr), - exec2_list, - sizeof(*exec2_list) * args->buffer_count); - if (ret) { - ret = -EFAULT; - DRM_DEBUG("failed to copy %d exec entries " - "back to user (%d)\n", - args->buffer_count, ret); + struct drm_i915_gem_exec_object2 *user_exec_list = + to_user_ptr(args->buffers_ptr); + int i; + + for (i = 0; i < args->buffer_count; i++) { + ret = __copy_to_user(&user_exec_list[i].offset, + &exec2_list[i].offset, + sizeof(user_exec_list[i].offset)); + if (ret) { + ret = -EFAULT; + DRM_DEBUG("failed to copy %d exec entries " + "back to user\n", + args->buffer_count); + break; + } } } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 94916362b61c..931b906f292a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -35,25 +35,35 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); bool intel_enable_ppgtt(struct drm_device *dev, bool full) { - if (i915.enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) + if (i915.enable_ppgtt == 0) return false; if (i915.enable_ppgtt == 1 && full) return false; + return true; +} + +static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) +{ + if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) + return 0; + + if (enable_ppgtt == 1) + return 1; + + if (enable_ppgtt == 2 && HAS_PPGTT(dev)) + return 2; + #ifdef CONFIG_INTEL_IOMMU /* Disable ppgtt on SNB if VT-d is on. */ if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { DRM_INFO("Disabling PPGTT because VT-d is on\n"); - return false; + return 0; } #endif - /* Full ppgtt disabled by default for now due to issues. */ - if (full) - return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2); - else - return HAS_ALIASING_PPGTT(dev); + return HAS_ALIASING_PPGTT(dev) ? 1 : 0; } @@ -1039,7 +1049,9 @@ alloc: if (ret == -ENOSPC && !retried) { ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, GEN6_PD_SIZE, GEN6_PD_ALIGN, - I915_CACHE_NONE, 0); + I915_CACHE_NONE, + 0, dev_priv->gtt.base.total, + 0); if (ret) return ret; @@ -2052,6 +2064,14 @@ int i915_gem_gtt_init(struct drm_device *dev) if (intel_iommu_gfx_mapped) DRM_INFO("VT-d active for gfx access\n"); #endif + /* + * i915.enable_ppgtt is read-only, so do an early pass to validate the + * user's requested state against the hardware/driver capabilities. We + * do this now so that we can print out any log messages once rather + * than every time we check intel_enable_ppgtt(). 
+ */ + i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt); + DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt); return 0; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 2945f57c53ee..6b6509656f16 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -598,47 +598,71 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) dev_priv->vbt.edp_pps = *edp_pps; - dev_priv->vbt.edp_rate = edp_link_params->rate ? DP_LINK_BW_2_7 : - DP_LINK_BW_1_62; + switch (edp_link_params->rate) { + case EDP_RATE_1_62: + dev_priv->vbt.edp_rate = DP_LINK_BW_1_62; + break; + case EDP_RATE_2_7: + dev_priv->vbt.edp_rate = DP_LINK_BW_2_7; + break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n", + edp_link_params->rate); + break; + } + switch (edp_link_params->lanes) { - case 0: + case EDP_LANE_1: dev_priv->vbt.edp_lanes = 1; break; - case 1: + case EDP_LANE_2: dev_priv->vbt.edp_lanes = 2; break; - case 3: - default: + case EDP_LANE_4: dev_priv->vbt.edp_lanes = 4; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n", + edp_link_params->lanes); + break; } + switch (edp_link_params->preemphasis) { - case 0: + case EDP_PREEMPHASIS_NONE: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_0; break; - case 1: + case EDP_PREEMPHASIS_3_5dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; break; - case 2: + case EDP_PREEMPHASIS_6dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_6; break; - case 3: + case EDP_PREEMPHASIS_9_5dB: dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", + edp_link_params->preemphasis); + break; } + switch (edp_link_params->vswing) { - case 0: + case EDP_VSWING_0_4V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_400; break; - case 1: + case EDP_VSWING_0_6V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_600; break; - case 2: + case EDP_VSWING_0_8V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_800; break; - case 3: + case EDP_VSWING_1_2V: dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_1200; break; + default: + DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", + edp_link_params->vswing); + break; } } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1ce4ad4626e4..7a4c7c98378a 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8112,14 +8112,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, addr = i915_gem_obj_ggtt_offset(obj); } else { int align = IS_I830(dev) ? 16 * 1024 : 256; - ret = i915_gem_attach_phys_object(dev, obj, - (intel_crtc->pipe == 0) ? 
I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, - align); + ret = i915_gem_object_attach_phys(obj, align); if (ret) { DRM_DEBUG_KMS("failed to attach phys object\n"); goto fail_locked; } - addr = obj->phys_obj->handle->busaddr; + addr = obj->phys_handle->busaddr; } if (IS_GEN2(dev)) @@ -8127,10 +8125,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, finish: if (intel_crtc->cursor_bo) { - if (INTEL_INFO(dev)->cursor_needs_physical) { - if (intel_crtc->cursor_bo != obj) - i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); - } else + if (!INTEL_INFO(dev)->cursor_needs_physical) i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo); drm_gem_object_unreference(&intel_crtc->cursor_bo->base); } @@ -11808,15 +11803,6 @@ void intel_modeset_init(struct drm_device *dev) } } -static void -intel_connector_break_all_links(struct intel_connector *connector) -{ - connector->base.dpms = DRM_MODE_DPMS_OFF; - connector->base.encoder = NULL; - connector->encoder->connectors_active = false; - connector->encoder->base.crtc = NULL; -} - static void intel_enable_pipe_a(struct drm_device *dev) { struct intel_connector *connector; @@ -11905,8 +11891,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) if (connector->encoder->base.crtc != &crtc->base) continue; - intel_connector_break_all_links(connector); + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; } + /* multiple connectors may have the same encoder: + * handle them and break crtc link separately */ + list_for_each_entry(connector, &dev->mode_config.connector_list, + base.head) + if (connector->encoder->base.crtc == &crtc->base) { + connector->encoder->base.crtc = NULL; + connector->encoder->connectors_active = false; + } WARN_ON(crtc->active); crtc->base.enabled = false; @@ -11997,6 +11992,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) encoder->base.name); encoder->disable(encoder); } + encoder->base.crtc = NULL; + encoder->connectors_active = false; /* Inconsistent output/port/pipe state happens presumably due to * a bug in one of the get_hw_state functions. 
Or someplace else @@ -12007,8 +12004,8 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) base.head) { if (connector->encoder != encoder) continue; - - intel_connector_break_all_links(connector); + connector->base.dpms = DRM_MODE_DPMS_OFF; + connector->base.encoder = NULL; } } /* Enabled encoders without active connectors will be fixed in diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 2d5d9b010073..52fda950fd2a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -123,7 +123,8 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) case DP_LINK_BW_2_7: break; case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ - if ((IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) && + if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || + INTEL_INFO(dev)->gen >= 8) && intel_dp->dpcd[DP_DPCD_REV] >= 0x12) max_link_bw = DP_LINK_BW_5_4; else @@ -138,6 +139,22 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) return max_link_bw; } +static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + u8 source_max, sink_max; + + source_max = 4; + if (HAS_DDI(dev) && intel_dig_port->port == PORT_A && + (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0) + source_max = 2; + + sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + + return min(source_max, sink_max); +} + /* * The units on the numbers in the next two are... bizarre. Examples will * make it clearer; this one parallels an example in the eDP spec. @@ -188,7 +205,7 @@ intel_dp_mode_valid(struct drm_connector *connector, } max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); - max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); + max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, 18); @@ -789,8 +806,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc *intel_crtc = encoder->new_crtc; struct intel_connector *intel_connector = intel_dp->attached_connector; int lane_count, clock; - int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + int min_lane_count = 1; + int max_lane_count = intel_dp_max_lane_count(intel_dp); /* Conveniently, the link BW constants become indices with a shift...*/ + int min_clock = 0; int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; int bpp, mode_rate; static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; @@ -824,19 +843,38 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. */ bpp = pipe_config->pipe_bpp; - if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && - dev_priv->vbt.edp_bpp < bpp) { - DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", - dev_priv->vbt.edp_bpp); - bpp = dev_priv->vbt.edp_bpp; + if (is_edp(intel_dp)) { + if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) { + DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", + dev_priv->vbt.edp_bpp); + bpp = dev_priv->vbt.edp_bpp; + } + + if (IS_BROADWELL(dev)) { + /* Yes, it's an ugly hack. 
*/ + min_lane_count = max_lane_count; + DRM_DEBUG_KMS("forcing lane count to max (%u) on BDW\n", + min_lane_count); + } else if (dev_priv->vbt.edp_lanes) { + min_lane_count = min(dev_priv->vbt.edp_lanes, + max_lane_count); + DRM_DEBUG_KMS("using min %u lanes per VBT\n", + min_lane_count); + } + + if (dev_priv->vbt.edp_rate) { + min_clock = min(dev_priv->vbt.edp_rate >> 3, max_clock); + DRM_DEBUG_KMS("using min %02x link bw per VBT\n", + bws[min_clock]); + } } for (; bpp >= 6*3; bpp -= 2*3) { mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, bpp); - for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { - for (clock = 0; clock <= max_clock; clock++) { + for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { + for (clock = min_clock; clock <= max_clock; clock++) { link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); link_avail = intel_dp_max_data_rate(link_clock, lane_count); diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 6ea2d75464da..088fe9378a4c 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -387,6 +387,15 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, height); } + /* No preferred mode marked by the EDID? Are there any modes? */ + if (!modes[i] && !list_empty(&connector->modes)) { + DRM_DEBUG_KMS("using first mode listed on connector %s\n", + connector->name); + modes[i] = list_first_entry(&connector->modes, + struct drm_display_mode, + head); + } + /* last resort: use current mode */ if (!modes[i]) { /* diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 0396d1312b5c..daa118978eec 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -193,7 +193,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay) struct overlay_registers __iomem *regs; if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr; + regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else regs = io_mapping_map_wc(dev_priv->gtt.mappable, i915_gem_obj_ggtt_offset(overlay->reg_bo)); @@ -1340,14 +1340,12 @@ void intel_setup_overlay(struct drm_device *dev) overlay->reg_bo = reg_bo; if (OVERLAY_NEEDS_PHYSICAL(dev)) { - ret = i915_gem_attach_phys_object(dev, reg_bo, - I915_GEM_PHYS_OVERLAY_REGS, - PAGE_SIZE); + ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); if (ret) { DRM_ERROR("failed to attach phys overlay regs\n"); goto out_free_bo; } - overlay->flip_addr = reg_bo->phys_obj->handle->busaddr; + overlay->flip_addr = reg_bo->phys_handle->busaddr; } else { ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE); if (ret) { @@ -1428,7 +1426,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) /* Cast to make sparse happy, but it's wc memory anyway, so * equivalent to the wc io mapping on X86. 
*/ regs = (struct overlay_registers __iomem *) - overlay->reg_bo->phys_obj->handle->vaddr; + overlay->reg_bo->phys_handle->vaddr; else regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, i915_gem_obj_ggtt_offset(overlay->reg_bo)); @@ -1462,7 +1460,7 @@ intel_overlay_capture_error_state(struct drm_device *dev) error->dovsta = I915_READ(DOVSTA); error->isr = I915_READ(ISR); if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) - error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr; + error->base = (__force long)overlay->reg_bo->phys_handle->vaddr; else error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 2e1338a5d488..5e6c888b4928 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -567,6 +567,7 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, enum pipe pipe = intel_get_pipe_from_connector(connector); u32 freq; unsigned long flags; + u64 n; if (!panel->backlight.present || pipe == INVALID_PIPE) return; @@ -577,10 +578,9 @@ void intel_panel_set_backlight(struct intel_connector *connector, u32 level, /* scale to hardware max, but be careful to not overflow */ freq = panel->backlight.max; - if (freq < max) - level = level * freq / max; - else - level = freq / max * level; + n = (u64)level * freq; + do_div(n, max); + level = n; panel->backlight.level = level; if (panel->backlight.device) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index b86b58c44228..906d06f73e51 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2113,6 +2113,43 @@ static void intel_print_wm_latency(struct drm_device *dev, } } +static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, + uint16_t wm[5], uint16_t min) +{ + int level, max_level = ilk_wm_max_level(dev_priv->dev); + + if (wm[0] >= min) + return false; + + wm[0] = max(wm[0], min); + for (level = 1; level <= max_level; level++) + wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5)); + + return true; +} + +static void snb_wm_latency_quirk(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + bool changed; + + /* + * The BIOS provided WM memory latency values are often + * inadequate for high resolution displays. Adjust them. 
+ */ + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | + ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + + if (!changed) + return; + + DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n"); + intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); + intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); + intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); +} + static void ilk_setup_wm_latency(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2130,6 +2167,9 @@ static void ilk_setup_wm_latency(struct drm_device *dev) intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency); intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency); intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency); + + if (IS_GEN6(dev)) + snb_wm_latency_quirk(dev); } static void ilk_compute_wm_parameters(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9cd99d9676fd..2f5d5d3f0043 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -185,6 +185,8 @@ static void vlv_force_wake_reset(struct drm_i915_private *dev_priv) { __raw_i915_write32(dev_priv, FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff)); + __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV, + _MASKED_BIT_DISABLE(0xffff)); /* something from same cacheline, but !FORCEWAKE_VLV */ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV); } |
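A side note on the intel_panel.c hunk above: the old two-branch scaling either overflowed (level * freq can exceed 2^32 when freq < max) or truncated (freq / max rounds toward zero when freq >= max). The fix widens the multiply to 64 bits and divides with do_div(). A standalone C99 sketch of the same computation (scale_backlight and the test values are illustrative, not names from the patch):

#include <stdint.h>
#include <stdio.h>

/* 64-bit widening equivalent of the do_div()-based kernel code. */
static uint32_t scale_backlight(uint32_t level, uint32_t freq, uint32_t max)
{
	/* Widen before multiplying so level * freq cannot wrap at 2^32. */
	uint64_t n = (uint64_t)level * freq;
	return (uint32_t)(n / max);
}

int main(void)
{
	/* 70000 * 70000 wraps a 32-bit multiply; the widened form is exact. */
	printf("%u\n", scale_backlight(70000, 70000, 100000)); /* prints 49000 */
	return 0;
}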