author    Ingo Molnar <mingo@elte.hu>  2009-04-24 10:11:18 +0200
committer Ingo Molnar <mingo@elte.hu>  2009-04-24 10:11:23 +0200
commit    416dfdcdb894432547ead4fcb9fa6a36b396059e
tree      8033fdda07397a59c5fa98c88927040906ce6c1a /drivers/gpu/drm
parent    56449f437add737a1e5e1cb7e00f63ac8ead1938
parent    091069740304c979f957ceacec39c461d0192158
Merge commit 'v2.6.30-rc3' into tracing/hw-branch-tracing
Conflicts:
	arch/x86/kernel/ptrace.c

Merge reason: fix the conflict above, and also pick up the CONFIG_BROKEN
dependency change from upstream so that we can remove it here.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--   drivers/gpu/drm/drm_stub.c                 11
-rw-r--r--   drivers/gpu/drm/drm_sysfs.c                 1
-rw-r--r--   drivers/gpu/drm/i915/i915_drv.h             7
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c           187
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_debugfs.c    93
-rw-r--r--   drivers/gpu/drm/i915/i915_gem_tiling.c    111
-rw-r--r--   drivers/gpu/drm/i915/intel_display.c        2
-rw-r--r--   drivers/gpu/drm/i915/intel_fb.c             4
-rw-r--r--   drivers/gpu/drm/i915/intel_hdmi.c          23
-rw-r--r--   drivers/gpu/drm/i915/intel_sdvo.c          22
-rw-r--r--   drivers/gpu/drm/via/via_dma.c              12
11 files changed, 427 insertions(+), 46 deletions(-)
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index d009661781bc..ef878615c49f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -159,6 +159,9 @@ void drm_master_put(struct drm_master **master)
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ if (file_priv->is_master)
+ return 0;
+
if (file_priv->minor->master && file_priv->minor->master != file_priv->master)
return -EINVAL;
@@ -169,6 +172,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
file_priv->minor->master != file_priv->master) {
mutex_lock(&dev->struct_mutex);
file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
mutex_unlock(&dev->struct_mutex);
}
@@ -178,10 +182,15 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- if (!file_priv->master)
+ if (!file_priv->is_master)
return -EINVAL;
+
+ if (!file_priv->minor->master)
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
drm_master_put(&file_priv->minor->master);
+ file_priv->is_master = 0;
mutex_unlock(&dev->struct_mutex);
return 0;
}
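The drm_stub.c hunks close a hole in master bookkeeping: every drm_file holds a ->master reference, so the old `if (!file_priv->master)` test in drm_dropmaster_ioctl() passed for any client, letting a non-master drop the device's real master. The new per-file is_master flag is set only when SET_MASTER actually takes effect. A minimal user-space sketch of the resulting invariant (all names here are hypothetical; the kernel's structs are drm_file/drm_master):

/*
 * Testing ->master cannot tell the real master apart, because every open
 * file owns one; the explicit flag can.
 */
#include <stdbool.h>
#include <stddef.h>

struct file_ctx  { void *master; bool is_master; };
struct minor_ctx { void *master; };

static int set_master(struct minor_ctx *minor, struct file_ctx *f)
{
	if (f->is_master)
		return 0;			/* already master: succeed */
	if (minor->master && minor->master != f->master)
		return -1;			/* another client holds it */
	minor->master = f->master;
	f->is_master = true;
	return 0;
}

static int drop_master(struct minor_ctx *minor, struct file_ctx *f)
{
	if (!f->is_master || !minor->master)
		return -1;			/* only the real master drops */
	minor->master = NULL;
	f->is_master = false;
	return 0;
}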
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index bc0c6849360c..022876ae34f0 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -132,6 +132,7 @@ void drm_sysfs_destroy(void)
*/
static void drm_sysfs_device_release(struct device *dev)
{
+ memset(dev, 0, sizeof(struct device));
return;
}
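The one-line drm_sysfs.c change zeroes the struct device in its release callback. That device is embedded in a longer-lived DRM minor rather than freed, and the most plausible reason for the memset is that the same memory can be registered again later, and re-registration expects a pristine structure rather than one carrying stale kobject state. A user-space model of that reuse hazard, with hypothetical names:

#include <string.h>

struct fake_device { int refcount; void *bus_data; };
struct fake_minor  { struct fake_device kdev; int minor_id; };

static void fake_release(struct fake_device *dev)
{
	/* mirrors drm_sysfs_device_release(): the owner is not freed, so
	 * stale refcounts/pointers must not leak into the next cycle */
	memset(dev, 0, sizeof(*dev));
}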
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3750d8003048..473a8f7fbdb5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -446,6 +446,9 @@ struct drm_i915_gem_object {
uint32_t tiling_mode;
uint32_t stride;
+ /** Record of address bit 17 of each page at last unbind. */
+ long *bit_17;
+
/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
uint32_t agp_type;
@@ -635,9 +638,13 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_object_get_pages(struct drm_gem_object *obj);
+void i915_gem_object_put_pages(struct drm_gem_object *obj);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
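The new bit_17 member is a bitmap, one bit per page of the object, recording bit 17 of each page's physical address when the pages were last released; BITS_TO_LONGS() sizes the backing array of longs. A self-contained model of the sizing and bit accessors (the underscored macros are stand-ins for the kernel's BITS_TO_LONGS/__set_bit/test_bit):

#include <limits.h>
#include <stdlib.h>

#define BITS_PER_LONG_    (sizeof(long) * CHAR_BIT)
#define BITS_TO_LONGS_(n) (((n) + BITS_PER_LONG_ - 1) / BITS_PER_LONG_)

int main(void)
{
	int page_count = 1000;		/* a ~4 MiB object at 4 KiB pages */
	long *bit_17 = calloc(BITS_TO_LONGS_(page_count), sizeof(long));
	int i = 42;

	/* record: page i currently has bit 17 of its physical address set */
	bit_17[i / BITS_PER_LONG_] |= 1UL << (i % BITS_PER_LONG_);

	/* query, as i915_gem_object_do_bit_17_swizzle() does via test_bit() */
	int was_set = (bit_17[i / BITS_PER_LONG_] >> (i % BITS_PER_LONG_)) & 1;

	free(bit_17);
	return was_set ? 0 : 1;
}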
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1449b452cc63..4642115902d6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,8 +43,6 @@ static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
uint64_t offset,
uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_get_pages(struct drm_gem_object *obj);
-static void i915_gem_object_put_pages(struct drm_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
unsigned alignment);
@@ -143,15 +141,27 @@ fast_shmem_read(struct page **pages,
int length)
{
char __iomem *vaddr;
- int ret;
+ int unwritten;
vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
if (vaddr == NULL)
return -ENOMEM;
- ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
+ unwritten = __copy_to_user_inatomic(data, vaddr + page_offset, length);
kunmap_atomic(vaddr, KM_USER0);
- return ret;
+ if (unwritten)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+{
+ drm_i915_private_t *dev_priv = obj->dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+ return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
}
static inline int
@@ -181,6 +191,64 @@ slow_shmem_copy(struct page *dst_page,
return 0;
}
+static inline int
+slow_shmem_bit17_copy(struct page *gpu_page,
+ int gpu_offset,
+ struct page *cpu_page,
+ int cpu_offset,
+ int length,
+ int is_read)
+{
+ char *gpu_vaddr, *cpu_vaddr;
+
+ /* Use the unswizzled path if this page isn't affected. */
+ if ((page_to_phys(gpu_page) & (1 << 17)) == 0) {
+ if (is_read)
+ return slow_shmem_copy(cpu_page, cpu_offset,
+ gpu_page, gpu_offset, length);
+ else
+ return slow_shmem_copy(gpu_page, gpu_offset,
+ cpu_page, cpu_offset, length);
+ }
+
+ gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
+ if (gpu_vaddr == NULL)
+ return -ENOMEM;
+
+ cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
+ if (cpu_vaddr == NULL) {
+ kunmap_atomic(gpu_vaddr, KM_USER0);
+ return -ENOMEM;
+ }
+
+ /* Copy the data, XORing A6 with A17 (1). The user already knows he's
+ * XORing with the other bits (A9 for Y, A9 and A10 for X)
+ */
+ while (length > 0) {
+ int cacheline_end = ALIGN(gpu_offset + 1, 64);
+ int this_length = min(cacheline_end - gpu_offset, length);
+ int swizzled_gpu_offset = gpu_offset ^ 64;
+
+ if (is_read) {
+ memcpy(cpu_vaddr + cpu_offset,
+ gpu_vaddr + swizzled_gpu_offset,
+ this_length);
+ } else {
+ memcpy(gpu_vaddr + swizzled_gpu_offset,
+ cpu_vaddr + cpu_offset,
+ this_length);
+ }
+ cpu_offset += this_length;
+ gpu_offset += this_length;
+ length -= this_length;
+ }
+
+ kunmap_atomic(cpu_vaddr, KM_USER1);
+ kunmap_atomic(gpu_vaddr, KM_USER0);
+
+ return 0;
+}
+
/**
* This is the fast shmem pread path, which attempts to copy_from_user directly
* from the backing pages of the object to the user's address space. On a
@@ -269,6 +337,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
int page_length;
int ret;
uint64_t data_ptr = args->data_ptr;
+ int do_bit17_swizzling;
remain = args->size;
@@ -286,13 +355,15 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
down_read(&mm->mmap_sem);
pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
- num_pages, 0, 0, user_pages, NULL);
+ num_pages, 1, 0, user_pages, NULL);
up_read(&mm->mmap_sem);
if (pinned_pages < num_pages) {
ret = -EFAULT;
goto fail_put_user_pages;
}
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_get_pages(obj);
@@ -327,11 +398,20 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- ret = slow_shmem_copy(user_pages[data_page_index],
- data_page_offset,
- obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- page_length);
+ if (do_bit17_swizzling) {
+ ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length,
+ 1);
+ } else {
+ ret = slow_shmem_copy(user_pages[data_page_index],
+ data_page_offset,
+ obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ page_length);
+ }
if (ret)
goto fail_put_pages;
@@ -383,9 +463,14 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
- if (ret != 0)
+ if (i915_gem_object_needs_bit17_swizzle(obj)) {
ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+ } else {
+ ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+ if (ret != 0)
+ ret = i915_gem_shmem_pread_slow(dev, obj, args,
+ file_priv);
+ }
drm_gem_object_unreference(obj);
@@ -727,6 +812,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
int page_length;
int ret;
uint64_t data_ptr = args->data_ptr;
+ int do_bit17_swizzling;
remain = args->size;
@@ -751,6 +837,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
goto fail_put_user_pages;
}
+ do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
+
mutex_lock(&dev->struct_mutex);
ret = i915_gem_object_get_pages(obj);
@@ -785,11 +873,20 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
- ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
- shmem_page_offset,
- user_pages[data_page_index],
- data_page_offset,
- page_length);
+ if (do_bit17_swizzling) {
+ ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length,
+ 0);
+ } else {
+ ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ shmem_page_offset,
+ user_pages[data_page_index],
+ data_page_offset,
+ page_length);
+ }
if (ret)
goto fail_put_pages;
@@ -854,6 +951,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
file_priv);
}
+ } else if (i915_gem_object_needs_bit17_swizzle(obj)) {
+ ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file_priv);
} else {
ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
if (ret == -EFAULT) {
@@ -1285,7 +1384,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static void
+void
i915_gem_object_put_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1297,6 +1396,9 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
if (--obj_priv->pages_refcount != 0)
return;
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_save_bit_17_swizzle(obj);
+
for (i = 0; i < page_count; i++)
if (obj_priv->pages[i] != NULL) {
if (obj_priv->dirty)
@@ -1494,8 +1596,19 @@ i915_gem_retire_request(struct drm_device *dev,
if (obj->write_domain != 0)
i915_gem_object_move_to_flushing(obj);
- else
+ else {
+ /* Take a reference on the object so it won't be
+ * freed while the spinlock is held. The list
+ * protection for this spinlock is safe when breaking
+ * the lock like this since the next thing we do
+ * is just get the head of the list again.
+ */
+ drm_gem_object_reference(obj);
i915_gem_object_move_to_inactive(obj);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ drm_gem_object_unreference(obj);
+ spin_lock(&dev_priv->mm.active_list_lock);
+ }
}
out:
spin_unlock(&dev_priv->mm.active_list_lock);
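The retire-request hunk above is the subtlest change in i915_gem.c: moving an object to the inactive list can drop what was its last list reference, and the resulting free must not run under the active-list spinlock. So the code pins the object, breaks the lock for the final unreference, then retakes it; this is safe only because the surrounding loop re-reads the list head on every iteration. A condensed pthread model of the idiom (hypothetical names; the kernel uses spin_lock and drm_gem_object_reference/unreference):

#include <pthread.h>
#include <stdlib.h>

struct obj { int refs; };

static void get_obj(struct obj *o)
{
	__atomic_add_fetch(&o->refs, 1, __ATOMIC_RELAXED);
}

static void put_obj(struct obj *o)
{
	if (__atomic_sub_fetch(&o->refs, 1, __ATOMIC_RELAXED) == 0)
		free(o);		/* the step that must not run locked */
}

static void retire_one(pthread_mutex_t *lock, struct obj *o)
{
	/* lock is held on entry and on exit */
	get_obj(o);			/* pin o across the unlocked window */
	/* move_to_inactive(o): may drop what was the last list reference */
	pthread_mutex_unlock(lock);
	put_obj(o);			/* may free o: legal, lock released */
	pthread_mutex_lock(lock);	/* caller restarts from the list head */
}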
@@ -1884,7 +1997,7 @@ i915_gem_evict_everything(struct drm_device *dev)
return ret;
}
-static int
+int
i915_gem_object_get_pages(struct drm_gem_object *obj)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
@@ -1922,6 +2035,10 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
}
obj_priv->pages[i] = page;
}
+
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_do_bit_17_swizzle(obj);
+
return 0;
}
@@ -3002,13 +3119,13 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
drm_free(*relocs, reloc_count * sizeof(**relocs),
DRM_MEM_DRIVER);
*relocs = NULL;
- return ret;
+ return -EFAULT;
}
reloc_index += exec_list[i].relocation_count;
}
- return ret;
+ return 0;
}
static int
@@ -3017,23 +3134,28 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
struct drm_i915_gem_relocation_entry *relocs)
{
uint32_t reloc_count = 0, i;
- int ret;
+ int ret = 0;
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
+ int unwritten;
user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
- if (ret == 0) {
- ret = copy_to_user(user_relocs,
- &relocs[reloc_count],
- exec_list[i].relocation_count *
- sizeof(*relocs));
+ unwritten = copy_to_user(user_relocs,
+ &relocs[reloc_count],
+ exec_list[i].relocation_count *
+ sizeof(*relocs));
+
+ if (unwritten) {
+ ret = -EFAULT;
+ goto err;
}
reloc_count += exec_list[i].relocation_count;
}
+err:
drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER);
return ret;
@@ -3243,7 +3365,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec_offset = exec_list[args->buffer_count - 1].offset;
#if WATCH_EXEC
- i915_gem_dump_object(object_list[args->buffer_count - 1],
+ i915_gem_dump_object(batch_obj,
args->batch_len,
__func__,
~0);
@@ -3308,10 +3430,12 @@ err:
(uintptr_t) args->buffers_ptr,
exec_list,
sizeof(*exec_list) * args->buffer_count);
- if (ret)
+ if (ret) {
+ ret = -EFAULT;
DRM_ERROR("failed to copy %d exec entries "
"back to user (%d)\n",
args->buffer_count, ret);
+ }
}
/* Copy the updated relocations out regardless of current error
@@ -3593,6 +3717,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
i915_gem_free_mmap_offset(obj);
drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+ kfree(obj_priv->bit_17);
drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
}
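Across the pread/pwrite changes above, the actual bit-17 fixup lives in slow_shmem_bit17_copy(): within an affected page, each 64-byte cacheline trades places with its neighbour, so the copy offset is simply XORed with 64, while `cacheline_end` clamps every run so it never straddles a 64-byte boundary. A standalone demonstration of that address math (ALIGN_ is a stand-in for the kernel's ALIGN macro):

#include <stdio.h>

#define ALIGN_(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int offsets[] = { 0, 60, 64, 130 };

	for (int k = 0; k < 4; k++) {
		int off = offsets[k];
		int cacheline_end = ALIGN_(off + 1, 64);  /* end of this 64B chunk */
		int room          = cacheline_end - off;  /* max run before clamp */

		printf("offset %3d -> swizzled %3d (chunk room %2d)\n",
		       off, off ^ 64, room);
	}
	return 0;
}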
diff --git a/drivers/gpu/drm/i915/i915_gem_debugfs.c b/drivers/gpu/drm/i915/i915_gem_debugfs.c
index a1ac0c5e7307..986f1082c596 100644
--- a/drivers/gpu/drm/i915/i915_gem_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_gem_debugfs.c
@@ -234,6 +234,96 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
+static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+{
+ int page, i;
+ uint32_t *mem;
+
+ for (page = 0; page < page_count; page++) {
+ mem = kmap(pages[page]);
+ for (i = 0; i < PAGE_SIZE; i += 4)
+ seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
+ kunmap(pages[page]);
+ }
+}
+
+static int i915_batchbuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj_priv;
+ int ret;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ obj = obj_priv->obj;
+ if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+ ret = i915_gem_object_get_pages(obj);
+ if (ret) {
+ DRM_ERROR("Failed to get pages: %d\n", ret);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+ return ret;
+ }
+
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
+ i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
+
+ i915_gem_object_put_pages(obj);
+ }
+ }
+
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ return 0;
+}
+
+static int i915_ringbuffer_data(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u8 *virt;
+ uint32_t *ptr, off;
+
+ if (!dev_priv->ring.ring_obj) {
+ seq_printf(m, "No ringbuffer setup\n");
+ return 0;
+ }
+
+ virt = dev_priv->ring.virtual_start;
+
+ for (off = 0; off < dev_priv->ring.Size; off += 4) {
+ ptr = (uint32_t *)(virt + off);
+ seq_printf(m, "%08x : %08x\n", off, *ptr);
+ }
+
+ return 0;
+}
+
+static int i915_ringbuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned int head, tail, mask;
+
+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ mask = dev_priv->ring.tail_mask;
+
+ seq_printf(m, "RingHead : %08x\n", head);
+ seq_printf(m, "RingTail : %08x\n", tail);
+ seq_printf(m, "RingMask : %08x\n", mask);
+ seq_printf(m, "RingSize : %08lx\n", dev_priv->ring.Size);
+ seq_printf(m, "Acthd : %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+
+ return 0;
+}
+
+
static struct drm_info_list i915_gem_debugfs_list[] = {
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
@@ -243,6 +333,9 @@ static struct drm_info_list i915_gem_debugfs_list[] = {
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_hws", i915_hws_info, 0},
+ {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
+ {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+ {"i915_batchbuffers", i915_batchbuffer_info, 0},
};
#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
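The three new entries land next to the existing GEM lists in DRM's debugfs directory. A minimal reader, assuming the usual mount point and minor number (both are assumptions; debugfs may be mounted elsewhere and the minor may differ):

#include <stdio.h>

int main(void)
{
	/* path is an assumption: <debugfs mount>/dri/<minor>/<entry> */
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_ringbuffer_info", "r");
	char line[128];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}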
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 6be3f927c86a..f27e523c764f 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
*
*/
+#include "linux/string.h"
+#include "linux/bitops.h"
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
@@ -127,8 +129,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 swizzling by the CPU in addition. */
- swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
- swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
+ swizzle_y = I915_BIT_6_SWIZZLE_9_17;
}
break;
}
@@ -288,6 +290,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
else
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+
+ /* Hide bit 17 swizzling from the user. This prevents old Mesa
+ * from aborting the application on sw fallbacks to bit 17,
+ * and we use the pread/pwrite bit17 paths to swizzle for it.
+ * If there was a user that was relying on the swizzle
+ * information for drm_intel_bo_map()ed reads/writes this would
+ * break it, but we don't have any of those.
+ */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
/* If we can't handle the swizzling, make it untiled. */
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
args->tiling_mode = I915_TILING_NONE;
@@ -354,8 +369,100 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
DRM_ERROR("unknown tiling mode\n");
}
+ /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
+ if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
+ args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
+
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
+
+/**
+ * Swap every 64 bytes of this page around, to account for it having a new
+ * bit 17 of its physical address and therefore being interpreted differently
+ * by the GPU.
+ */
+static int
+i915_gem_swizzle_page(struct page *page)
+{
+ char *vaddr;
+ int i;
+ char temp[64];
+
+ vaddr = kmap(page);
+ if (vaddr == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < PAGE_SIZE; i += 128) {
+ memcpy(temp, &vaddr[i], 64);
+ memcpy(&vaddr[i], &vaddr[i + 64], 64);
+ memcpy(&vaddr[i + 64], temp, 64);
+ }
+
+ kunmap(page);
+
+ return 0;
+}
+
+void
+i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count = obj->size >> PAGE_SHIFT;
+ int i;
+
+ if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+ return;
+
+ if (obj_priv->bit_17 == NULL)
+ return;
+
+ for (i = 0; i < page_count; i++) {
+ char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ if ((new_bit_17 & 0x1) !=
+ (test_bit(i, obj_priv->bit_17) != 0)) {
+ int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
+ if (ret != 0) {
+ DRM_ERROR("Failed to swizzle page\n");
+ return;
+ }
+ set_page_dirty(obj_priv->pages[i]);
+ }
+ }
+}
+
+void
+i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int page_count = obj->size >> PAGE_SHIFT;
+ int i;
+
+ if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
+ return;
+
+ if (obj_priv->bit_17 == NULL) {
+ obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+ sizeof(long), GFP_KERNEL);
+ if (obj_priv->bit_17 == NULL) {
+ DRM_ERROR("Failed to allocate memory for bit 17 "
+ "record\n");
+ return;
+ }
+ }
+
+ for (i = 0; i < page_count; i++) {
+ if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
+ __set_bit(i, obj_priv->bit_17);
+ else
+ __clear_bit(i, obj_priv->bit_17);
+ }
+}
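The do/save pair above works as a round trip: save_bit_17_swizzle() records bit 17 of every page's physical address when the pages are released, and do_bit_17_swizzle() compares the current bit against that record when pages are re-acquired, re-swizzling only pages whose bit flipped, since an unchanged bit means the GPU will interpret the page exactly as it was written. A toy model with fabricated addresses (the kernel uses page_to_phys()):

#include <stdio.h>

int main(void)
{
	unsigned long phys_then[] = { 0x120000, 0x140000, 0x160000 };
	unsigned long phys_now[]  = { 0x120000, 0x160000, 0x140000 };

	for (int i = 0; i < 3; i++) {
		int old17 = (phys_then[i] >> 17) & 1;
		int new17 = (phys_now[i]  >> 17) & 1;

		if (old17 != new17)
			printf("page %d: bit17 flipped, swap its 64B halves\n", i);
		else
			printf("page %d: unchanged, leave alone\n", i);
	}
	return 0;
}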
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 64773ce52964..c2c8e95ff14d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -367,6 +367,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
+ .find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_IGD_LVDS */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@@ -380,6 +381,7 @@ static const intel_limit_t intel_limits[] = {
/* IGD only supports single-channel mode. */
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
+ .find_pll = intel_find_best_PLL,
},
};
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index b7f0ebe9f810..3e094beecb99 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -864,8 +864,8 @@ static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
static struct sysrq_key_op sysrq_intelfb_restore_op = {
.handler = intelfb_sysrq,
- .help_msg = "force fb",
- .action_msg = "force restore of fb console",
+ .help_msg = "force-fb(G)",
+ .action_msg = "Restore framebuffer console",
};
int intelfb_probe(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index b06a4a3ff08d..550374225388 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -38,7 +38,7 @@
struct intel_hdmi_priv {
u32 sdvox_reg;
u32 save_SDVOX;
- int has_hdmi_sink;
+ bool has_hdmi_sink;
};
static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -128,6 +128,22 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
return true;
}
+static void
+intel_hdmi_sink_detect(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
+ struct edid *edid = NULL;
+
+ edid = drm_get_edid(&intel_output->base,
+ &intel_output->ddc_bus->adapter);
+ if (edid != NULL) {
+ hdmi_priv->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+ kfree(edid);
+ intel_output->base.display_info.raw_edid = NULL;
+ }
+}
+
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector)
{
@@ -158,9 +174,10 @@ intel_hdmi_detect(struct drm_connector *connector)
return connector_status_unknown;
}
- if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0)
+ if ((I915_READ(PORT_HOTPLUG_STAT) & bit) != 0) {
+ intel_hdmi_sink_detect(connector);
return connector_status_connected;
- else
+ } else
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 7b31f55f55c8..9913651c1e17 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1357,6 +1357,23 @@ void intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
intel_sdvo_read_response(intel_output, &response, 2);
}
+static void
+intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+ struct intel_output *intel_output = to_intel_output(connector);
+ struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
+ struct edid *edid = NULL;
+
+ intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
+ edid = drm_get_edid(&intel_output->base,
+ &intel_output->ddc_bus->adapter);
+ if (edid != NULL) {
+ sdvo_priv->is_hdmi = drm_detect_hdmi_monitor(edid);
+ kfree(edid);
+ intel_output->base.display_info.raw_edid = NULL;
+ }
+}
+
static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connector)
{
u8 response[2];
@@ -1371,9 +1388,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
if (status != SDVO_CMD_STATUS_SUCCESS)
return connector_status_unknown;
- if ((response[0] != 0) || (response[1] != 0))
+ if ((response[0] != 0) || (response[1] != 0)) {
+ intel_sdvo_hdmi_sink_detect(connector);
return connector_status_connected;
- else
+ } else
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 7a339dba6a69..bfb92d283260 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -481,11 +481,13 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
{
int count = 10000000;
- while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
+ while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
+ ;
- while (count-- && (VIA_READ(VIA_REG_STATUS) &
+ while (count && (VIA_READ(VIA_REG_STATUS) &
(VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
- VIA_3D_ENG_BUSY))) ;
+ VIA_3D_ENG_BUSY)))
+ --count;
return count;
}
@@ -705,7 +707,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
switch (d_siz->func) {
case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
- && count--) {
+ && --count) {
if (!d_siz->wait) {
break;
}
@@ -717,7 +719,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
break;
case VIA_CMDBUF_LAG:
while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
- && count--) {
+ && --count) {
if (!d_siz->wait) {
break;
}
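All three via_dma.c loop fixes swap post-decrement for pre-decrement in timeout loops: with `while (cond && count--)`, a timed-out loop exits with count at -1, so via_wait_idle()'s returned counter (and any later zero test) misreads exhaustion; `--count` exits with exactly 0. A small demonstration:

#include <stdio.h>

static int busy(void) { return 1; }	/* device never goes idle */

int main(void)
{
	int a = 3, b = 3;

	while (busy() && a--)		/* old form */
		;
	while (busy() && --b)		/* fixed form */
		;

	/* a == -1 (truthy in a later test), b == 0 as intended */
	printf("post-decrement ends at %d, pre-decrement at %d\n", a, b);
	return 0;
}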