 drivers/cpufreq/cpufreq.c               |  20
 drivers/gpu/drm/drm_crtc_helper.c       |   5
 drivers/gpu/drm/i915/i915_debugfs.c     |  41
 drivers/gpu/drm/i915/i915_drv.c         |   3
 drivers/gpu/drm/i915/i915_drv.h         |  13
 drivers/gpu/drm/i915/i915_gem.c         | 191
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  39
 drivers/gpu/drm/i915/i915_irq.c         |  13
 drivers/gpu/drm/i915/i915_reg.h         |   4
 drivers/gpu/drm/i915/i915_suspend.c     |   4
 drivers/gpu/drm/i915/intel_display.c    | 404
 drivers/gpu/drm/i915/intel_dp.c         |  17
 drivers/gpu/drm/i915/intel_drv.h        |   2
 drivers/gpu/drm/i915/intel_hdmi.c       |   8
 drivers/gpu/drm/i915/intel_overlay.c    |   6
 drivers/gpu/drm/i915/intel_ringbuffer.c |   6
 include/linux/cpufreq.h                 |   5
 17 files changed, 638 insertions(+), 143 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0a5bea9e3585..987a165ede26 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1199,6 +1199,26 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_quick_get);
+/**
+ * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
+ * @cpu: CPU number
+ *
+ * Just return the max possible frequency for a given CPU.
+ */
+unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ unsigned int ret_freq = 0;
+
+ if (policy) {
+ ret_freq = policy->max;
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret_freq;
+}
+EXPORT_SYMBOL(cpufreq_quick_get_max);
+
static unsigned int __cpufreq_get(unsigned int cpu)
{
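A minimal sketch of how a caller uses the new helper, mirroring the gen6_update_ring_freq() call site added later in this series (kernel-context fragment; the tsc_khz fallback is x86-specific, exactly as in that call site):

    unsigned int max_ia_freq = cpufreq_quick_get_max(0); /* kHz, or 0 */
    if (!max_ia_freq)
            max_ia_freq = tsc_khz; /* no cpufreq policy registered yet */
    max_ia_freq /= 1000; /* kHz -> MHz */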
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 92369655dca3..f88a9b2c977b 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -560,6 +560,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
+ } else if (set->fb->depth != set->crtc->fb->depth) {
+ mode_changed = true;
+ } else if (set->fb->bits_per_pixel !=
+ set->crtc->fb->bits_per_pixel) {
+ mode_changed = true;
} else
fb_changed = true;
}
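The net effect of the new checks: swapping in a framebuffer whose depth or bits_per_pixel differs from the current one now takes the full modeset path instead of the fast fb-flip path. A condensed sketch of the predicate (hypothetical helper, assuming both framebuffers are non-NULL since the NULL cases are handled separately above):

    static bool fb_format_changed(const struct drm_framebuffer *new_fb,
                                  const struct drm_framebuffer *old_fb)
    {
            return new_fb->depth != old_fb->depth ||
                   new_fb->bits_per_pixel != old_fb->bits_per_pixel;
    }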
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0a893f7400fa..e2662497d50f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
MEMSTAT_VID_SHIFT);
seq_printf(m, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if (IS_GEN6(dev)) {
+ } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -1123,6 +1123,44 @@ static int i915_emon_status(struct seq_file *m, void *unused)
return 0;
}
+static int i915_ring_freq_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret;
+ int gpu_freq, ia_freq;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
+ seq_printf(m, "unsupported on this chipset\n");
+ return 0;
+ }
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
+
+ for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+ gpu_freq++) {
+ I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_READ_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode read of freq table timed out\n");
+ continue;
+ }
+ ia_freq = I915_READ(GEN6_PCODE_DATA);
+ seq_printf(m, "%d\t\t%d\n", gpu_freq * 50, ia_freq * 100);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
static int i915_gfxec(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1430,6 +1468,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_inttoext_table", i915_inttoext_table, 0},
{"i915_drpc_info", i915_drpc_info, 0},
{"i915_emon_status", i915_emon_status, 0},
+ {"i915_ring_freq_table", i915_ring_freq_table, 0},
{"i915_gfxec", i915_gfxec, 0},
{"i915_fbc_status", i915_fbc_status, 0},
{"i915_sr_status", i915_sr_status, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 013d304455b9..649278fc268b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -70,6 +70,9 @@ module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
static bool i915_try_reset = true;
module_param_named(reset, i915_try_reset, bool, 0600);
+bool i915_enable_hangcheck = true;
+module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
+
static struct drm_driver driver;
extern int intel_agp_enabled;
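Since enable_hangcheck is registered with mode 0644, it can also be toggled at runtime; a sketch, assuming the standard sysfs path that module_param_named() creates:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/module/i915/parameters/enable_hangcheck", "w");

            if (!f)
                    return 1;
            fputs("0", f); /* "0"/"N" disables the hangcheck timer */
            fclose(f);
            return 0;
    }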
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f245c588ae95..a15f2c04f6a5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -214,6 +214,8 @@ struct drm_i915_display_funcs {
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
+ int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -994,6 +996,7 @@ extern unsigned int i915_panel_use_ssc;
extern int i915_vbt_sdvo_panel_type;
extern unsigned int i915_enable_rc6;
extern unsigned int i915_enable_fbc;
+extern bool i915_enable_hangcheck;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1163,7 +1166,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
-int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
void i915_gem_do_init(struct drm_device *dev,
@@ -1182,7 +1185,8 @@ int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -1196,9 +1200,14 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
+
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c0d1247f453..e9d1d5c3a696 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1771,8 +1771,11 @@ i915_add_request(struct intel_ring_buffer *ring,
ring->outstanding_lazy_request = false;
if (!dev_priv->mm.suspended) {
- mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ if (i915_enable_hangcheck) {
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies +
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
if (was_empty)
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
@@ -2143,6 +2146,30 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
return 0;
}
+static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
+{
+ u32 old_write_domain, old_read_domains;
+
+ /* Act as a barrier for all accesses through the GTT */
+ mb();
+
+ /* Force a pagefault for domain tracking on next user access */
+ i915_gem_release_mmap(obj);
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
+ return;
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+}
+
/**
* Unbinds an object from the GTT aperture.
*/
@@ -2159,23 +2186,28 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
return -EINVAL;
}
- /* blow away mappings if mapped through GTT */
- i915_gem_release_mmap(obj);
-
- /* Move the object to the CPU domain to ensure that
- * any possible CPU writes while it's not in the GTT
- * are flushed when we go to remap it. This will
- * also ensure that all pending GPU writes are finished
- * before we unbind.
- */
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ ret = i915_gem_object_finish_gpu(obj);
if (ret == -ERESTARTSYS)
return ret;
/* Continue on if we fail due to EIO, the GPU is hung so we
* should be safe and we need to cleanup or else we might
* cause memory corruption through use-after-free.
*/
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Move the object to the CPU domain to ensure that
+ * any possible CPU writes while it's not in the GTT
+ * are flushed when we go to remap it.
+ */
+ if (ret == 0)
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret == -ERESTARTSYS)
+ return ret;
if (ret) {
+ /* In the event of a disaster, abandon all caches and
+ * hope for the best.
+ */
i915_gem_clflush_object(obj);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2997,51 +3029,139 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
return 0;
}
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ int ret;
+
+ if (obj->cache_level == cache_level)
+ return 0;
+
+ if (obj->pin_count) {
+ DRM_DEBUG("can not change the cache level of pinned objects\n");
+ return -EBUSY;
+ }
+
+ if (obj->gtt_space) {
+ ret = i915_gem_object_finish_gpu(obj);
+ if (ret)
+ return ret;
+
+ i915_gem_object_finish_gtt(obj);
+
+ /* Before SandyBridge, you could not use tiling or fence
+ * registers with snooped memory, so relinquish any fences
+ * currently pointing to our region in the aperture.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen < 6) {
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+ }
+
+ i915_gem_gtt_rebind_object(obj, cache_level);
+ }
+
+ if (cache_level == I915_CACHE_NONE) {
+ u32 old_read_domains, old_write_domain;
+
+ /* If we're coming from LLC cached, then we haven't
+ * actually been tracking whether the data is in the
+ * CPU cache or not, since we only allow one bit set
+ * in obj->write_domain and have been skipping the clflushes.
+ * Just set it to the CPU cache for now.
+ */
+ WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
+ WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
+
+ old_read_domains = obj->base.read_domains;
+ old_write_domain = obj->base.write_domain;
+
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+
+ trace_i915_gem_object_change_domain(obj,
+ old_read_domains,
+ old_write_domain);
+ }
+
+ obj->cache_level = cache_level;
+ return 0;
+}
+
/*
- * Prepare buffer for display plane. Use uninterruptible for possible flush
- * wait, as in modesetting process we're not supposed to be interrupted.
+ * Prepare buffer for display plane (scanout, cursors, etc).
+ * Can be called from an uninterruptible phase (modesetting) and allows
+ * any flushes to be pipelined (for pageflips).
+ *
+ * For the display plane, we want to be in the GTT but out of any write
+ * domains. So in many ways this looks like set_to_gtt_domain() apart from the
+ * ability to pipeline the waits, pinning and any additional subtleties
+ * that may differentiate the display plane from ordinary buffers.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
+ u32 alignment,
struct intel_ring_buffer *pipelined)
{
- uint32_t old_read_domains;
+ u32 old_read_domains, old_write_domain;
int ret;
- /* Not valid to be called on unbound objects. */
- if (obj->gtt_space == NULL)
- return -EINVAL;
-
ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
-
- /* Currently, we are always called from an non-interruptible context. */
if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj);
if (ret)
return ret;
}
+ /* The display engine is not coherent with the LLC cache on gen6. As
+ * a result, we make sure that the pinning that is about to occur is
+ * done with uncached PTEs. This is lowest common denominator for all
+ * chipsets.
+ *
+ * However for gen6+, we could do better by using the GFDT bit instead
+ * of uncaching, which would allow us to flush all the LLC-cached data
+ * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+ */
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret)
+ return ret;
+
+ /* As the user may map the buffer once pinned in the display plane
+ * (e.g. libkms for the bootup splash), we have to ensure that we
+ * always use map_and_fenceable for all scanout buffers.
+ */
+ ret = i915_gem_object_pin(obj, alignment, true);
+ if (ret)
+ return ret;
+
i915_gem_object_flush_cpu_write_domain(obj);
+ old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
+
+ /* It should now be out of any other write domains, and we can update
+ * the domain values for our changes.
+ */
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->base.write_domain);
+ old_write_domain);
return 0;
}
int
-i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
+i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
{
int ret;
- if (!obj->active)
+ if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
@@ -3050,6 +3170,9 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj)
return ret;
}
+ /* Ensure that we invalidate the GPU's caches and TLBs. */
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+
return i915_gem_object_wait_rendering(obj);
}
@@ -3576,7 +3699,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
+ if (IS_GEN6(dev)) {
+ /* On Gen6, we can have the GPU use the LLC (the CPU
+ * cache) for about a 10% performance improvement
+ * compared to uncached. Graphics requests other than
+ * display scanout are coherent with the CPU in
+ * accessing this cache. This means in this mode we
+ * don't need to clflush on the CPU side, and on the
+ * GPU side we only need to flush internal caches to
+ * get data visible to the CPU.
+ *
+ * However, we maintain the display planes as UC, and so
+ * need to rebind when first used as such.
+ */
+ obj->cache_level = I915_CACHE_LLC;
+ } else
+ obj->cache_level = I915_CACHE_NONE;
+
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e46b645773cf..7a709cd8d543 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -59,24 +59,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
- unsigned int agp_type =
- cache_level_to_agp_type(dev, obj->cache_level);
-
i915_gem_clflush_object(obj);
-
- if (dev_priv->mm.gtt->needs_dmar) {
- BUG_ON(!obj->sg_list);
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start
- >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
+ i915_gem_gtt_rebind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
@@ -110,6 +94,27 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
return 0;
}
+void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ agp_type);
+}
+
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3b03f85ea627..23d1ae67d279 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -361,10 +361,12 @@ static void notify_ring(struct drm_device *dev,
ring->irq_seqno = seqno;
wake_up_all(&ring->irq_queue);
-
- dev_priv->hangcheck_count = 0;
- mod_timer(&dev_priv->hangcheck_timer,
- jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ if (i915_enable_hangcheck) {
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer,
+ jiffies +
+ msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+ }
}
static void gen6_pm_rps_work(struct work_struct *work)
@@ -1664,6 +1666,9 @@ void i915_hangcheck_elapsed(unsigned long data)
uint32_t acthd, instdone, instdone1;
bool err = false;
+ if (!i915_enable_hangcheck)
+ return;
+
/* If all work is done then ACTHD clearly hasn't advanced. */
if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5d5def756c9e..4a446b116e6a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3434,7 +3434,9 @@
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
#define GEN6_READ_OC_PARAMS 0xc
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_DATA 0x138128
+#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 5257cfc34c35..8cd1c8b00660 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -878,8 +878,10 @@ int i915_restore_state(struct drm_device *dev)
intel_init_emon(dev);
}
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev)) {
gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 21b6f93fe919..af3e5813366c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
@@ -1157,12 +1158,15 @@ static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
reg = TRANSCONF(pipe);
val = I915_READ(reg);
- /*
- * make the BPC in transcoder be consistent with
- * that in pipeconf reg.
- */
- val &= ~PIPE_BPC_MASK;
- val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+
+ if (HAS_PCH_IBX(dev_priv->dev)) {
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ val &= ~PIPE_BPC_MASK;
+ val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK;
+ }
I915_WRITE(reg, val | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
@@ -1812,14 +1816,10 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
}
dev_priv->mm.interruptible = false;
- ret = i915_gem_object_pin(obj, alignment, true);
+ ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
if (ret)
goto err_interruptible;
- ret = i915_gem_object_set_to_display_plane(obj, pipelined);
- if (ret)
- goto err_unpin;
-
/* Install a fence for tiled scan-out. Pre-i965 always needs a
* fence, whereas 965+ only requires a fence if using
* framebuffer compression. For simplicity, we always install
@@ -1841,10 +1841,8 @@ err_interruptible:
return ret;
}
-/* Assume fb object is pinned & idle & fenced and just update base pointers */
-static int
-intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int x, int y, enum mode_set_atomic state)
+static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1887,7 +1885,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
break;
default:
- DRM_ERROR("Unknown color depth\n");
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1897,10 +1895,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
dspcntr &= ~DISPPLANE_TILED;
}
- if (HAS_PCH_SPLIT(dev))
- /* must disable */
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
@@ -1917,6 +1911,99 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(DSPADDR(plane), Start + Offset);
POSTING_READ(reg);
+ return 0;
+}
+
+static int ironlake_update_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+ u32 dspcntr;
+ u32 reg;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ reg = DSPCNTR(plane);
+ dspcntr = I915_READ(reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (fb->depth != 16)
+ return -EINVAL;
+
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ if (fb->depth == 24)
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ else if (fb->depth == 30)
+ dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
+ else
+ return -EINVAL;
+ break;
+ default:
+ DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(reg, dspcntr);
+
+ Start = obj->gtt_offset;
+ Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ Start, Offset, x, y, fb->pitch);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ I915_WRITE(DSPSURF(plane), Start);
+ I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE(DSPADDR(plane), Offset);
+ POSTING_READ(reg);
+
+ return 0;
+}
+
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y, enum mode_set_atomic state)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
+ ret = dev_priv->display.update_plane(crtc, fb, x, y);
+ if (ret)
+ return ret;
+
intel_update_fbc(dev);
intel_increase_pllclock(crtc);
@@ -1971,7 +2058,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
- ret = i915_gem_object_flush_gpu(obj);
+ ret = i915_gem_object_finish_gpu(obj);
(void) ret;
}
@@ -2622,6 +2709,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
@@ -2629,7 +2717,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
- temp |= TRANS_DP_8BPC;
+ temp |= bpc << 9; /* same format but at 11:9 */
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -4308,6 +4396,133 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
}
+/**
+ * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
+ * @crtc: CRTC structure
+ *
+ * A pipe may be connected to one or more outputs. Based on the depth of the
+ * attached framebuffer, choose a good color depth to use on the pipe.
+ *
+ * If possible, match the pipe depth to the fb depth. In some cases, this
+ * isn't ideal, because the connected output supports a lesser or restricted
+ * set of depths. Resolve that here:
+ * LVDS typically supports only 6bpc, so clamp down in that case
+ * HDMI supports only 8bpc or 12bpc, so a 10bpc sink is forced up to 12bpc
+ * Displays may support a restricted set as well, check EDID and clamp as
+ * appropriate.
+ *
+ * RETURNS:
+ * Dithering requirement (i.e. false if display bpc and pipe bpc match,
+ * true if they don't match).
+ */
+static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
+ unsigned int *pipe_bpp)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ unsigned int display_bpc = UINT_MAX, bpc;
+
+ /* Walk the encoders & connectors on this crtc, get min bpc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
+ unsigned int lvds_bpc;
+
+ if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
+ LVDS_A3_POWER_UP)
+ lvds_bpc = 8;
+ else
+ lvds_bpc = 6;
+
+ if (lvds_bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+ display_bpc = lvds_bpc;
+ }
+ continue;
+ }
+
+ if (intel_encoder->type == INTEL_OUTPUT_EDP) {
+ /* Use VBT settings if we have an eDP panel */
+ unsigned int edp_bpc = dev_priv->edp.bpp / 3;
+
+ if (edp_bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+ display_bpc = edp_bpc;
+ }
+ continue;
+ }
+
+ /* Not one of the known troublemakers, check the EDID */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ if (connector->display_info.bpc < display_bpc) {
+ DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+ display_bpc = connector->display_info.bpc;
+ }
+ }
+
+ /*
+ * HDMI is either 12 or 8, so if the display lets 10bpc sneak
+ * through, clamp it down. (Note: >12bpc will be caught below.)
+ */
+ if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+ if (display_bpc > 8 && display_bpc < 12) {
+ DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+ display_bpc = 12;
+ } else {
+ DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+ display_bpc = 8;
+ }
+ }
+ }
+
+ /*
+ * We could just drive the pipe at the highest bpc all the time and
+ * enable dithering as needed, but that costs bandwidth. So choose
+ * the minimum value that expresses the full color range of the fb but
+ * also stays within the max display bpc discovered above.
+ */
+
+ switch (crtc->fb->depth) {
+ case 8:
+ bpc = 8; /* since we go through a colormap */
+ break;
+ case 15:
+ case 16:
+ bpc = 6; /* min is 18bpp */
+ break;
+ case 24:
+ bpc = min((unsigned int)8, display_bpc);
+ break;
+ case 30:
+ bpc = min((unsigned int)10, display_bpc);
+ break;
+ case 48:
+ bpc = min((unsigned int)12, display_bpc);
+ break;
+ default:
+ DRM_DEBUG("unsupported depth, assuming 24 bits\n");
+ bpc = min((unsigned int)8, display_bpc);
+ break;
+ }
+
+ DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
+ bpc, display_bpc);
+
+ *pipe_bpp = bpc * 3;
+
+ return display_bpc != bpc;
+}
+
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
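A worked trace of the helper for one hypothetical configuration, a 30-bit framebuffer on an HDMI sink whose EDID reports at most 10 bpc:

    /* EDID clamp:  display_bpc = 10
     * HDMI rule:   8 < 10 < 12, so display_bpc is forced to 12
     * fb depth 30: bpc = min(10, display_bpc) = 10
     * result:      *pipe_bpp = 30, return display_bpc != bpc -> true,
     *              i.e. enable dithering on this pipe.
     */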
@@ -4720,7 +4935,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct fdi_m_n m_n = {0};
u32 temp;
u32 lvds_sync = 0;
- int target_clock, pixel_multiplier, lane, link_bw, bpp, factor;
+ int target_clock, pixel_multiplier, lane, link_bw, factor;
+ unsigned int pipe_bpp;
+ bool dither;
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
@@ -4847,56 +5064,37 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- if (is_lvds) {
- /* the BPC will be 6 if it is 18-bit LVDS panel */
- if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
- temp |= PIPE_8BPC;
- else
- temp |= PIPE_6BPC;
- } else if (has_edp_encoder) {
- switch (dev_priv->edp.bpp/3) {
- case 8:
- temp |= PIPE_8BPC;
- break;
- case 10:
- temp |= PIPE_10BPC;
- break;
- case 6:
- temp |= PIPE_6BPC;
- break;
- case 12:
- temp |= PIPE_12BPC;
- break;
- }
- } else
- temp |= PIPE_8BPC;
- I915_WRITE(PIPECONF(pipe), temp);
-
- switch (temp & PIPE_BPC_MASK) {
- case PIPE_8BPC:
- bpp = 24;
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ switch (pipe_bpp) {
+ case 18:
+ temp |= PIPE_6BPC;
break;
- case PIPE_10BPC:
- bpp = 30;
+ case 24:
+ temp |= PIPE_8BPC;
break;
- case PIPE_6BPC:
- bpp = 18;
+ case 30:
+ temp |= PIPE_10BPC;
break;
- case PIPE_12BPC:
- bpp = 36;
+ case 36:
+ temp |= PIPE_12BPC;
break;
default:
- DRM_ERROR("unknown pipe bpc value\n");
- bpp = 24;
+ WARN(1, "intel_choose_pipe_bpp returned invalid value\n");
+ temp |= PIPE_8BPC;
+ pipe_bpp = 24;
+ break;
}
+ intel_crtc->bpp = pipe_bpp;
+ I915_WRITE(PIPECONF(pipe), temp);
+
if (!lane) {
/*
* Account for spread spectrum to avoid
* oversubscribing the link. Max center spread
* is 2.5%; use 5% for safety's sake.
*/
- u32 bps = target_clock * bpp * 21 / 20;
+ u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
lane = bps / (link_bw * 8) + 1;
}
@@ -4904,7 +5102,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
- ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
+ ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
+ &m_n);
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
@@ -5107,14 +5306,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(PCH_LVDS, temp);
}
- /* set the dithering flag and clear for anything other than a panel. */
pipeconf &= ~PIPECONF_DITHER_EN;
pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
- if (dev_priv->lvds_dither && (is_lvds || has_edp_encoder)) {
+ if ((is_lvds && dev_priv->lvds_dither) || dither) {
pipeconf |= PIPECONF_DITHER_EN;
pipeconf |= PIPECONF_DITHER_TYPE_ST1;
}
-
if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
@@ -5434,21 +5631,15 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_locked;
}
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
- if (ret) {
- DRM_ERROR("failed to pin cursor bo\n");
- goto fail_locked;
- }
-
- ret = i915_gem_object_set_to_gtt_domain(obj, 0);
+ ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
- goto fail_unpin;
+ goto fail_locked;
}
ret = i915_gem_object_put_fence(obj);
if (ret) {
- DRM_ERROR("failed to move cursor bo into the GTT\n");
+ DRM_ERROR("failed to release fence for cursor");
goto fail_unpin;
}
@@ -6643,6 +6834,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
intel_crtc_reset(&intel_crtc->base);
intel_crtc->active = true; /* force the pipe off on setup_init_config */
+ intel_crtc->bpp = 24; /* default for pre-Ironlake */
if (HAS_PCH_SPLIT(dev)) {
intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6869,6 +7061,11 @@ int intel_framebuffer_init(struct drm_device *dev,
switch (mode_cmd->bpp) {
case 8:
case 16:
+ /* Only pre-ILK can handle 5:5:5 */
+ if (mode_cmd->depth == 15 && HAS_PCH_SPLIT(dev))
+ return -EINVAL;
+ break;
+
case 24:
case 32:
break;
@@ -7283,6 +7480,59 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->dev->struct_mutex);
}
+void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
+{
+ int min_freq = 15;
+ int gpu_freq, ia_freq, max_ia_freq;
+ int scaling_factor = 180;
+
+ max_ia_freq = cpufreq_quick_get_max(0);
+ /*
+ * Default to measured freq if none found; the PCU will ensure we
+ * don't go over.
+ */
+ if (!max_ia_freq)
+ max_ia_freq = tsc_khz;
+
+ /* Convert from kHz to MHz */
+ max_ia_freq /= 1000;
+
+ mutex_lock(&dev_priv->dev->struct_mutex);
+
+ /*
+ * For each potential GPU frequency, load a ring frequency we'd like
+ * to use for memory access. We do this by specifying the IA frequency
+ * the PCU should use as a reference to determine the ring frequency.
+ */
+ for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+ gpu_freq--) {
+ int diff = dev_priv->max_delay - gpu_freq;
+
+ /*
+ * For GPU frequencies less than 750MHz, just use the lowest
+ * ring freq.
+ */
+ if (gpu_freq < min_freq)
+ ia_freq = 800;
+ else
+ ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
+ ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
+
+ I915_WRITE(GEN6_PCODE_DATA,
+ (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
+ gpu_freq);
+ I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
+ GEN6_PCODE_READY) == 0, 10)) {
+ DRM_ERROR("pcode write of freq table timed out\n");
+ continue;
+ }
+ }
+
+ mutex_unlock(&dev_priv->dev->struct_mutex);
+}
+
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
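To make the table construction concrete, assume max_ia_freq resolves to 3200 MHz (an illustrative value):

    /* Highest GPU state (diff = 0):
     *   ia_freq = 3200 - (0 * 180) / 2 = 3200 -> DIV_ROUND_CLOSEST(3200, 100) = 32
     * One state down (diff = 1):
     *   ia_freq = 3200 - (1 * 180) / 2 = 3110 -> ratio 31
     * Any gpu_freq below min_freq (15) is pinned to ia_freq = 800 -> ratio 8,
     * per the "lowest ring freq" comment above.
     */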
@@ -7639,9 +7889,11 @@ static void intel_init_display(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.dpms = ironlake_crtc_dpms;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
+ dev_priv->display.update_plane = ironlake_update_plane;
} else {
dev_priv->display.dpms = i9xx_crtc_dpms;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
+ dev_priv->display.update_plane = i9xx_update_plane;
}
if (I915_HAS_FBC(dev)) {
@@ -7926,8 +8178,10 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_emon(dev);
}
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
gen6_enable_rps(dev_priv);
+ gen6_update_ring_freq(dev_priv);
+ }
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@ -7968,7 +8222,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (IS_GEN6(dev))
+ if (IS_GEN6(dev) || IS_GEN7(dev))
gen6_disable_rps(dev);
if (IS_IRONLAKE_M(dev))
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index e2aced6eec4c..f797fb58ba9c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -178,12 +178,14 @@ intel_dp_link_clock(uint8_t link_bw)
static int
intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = intel_dp->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int bpp = 24;
- if (is_edp(intel_dp))
- return (pixel_clock * dev_priv->edp.bpp + 7) / 8;
- else
- return pixel_clock * 3;
+ if (intel_crtc)
+ bpp = intel_crtc->bpp;
+
+ return (pixel_clock * bpp + 7) / 8;
}
static int
@@ -681,7 +683,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int lane_count = 4, bpp = 24;
+ int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
@@ -700,7 +702,6 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
break;
} else if (is_edp(intel_dp)) {
lane_count = dev_priv->edp.lanes;
- bpp = dev_priv->edp.bpp;
break;
}
}
@@ -710,7 +711,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(bpp, lane_count,
+ intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
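The practical effect of using the real pipe bpp shows up directly in the required-bandwidth figure; for a 1080p60 mode (pixel_clock = 148500 kHz):

    /* intel_dp_link_required():
     *   24 bpp pipe: (148500 * 24 + 7) / 8 = 445500
     *   30 bpp pipe: (148500 * 30 + 7) / 8 = 556875
     * so a deep-color pipe needs ~25% more link bandwidth than the
     * old hard-coded 24 bpp assumed.
     */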
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9ffa61eb4d7e..7ec48d864577 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -170,6 +170,7 @@ struct intel_crtc {
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
bool cursor_visible;
+ unsigned int bpp;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -317,6 +318,7 @@ extern void intel_enable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index aa0a8e83142e..1ed8e6903915 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -124,12 +124,18 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
u32 sdvox;
sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
- sdvox |= intel_hdmi->color_range;
+ if (!HAS_PCH_SPLIT(dev))
+ sdvox |= intel_hdmi->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
+ if (intel_crtc->bpp > 24)
+ sdvox |= COLOR_FORMAT_12bpc;
+ else
+ sdvox |= COLOR_FORMAT_8bpc;
+
/* Required on CPT */
if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
sdvox |= HDMI_MODE_SELECT;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 9e2959bc91cd..d36038086826 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -773,14 +773,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
+ ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL);
if (ret != 0)
return ret;
- ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
- if (ret != 0)
- goto out_unpin;
-
ret = i915_gem_object_put_fence(new_bo);
if (ret)
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 95c4b1429935..e9615685a39c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -236,7 +236,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
- obj->cache_level = I915_CACHE_LLC;
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret)
@@ -776,7 +777,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
ret = -ENOMEM;
goto err;
}
- obj->cache_level = I915_CACHE_LLC;
+
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 11be48e0d168..6216115c7789 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -324,11 +324,16 @@ static inline unsigned int cpufreq_get(unsigned int cpu)
/* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_quick_get(unsigned int cpu);
+unsigned int cpufreq_quick_get_max(unsigned int cpu);
#else
static inline unsigned int cpufreq_quick_get(unsigned int cpu)
{
return 0;
}
+static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ return 0;
+}
#endif