| author | Dave Airlie <airlied@redhat.com> | 2013-06-28 09:50:34 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2013-06-28 09:50:34 +1000 |
| commit | 28419261b09aa3a5118647b1ed93809ca97c5354 (patch) | |
| tree | 5098381ee695009fce1fa4a25ba34d487eb4f35e | |
| parent | 4a009085978de90db40f9f38bcfad501f86ca959 (diff) | |
| parent | 854c94a7854a4fabdd7db451cf1774e6dcba6bab (diff) | |
Merge tag 'drm-intel-next-2013-06-18' of git://people.freedesktop.org/~danvet/drm-intel into drm-next
Last 3.11 feature pull. I have a few odd bits and pieces and fixes in my
queue; I'll sort them out later to see what's for 3.11-fixes and what's
for 3.12. But nothing here that should hold this up, imo.
Highlights:
- more hangcheck work from Mika and Chris to prepare for arb robustness (see
the sketch after this list)
- trickle feed fixes from Ville
- first parts of the shared pch pll rework, with some basic hw state
readout and cross-checking (this shuts up the confused pch pll refcount
WARN that Linus just recently forwarded)
- Haswell audio power well support from Wang Xingchao (alsa bits acked by
Takashi)
- some cleanups and sprinkling of asserts around the plane/gamma enabling
sequence from Ville
- more gtt refactoring from Ben
- clear up the adjusted->mode vs. pixel clock vs. port clock confusion
- 30bpp support, this time for real hopefully
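
To make the hangcheck rework above a bit more concrete, here is a small
standalone model of the per-ring scoring scheme it introduces. The
BUSY/KICK/HUNG/FIRE weights and the decay-on-progress rule are taken from the
i915_hangcheck_elapsed() hunk further down in the diff; the struct and helper
names here are illustrative stand-ins, not the driver's own.

```c
#include <stdbool.h>
#include <stdio.h>

enum hangcheck_action { HC_WAIT, HC_ACTIVE, HC_KICK, HC_HUNG };

struct ring_hangcheck {
	unsigned int seqno;	/* last completed seqno sampled for this ring */
	int score;		/* accumulated suspicion that this ring is hung */
};

#define BUSY 1		/* busy and the head is still advancing */
#define KICK 5		/* had to kick a stuck wait or semaphore */
#define HUNG 20		/* head not moving and nothing left to kick */
#define FIRE 30		/* score at which the GPU is declared hung */

/* Fold one hangcheck timer sample into the ring's score; returns true once
 * the score crosses FIRE and a reset should be requested. */
static bool hangcheck_sample(struct ring_hangcheck *hc, unsigned int seqno,
			     enum hangcheck_action action)
{
	if (hc->seqno != seqno) {
		/* Progress was made: decay the score gradually so that
		 * stalls spread across multiple batches are still caught. */
		if (hc->score > 0)
			hc->score--;
	} else {
		switch (action) {
		case HC_WAIT:			/* legitimately waiting on another ring */
			break;
		case HC_ACTIVE:
			hc->score += BUSY;	/* same request, head still moving */
			break;
		case HC_KICK:
			hc->score += KICK;	/* kicked a stuck WAIT_FOR_EVENT */
			break;
		case HC_HUNG:
			hc->score += HUNG;	/* stuck and unkickable */
			break;
		}
	}

	hc->seqno = seqno;
	return hc->score > FIRE;
}

int main(void)
{
	struct ring_hangcheck hc = { .seqno = 0, .score = 0 };
	int i;

	/* seqno never advances past 1: the first sample records it, and each
	 * following unkickable sample adds HUNG until the score passes FIRE. */
	for (i = 1; i <= 4; i++) {
		if (hangcheck_sample(&hc, 1, HC_HUNG)) {
			printf("declared hung at sample %d (score %d)\n",
			       i, hc.score);
			break;
		}
	}
	return 0;
}
```

The net effect, as the patch comment puts it, is that no single request can
run indefinitely: every stall or kick adds to the ring's score, each retired
seqno slowly pays it back down, and only a score above FIRE triggers
i915_handle_error().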
* tag 'drm-intel-next-2013-06-18' of git://people.freedesktop.org/~danvet/drm-intel: (97 commits)
drm/i915: remove a superflous semi-colon
drm/i915: Kill useless "Enable panel fitter" comments
drm/i915: Remove extra "ring" from error message
drm/i915: simplify the reduced clock handling for pch plls
drm/i915: stop killing pfit on i9xx
drm/i915: explicitly set up PIPECONF (and gamma table) on haswell
drm/i915: set up PIPECONF explicitly for i9xx/vlv platforms
drm/i915: set up PIPECONF explicitly on ilk-ivb
drm/i915: find guilty batch buffer on ring resets
drm/i915: store ring hangcheck action
drm/i915: add batch bo to i915_add_request()
drm/i915: change i915_add_request to macro
drm/i915: add i915_gem_context_get_hang_stats()
drm/i915: add struct i915_ctx_hang_stats
drm/i915: Try harder to disable trickle feed on VLV
drm/i915: fix up pch pll enabling for pixel multipliers
drm/i915: hw state readout and cross-checking for shared dplls
drm/i915: WARN on lack of shared dpll
drm/i915: split up intel_modeset_check_state
drm/i915: extract readout_hw_state from setup_hw_state
...
Conflicts:
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_sdvo.c
33 files changed, 1799 insertions, 881 deletions
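
Before the raw diff, a short illustration of the first change it contains:
i915_debugfs.c gains per-client object statistics, built by walking each DRM
file's object idr with a small accumulator callback. The sketch below models
that pattern in plain C; the gem_object fields, the fake_idr_for_each()
iterator and the sample data are invented stand-ins, and the unbound
accounting is simplified (the real hunk only counts unbound objects still on
the global list).

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct gem_object {
	size_t size;
	bool bound;	/* stands in for obj->gtt_space != NULL */
	bool active;	/* stands in for !list_empty(&obj->ring_list) */
};

struct file_stats {
	int count;
	size_t total, active, inactive, unbound;
};

/* Same shape as the idr_for_each() callback used by the patch. */
static int per_file_stats(int id, void *ptr, void *data)
{
	struct gem_object *obj = ptr;
	struct file_stats *stats = data;

	stats->count++;
	stats->total += obj->size;

	if (obj->bound) {
		if (obj->active)
			stats->active += obj->size;
		else
			stats->inactive += obj->size;
	} else {
		stats->unbound += obj->size;
	}
	return 0;
}

/* Stand-in for idr_for_each(): visit every object owned by one client. */
static void fake_idr_for_each(struct gem_object *objs, int nr,
			      int (*fn)(int, void *, void *), void *data)
{
	int id;

	for (id = 0; id < nr; id++)
		fn(id, &objs[id], data);
}

int main(void)
{
	struct gem_object objs[] = {
		{ .size = 4096,  .bound = true,  .active = true  },
		{ .size = 8192,  .bound = true,  .active = false },
		{ .size = 16384, .bound = false, .active = false },
	};
	struct file_stats stats = { 0 };

	fake_idr_for_each(objs, 3, per_file_stats, &stats);
	printf("%d objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n",
	       stats.count, stats.total, stats.active, stats.inactive,
	       stats.unbound);
	return 0;
}
```

The real callback has exactly this (int id, void *ptr, void *data) shape
because it is driven by idr_for_each(), and the accumulated totals are printed
in debugfs per client, next to the owning process name.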
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 76255a69752a..d4e78b64ca87 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -196,6 +196,32 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) } \ } while (0) +struct file_stats { + int count; + size_t total, active, inactive, unbound; +}; + +static int per_file_stats(int id, void *ptr, void *data) +{ + struct drm_i915_gem_object *obj = ptr; + struct file_stats *stats = data; + + stats->count++; + stats->total += obj->base.size; + + if (obj->gtt_space) { + if (!list_empty(&obj->ring_list)) + stats->active += obj->base.size; + else + stats->inactive += obj->base.size; + } else { + if (!list_empty(&obj->global_list)) + stats->unbound += obj->base.size; + } + + return 0; +} + static int i915_gem_object_info(struct seq_file *m, void* data) { struct drm_info_node *node = (struct drm_info_node *) m->private; @@ -204,6 +230,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) u32 count, mappable_count, purgeable_count; size_t size, mappable_size, purgeable_size; struct drm_i915_gem_object *obj; + struct drm_file *file; int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -215,7 +242,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) dev_priv->mm.object_memory); size = count = mappable_size = mappable_count = 0; - count_objects(&dev_priv->mm.bound_list, gtt_list); + count_objects(&dev_priv->mm.bound_list, global_list); seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n", count, mappable_count, size, mappable_size); @@ -230,7 +257,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) count, mappable_count, size, mappable_size); size = count = purgeable_size = purgeable_count = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { size += obj->base.size, ++count; if (obj->madv == I915_MADV_DONTNEED) purgeable_size += obj->base.size, ++purgeable_count; @@ -238,7 +265,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) seq_printf(m, "%u unbound objects, %zu bytes\n", count, size); size = count = mappable_size = mappable_count = 0; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { if (obj->fault_mappable) { size += obj->gtt_space->size; ++count; @@ -263,6 +290,21 @@ static int i915_gem_object_info(struct seq_file *m, void* data) dev_priv->gtt.total, dev_priv->gtt.mappable_end - dev_priv->gtt.start); + seq_printf(m, "\n"); + list_for_each_entry_reverse(file, &dev->filelist, lhead) { + struct file_stats stats; + + memset(&stats, 0, sizeof(stats)); + idr_for_each(&file->object_idr, per_file_stats, &stats); + seq_printf(m, "%s: %u objects, %zu bytes (%zu active, %zu inactive, %zu unbound)\n", + get_pid_task(file->pid, PIDTYPE_PID)->comm, + stats.count, + stats.total, + stats.active, + stats.inactive, + stats.unbound); + } + mutex_unlock(&dev->struct_mutex); return 0; @@ -283,7 +325,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data) return ret; total_obj_size = total_gtt_size = count = 0; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { if (list == PINNED_LIST && obj->pin_count == 0) continue; @@ -1944,7 +1986,8 @@ i915_drop_caches_set(void *data, u64 val) } if (val & DROP_UNBOUND) { - 
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) + list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, + global_list) if (obj->pages_pin_count == 0) { ret = i915_gem_object_put_pages(obj); if (ret) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index c52d866dfdb0..adb319b53ecd 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1001,8 +1001,7 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; default: - DRM_DEBUG_DRIVER("Unknown parameter %d\n", - param->param); + DRM_DEBUG("Unknown parameter %d\n", param->param); return -EINVAL; } @@ -1633,6 +1632,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) /* Start out suspended */ dev_priv->mm.suspended = 1; + if (HAS_POWER_WELL(dev)) + i915_init_power_well(dev); + if (drm_core_check_feature(dev, DRIVER_MODESET)) { ret = i915_load_modeset_init(dev); if (ret < 0) { @@ -1684,6 +1686,9 @@ int i915_driver_unload(struct drm_device *dev) intel_gpu_ips_teardown(); + if (HAS_POWER_WELL(dev)) + i915_remove_power_well(dev); + i915_teardown_sysfs(dev); if (dev_priv->mm.inactive_shrinker.shrink) @@ -1775,7 +1780,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file) struct drm_i915_file_private *file_priv; DRM_DEBUG_DRIVER("\n"); - file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); + file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); if (!file_priv) return -ENOMEM; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 59ff7456bd70..deaa32e8113b 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -457,7 +457,6 @@ void intel_detect_pch(struct drm_device *dev) */ if (INTEL_INFO(dev)->num_pipes == 0) { dev_priv->pch_type = PCH_NOP; - dev_priv->num_pch_pll = 0; return; } @@ -476,34 +475,28 @@ void intel_detect_pch(struct drm_device *dev) if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_IBX; - dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); WARN_ON(!IS_GEN5(dev)); } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_CPT; - dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ dev_priv->pch_type = PCH_CPT; - dev_priv->num_pch_pll = 2; DRM_DEBUG_KMS("Found PatherPoint PCH\n"); WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev))); } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; - dev_priv->num_pch_pll = 0; DRM_DEBUG_KMS("Found LynxPoint PCH\n"); WARN_ON(!IS_HASWELL(dev)); WARN_ON(IS_ULT(dev)); } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_LPT; - dev_priv->num_pch_pll = 0; DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev)); WARN_ON(!IS_ULT(dev)); } - BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS); } pci_dev_put(pch); } @@ -570,7 +563,7 @@ static int i915_drm_freeze(struct drm_device *dev) intel_opregion_fini(dev); console_lock(); - intel_fbdev_set_suspend(dev, 1); + intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED); console_unlock(); return 0; @@ -614,7 +607,7 @@ void intel_console_resume(struct work_struct *work) struct drm_device *dev = dev_priv->dev; console_lock(); - intel_fbdev_set_suspend(dev, 0); + intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING); console_unlock(); } @@ -683,7 +676,7 @@ static int __i915_drm_thaw(struct drm_device *dev) * path of 
resume if possible. */ if (console_trylock()) { - intel_fbdev_set_suspend(dev, 0); + intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING); console_unlock(); } else { schedule_work(&dev_priv->console_resume_work); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 359a2003086b..9e1bf6dcbb2a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -132,15 +132,38 @@ enum hpd_pin { list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ if ((intel_encoder)->base.crtc == (__crtc)) -struct intel_pch_pll { +struct drm_i915_private; + +enum intel_dpll_id { + DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ + /* real shared dpll ids must be >= 0 */ + DPLL_ID_PCH_PLL_A, + DPLL_ID_PCH_PLL_B, +}; +#define I915_NUM_PLLS 2 + +struct intel_dpll_hw_state { + uint32_t dpll; + uint32_t fp0; + uint32_t fp1; +}; + +struct intel_shared_dpll { int refcount; /* count of number of CRTCs sharing this PLL */ int active; /* count of number of active CRTCs (i.e. DPMS on) */ bool on; /* is the PLL actually active? Disabled during modeset */ - int pll_reg; - int fp0_reg; - int fp1_reg; + const char *name; + /* should match the index in the dev_priv->shared_dplls array */ + enum intel_dpll_id id; + struct intel_dpll_hw_state hw_state; + void (*enable)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll); + void (*disable)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll); + bool (*get_hw_state)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state); }; -#define I915_NUM_PLLS 2 /* Used by dp and fdi links */ struct intel_link_m_n { @@ -195,7 +218,6 @@ struct opregion_header; struct opregion_acpi; struct opregion_swsci; struct opregion_asle; -struct drm_i915_private; struct intel_opregion { struct opregion_header __iomem *header; @@ -306,6 +328,8 @@ struct drm_i915_error_state { struct intel_crtc_config; struct intel_crtc; +struct intel_limit; +struct dpll; struct drm_i915_display_funcs { bool (*fbc_enabled)(struct drm_device *dev); @@ -313,6 +337,24 @@ struct drm_i915_display_funcs { void (*disable_fbc)(struct drm_device *dev); int (*get_display_clock_speed)(struct drm_device *dev); int (*get_fifo_size)(struct drm_device *dev, int plane); + /** + * find_dpll() - Find the best values for the PLL + * @limit: limits for the PLL + * @crtc: current CRTC + * @target: target frequency in kHz + * @refclk: reference clock frequency in kHz + * @match_clock: if provided, @best_clock P divider must + * match the P divider from @match_clock + * used for LVDS downclocking + * @best_clock: best PLL values found + * + * Returns true on success, false on failure. + */ + bool (*find_dpll)(const struct intel_limit *limit, + struct drm_crtc *crtc, + int target, int refclk, + struct dpll *match_clock, + struct dpll *best_clock); void (*update_wm)(struct drm_device *dev); void (*update_sprite_wm)(struct drm_device *dev, int pipe, uint32_t sprite_width, int pixel_size, @@ -466,6 +508,13 @@ struct i915_hw_ppgtt { void (*cleanup)(struct i915_hw_ppgtt *ppgtt); }; +struct i915_ctx_hang_stats { + /* This context had batch pending when hang was declared */ + unsigned batch_pending; + + /* This context had batch active when hang was declared */ + unsigned batch_active; +}; /* This must match up with the value previously used for execbuf2.rsvd1. 
*/ #define DEFAULT_CONTEXT_ID 0 @@ -476,6 +525,7 @@ struct i915_hw_context { struct drm_i915_file_private *file_priv; struct intel_ring_buffer *ring; struct drm_i915_gem_object *obj; + struct i915_ctx_hang_stats hang_stats; }; enum no_fbc_reason { @@ -720,6 +770,15 @@ struct intel_ilk_power_mgmt { struct drm_i915_gem_object *renderctx; }; +/* Power well structure for haswell */ +struct i915_power_well { + struct drm_device *device; + spinlock_t lock; + /* power well enable/disable usage count */ + int count; + int i915_request; +}; + struct i915_dri1_state { unsigned allow_batchbuffer : 1; u32 __iomem *gfx_hws_cpu_addr; @@ -842,7 +901,6 @@ struct i915_gpu_error { #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) struct timer_list hangcheck_timer; - int hangcheck_count; /* For reset and error_state handling. */ spinlock_t lock; @@ -998,7 +1056,6 @@ typedef struct drm_i915_private { u32 hpd_event_bits; struct timer_list hotplug_reenable_timer; - int num_pch_pll; int num_plane; unsigned long cfb_size; @@ -1059,7 +1116,8 @@ typedef struct drm_i915_private { struct drm_crtc *pipe_to_crtc_mapping[3]; wait_queue_head_t pending_flip_queue; - struct intel_pch_pll pch_plls[I915_NUM_PLLS]; + int num_shared_dpll; + struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; struct intel_ddi_plls ddi_plls; /* Reclocking support */ @@ -1080,6 +1138,9 @@ typedef struct drm_i915_private { * mchdev_lock in intel_pm.c */ struct intel_ilk_power_mgmt ips; + /* Haswell power well */ + struct i915_power_well power_well; + enum no_fbc_reason no_fbc_reason; struct drm_mm_node *compressed_fb; @@ -1154,7 +1215,7 @@ struct drm_i915_gem_object { struct drm_mm_node *gtt_space; /** Stolen memory for this object, instead of being backed by shmem. */ struct drm_mm_node *stolen; - struct list_head gtt_list; + struct list_head global_list; /** This object's place on the active/inactive lists */ struct list_head ring_list; @@ -1301,12 +1362,18 @@ struct drm_i915_gem_request { /** GEM sequence number associated with this request. */ uint32_t seqno; - /** Postion in the ringbuffer of the end of the request */ + /** Position in the ringbuffer of the start of the request */ + u32 head; + + /** Position in the ringbuffer of the end of the request */ u32 tail; /** Context related to this request */ struct i915_hw_context *ctx; + /** Batch buffer related to this request if any */ + struct drm_i915_gem_object *batch_obj; + /** Time at which this request was emitted, in jiffies. 
*/ unsigned long emitted_jiffies; @@ -1324,6 +1391,8 @@ struct drm_i915_file_private { struct list_head request_list; } mm; struct idr context_idr; + + struct i915_ctx_hang_stats hang_stats; }; #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) @@ -1660,6 +1729,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) { if (obj->fence_reg != I915_FENCE_REG_NONE) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; + WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); dev_priv->fence_regs[obj->fence_reg].pin_count--; } } @@ -1692,9 +1762,12 @@ void i915_gem_init_swizzling(struct drm_device *dev); void i915_gem_cleanup_ringbuffer(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_idle(struct drm_device *dev); -int i915_add_request(struct intel_ring_buffer *ring, - struct drm_file *file, - u32 *seqno); +int __i915_add_request(struct intel_ring_buffer *ring, + struct drm_file *file, + struct drm_i915_gem_object *batch_obj, + u32 *seqno); +#define i915_add_request(ring, seqno) \ + __i915_add_request(ring, NULL, NULL, seqno) int __must_check i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno); int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); @@ -1748,6 +1821,10 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx) kref_put(&ctx->ref, i915_gem_context_free); } +struct i915_ctx_hang_stats * __must_check +i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, + struct drm_file *file, + u32 id); int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e5b6a92e7102..a6178baccb56 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -176,7 +176,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, pinned = 0; mutex_lock(&dev->struct_mutex); - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) if (obj->pin_count) pinned += obj->gtt_space->size; mutex_unlock(&dev->struct_mutex); @@ -956,7 +956,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno) ret = 0; if (seqno == ring->outstanding_lazy_request) - ret = i915_add_request(ring, NULL, NULL); + ret = i915_add_request(ring, NULL); return ret; } @@ -1676,7 +1676,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) /* ->put_pages might need to allocate memory for the bit17 swizzle * array, hence protect them from being reaped by removing them from gtt * lists early. 
*/ - list_del(&obj->gtt_list); + list_del(&obj->global_list); ops->put_pages(obj); obj->pages = NULL; @@ -1696,7 +1696,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, - gtt_list) { + global_list) { if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) && i915_gem_object_put_pages(obj) == 0) { count += obj->base.size >> PAGE_SHIFT; @@ -1733,7 +1733,8 @@ i915_gem_shrink_all(struct drm_i915_private *dev_priv) i915_gem_evict_everything(dev_priv->dev); - list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list) + list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, + global_list) i915_gem_object_put_pages(obj); } @@ -1858,7 +1859,7 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) if (ret) return ret; - list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); + list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list); return 0; } @@ -1996,17 +1997,18 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) return 0; } -int -i915_add_request(struct intel_ring_buffer *ring, - struct drm_file *file, - u32 *out_seqno) +int __i915_add_request(struct intel_ring_buffer *ring, + struct drm_file *file, + struct drm_i915_gem_object *obj, + u32 *out_seqno) { drm_i915_private_t *dev_priv = ring->dev->dev_private; struct drm_i915_gem_request *request; - u32 request_ring_position; + u32 request_ring_position, request_start; int was_empty; int ret; + request_start = intel_ring_get_tail(ring); /* * Emit any outstanding flushes - execbuf can fail to emit the flush * after having emitted the batchbuffer command. Hence we need to fix @@ -2038,8 +2040,17 @@ i915_add_request(struct intel_ring_buffer *ring, request->seqno = intel_ring_get_seqno(ring); request->ring = ring; + request->head = request_start; request->tail = request_ring_position; request->ctx = ring->last_context; + request->batch_obj = obj; + + /* Whilst this request exists, batch_obj will be on the + * active_list, and so will hold the active reference. Only when this + * request is retired will the the batch_obj be moved onto the + * inactive_list and lose its active reference. Hence we do not need + * to explicitly hold another reference here. + */ if (request->ctx) i915_gem_context_reference(request->ctx); @@ -2096,6 +2107,94 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request) spin_unlock(&file_priv->mm.lock); } +static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj) +{ + if (acthd >= obj->gtt_offset && + acthd < obj->gtt_offset + obj->base.size) + return true; + + return false; +} + +static bool i915_head_inside_request(const u32 acthd_unmasked, + const u32 request_start, + const u32 request_end) +{ + const u32 acthd = acthd_unmasked & HEAD_ADDR; + + if (request_start < request_end) { + if (acthd >= request_start && acthd < request_end) + return true; + } else if (request_start > request_end) { + if (acthd >= request_start || acthd < request_end) + return true; + } + + return false; +} + +static bool i915_request_guilty(struct drm_i915_gem_request *request, + const u32 acthd, bool *inside) +{ + /* There is a possibility that unmasked head address + * pointing inside the ring, matches the batch_obj address range. + * However this is extremely unlikely. 
+ */ + + if (request->batch_obj) { + if (i915_head_inside_object(acthd, request->batch_obj)) { + *inside = true; + return true; + } + } + + if (i915_head_inside_request(acthd, request->head, request->tail)) { + *inside = false; + return true; + } + + return false; +} + +static void i915_set_reset_status(struct intel_ring_buffer *ring, + struct drm_i915_gem_request *request, + u32 acthd) +{ + struct i915_ctx_hang_stats *hs = NULL; + bool inside, guilty; + + /* Innocent until proven guilty */ + guilty = false; + + if (ring->hangcheck.action != wait && + i915_request_guilty(request, acthd, &inside)) { + DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n", + ring->name, + inside ? "inside" : "flushing", + request->batch_obj ? + request->batch_obj->gtt_offset : 0, + request->ctx ? request->ctx->id : 0, + acthd); + + guilty = true; + } + + /* If contexts are disabled or this is the default context, use + * file_priv->reset_state + */ + if (request->ctx && request->ctx->id != DEFAULT_CONTEXT_ID) + hs = &request->ctx->hang_stats; + else if (request->file_priv) + hs = &request->file_priv->hang_stats; + + if (hs) { + if (guilty) + hs->batch_active++; + else + hs->batch_pending++; + } +} + static void i915_gem_free_request(struct drm_i915_gem_request *request) { list_del(&request->list); @@ -2110,6 +2209,12 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request) static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, struct intel_ring_buffer *ring) { + u32 completed_seqno; + u32 acthd; + + acthd = intel_ring_get_active_head(ring); + completed_seqno = ring->get_seqno(ring, false); + while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -2117,6 +2222,9 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, struct drm_i915_gem_request, list); + if (request->seqno > completed_seqno) + i915_set_reset_status(ring, request, acthd); + i915_gem_free_request(request); } @@ -2276,7 +2384,7 @@ i915_gem_retire_work_handler(struct work_struct *work) idle = true; for_each_ring(ring, dev_priv, i) { if (ring->gpu_caches_dirty) - i915_add_request(ring, NULL, NULL); + i915_add_request(ring, NULL); idle &= list_empty(&ring->request_list); } @@ -2508,9 +2616,10 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj) obj->has_aliasing_ppgtt_mapping = 0; } i915_gem_gtt_finish_object(obj); + i915_gem_object_unpin_pages(obj); list_del(&obj->mm_list); - list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list); + list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list); /* Avoid an unnecessary call to unbind on rebind. 
*/ obj->map_and_fenceable = true; @@ -2918,7 +3027,7 @@ static void i915_gem_verify_gtt(struct drm_device *dev) struct drm_i915_gem_object *obj; int err = 0; - list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) { if (obj->gtt_space == NULL) { printk(KERN_ERR "object found on GTT list with no space reserved\n"); err++; @@ -3042,7 +3151,7 @@ search_free: return ret; } - list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list); + list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); obj->gtt_space = node; @@ -3057,7 +3166,6 @@ search_free: obj->map_and_fenceable = mappable && fenceable; - i915_gem_object_unpin_pages(obj); trace_i915_gem_object_bind(obj, map_and_fenceable); i915_gem_verify_gtt(dev); return 0; @@ -3757,7 +3865,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, const struct drm_i915_gem_object_ops *ops) { INIT_LIST_HEAD(&obj->mm_list); - INIT_LIST_HEAD(&obj->gtt_list); + INIT_LIST_HEAD(&obj->global_list); INIT_LIST_HEAD(&obj->ring_list); INIT_LIST_HEAD(&obj->exec_list); @@ -3857,7 +3965,13 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) dev_priv->mm.interruptible = was_interruptible; } - obj->pages_pin_count = 0; + /* Stolen objects don't hold a ref, but do hold pin count. Fix that up + * before progressing. */ + if (obj->stolen) + i915_gem_object_unpin_pages(obj); + + if (WARN_ON(obj->pages_pin_count)) + obj->pages_pin_count = 0; i915_gem_object_put_pages(obj); i915_gem_object_free_mmap_offset(obj); i915_gem_object_release_stolen(obj); @@ -4498,10 +4612,10 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc) } cnt = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) if (obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; - list_for_each_entry(obj, &dev_priv->mm.inactive_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list) if (obj->pin_count == 0 && obj->pages_pin_count == 0) cnt += obj->base.size >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 39bcc087db96..ff471454968d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -303,6 +303,34 @@ static int context_idr_cleanup(int id, void *p, void *data) return 0; } +struct i915_ctx_hang_stats * +i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring, + struct drm_file *file, + u32 id) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_file_private *file_priv = file->driver_priv; + struct i915_hw_context *to; + + if (dev_priv->hw_contexts_disabled) + return ERR_PTR(-ENOENT); + + if (ring->id != RCS) + return ERR_PTR(-EINVAL); + + if (file == NULL) + return ERR_PTR(-EINVAL); + + if (id == DEFAULT_CONTEXT_ID) + return &file_priv->hang_stats; + + to = i915_gem_context_get(file->driver_priv, id); + if (to == NULL) + return ERR_PTR(-ENOENT); + + return &to->hang_stats; +} + void i915_gem_context_close(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; @@ -427,7 +455,7 @@ static int do_switch(struct i915_hw_context *to) from->obj->dirty = 1; BUG_ON(from->obj->ring != ring); - ret = i915_add_request(ring, NULL, NULL); + ret = i915_add_request(ring, NULL); if (ret) { /* Too late, we've already scheduled a 
context switch. * Try to undo the change so that the hw state is diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index a8bb62ca8756..87a3227e5179 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -786,7 +786,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, obj->dirty = 1; obj->last_write_seqno = intel_ring_get_seqno(ring); if (obj->pin_count) /* check for potential scanout */ - intel_mark_fb_busy(obj); + intel_mark_fb_busy(obj, ring); } trace_i915_gem_object_change_domain(obj, old_read, old_write); @@ -796,13 +796,14 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, static void i915_gem_execbuffer_retire_commands(struct drm_device *dev, struct drm_file *file, - struct intel_ring_buffer *ring) + struct intel_ring_buffer *ring, + struct drm_i915_gem_object *obj) { /* Unconditionally force add_request to emit a full flush. */ ring->gpu_caches_dirty = true; /* Add a breadcrumb for the completion of the batch buffer */ - (void)i915_add_request(ring, file, NULL); + (void)__i915_add_request(ring, file, obj, NULL); } static int @@ -1083,7 +1084,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); i915_gem_execbuffer_move_to_active(&eb->objects, ring); - i915_gem_execbuffer_retire_commands(dev, file, ring); + i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj); err: eb_destroy(eb); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index ddad13fa3156..5101ab6869b4 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -439,7 +439,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, dev_priv->gtt.total / PAGE_SIZE); - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { i915_gem_clflush_object(obj); i915_gem_gtt_bind_object(obj, obj->cache_level); } @@ -631,7 +631,7 @@ void i915_gem_setup_global_gtt(struct drm_device *dev, dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; /* Mark any preallocated objects as occupied */ - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", obj->gtt_offset, obj->base.size); diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 89cbfab9570e..f713294618fe 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -279,7 +279,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev, goto cleanup; obj->has_dma_mapping = true; - obj->pages_pin_count = 1; + i915_gem_object_pin_pages(obj); obj->stolen = stolen; obj->base.write_domain = I915_GEM_DOMAIN_GTT; @@ -383,7 +383,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, obj->gtt_offset = gtt_offset; obj->has_global_gtt_mapping = 1; - list_add_tail(&obj->gtt_list, &dev_priv->mm.bound_list); + list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list); return obj; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e17bbe201195..7857430943ec 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ 
b/drivers/gpu/drm/i915/i915_irq.c @@ -683,7 +683,6 @@ static void notify_ring(struct drm_device *dev, wake_up_all(&ring->irq_queue); if (i915_enable_hangcheck) { - dev_priv->gpu_error.hangcheck_count = 0; mod_timer(&dev_priv->gpu_error.hangcheck_timer, round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); } @@ -1656,7 +1655,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, struct drm_i915_gem_object *obj; int i = 0; - list_for_each_entry(obj, head, gtt_list) { + list_for_each_entry(obj, head, global_list) { if (obj->pin_count == 0) continue; @@ -1798,7 +1797,7 @@ static void i915_gem_record_active_context(struct intel_ring_buffer *ring, if (ring->id != RCS || !error->ccid) return; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { if ((error->ccid & PAGE_MASK) == obj->gtt_offset) { ering->ctx = i915_error_object_create_sized(dev_priv, obj, 1); @@ -1935,7 +1934,7 @@ static void i915_capture_error_state(struct drm_device *dev) list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) i++; error->active_bo_count = i; - list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) if (obj->pin_count) i++; error->pinned_bo_count = i - error->active_bo_count; @@ -2315,38 +2314,28 @@ ring_last_seqno(struct intel_ring_buffer *ring) struct drm_i915_gem_request, list)->seqno; } -static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, - u32 ring_seqno, bool *err) +static bool +ring_idle(struct intel_ring_buffer *ring, u32 seqno) { - if (list_empty(&ring->request_list) || - i915_seqno_passed(ring_seqno, ring_last_seqno(ring))) { - /* Issue a wake-up to catch stuck h/w. */ - if (waitqueue_active(&ring->irq_queue)) { - DRM_ERROR("Hangcheck timer elapsed... %s idle\n", - ring->name); - wake_up_all(&ring->irq_queue); - *err = true; - } - return true; - } - return false; + return (list_empty(&ring->request_list) || + i915_seqno_passed(seqno, ring_last_seqno(ring))); } -static bool semaphore_passed(struct intel_ring_buffer *ring) +static struct intel_ring_buffer * +semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) { struct drm_i915_private *dev_priv = ring->dev->dev_private; - u32 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; - struct intel_ring_buffer *signaller; - u32 cmd, ipehr, acthd_min; + u32 cmd, ipehr, acthd, acthd_min; ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); if ((ipehr & ~(0x3 << 16)) != (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) - return false; + return NULL; /* ACTHD is likely pointing to the dword after the actual command, * so scan backwards until we find the MBOX. 
*/ + acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; acthd_min = max((int)acthd - 3 * 4, 0); do { cmd = ioread32(ring->virtual_start + acthd); @@ -2355,128 +2344,216 @@ static bool semaphore_passed(struct intel_ring_buffer *ring) acthd -= 4; if (acthd < acthd_min) - return false; + return NULL; } while (1); - signaller = &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; - return i915_seqno_passed(signaller->get_seqno(signaller, false), - ioread32(ring->virtual_start+acthd+4)+1); + *seqno = ioread32(ring->virtual_start+acthd+4)+1; + return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; } -static bool kick_ring(struct intel_ring_buffer *ring) +static int semaphore_passed(struct intel_ring_buffer *ring) { - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - u32 tmp = I915_READ_CTL(ring); - if (tmp & RING_WAIT) { - DRM_ERROR("Kicking stuck wait on %s\n", - ring->name); - I915_WRITE_CTL(ring, tmp); - return true; - } + struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct intel_ring_buffer *signaller; + u32 seqno, ctl; - if (INTEL_INFO(dev)->gen >= 6 && - tmp & RING_WAIT_SEMAPHORE && - semaphore_passed(ring)) { - DRM_ERROR("Kicking stuck semaphore on %s\n", - ring->name); - I915_WRITE_CTL(ring, tmp); - return true; - } - return false; + ring->hangcheck.deadlock = true; + + signaller = semaphore_waits_for(ring, &seqno); + if (signaller == NULL || signaller->hangcheck.deadlock) + return -1; + + /* cursory check for an unkickable deadlock */ + ctl = I915_READ_CTL(signaller); + if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) + return -1; + + return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); } -static bool i915_hangcheck_ring_hung(struct intel_ring_buffer *ring) +static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) { - if (IS_GEN2(ring->dev)) - return false; + struct intel_ring_buffer *ring; + int i; - /* Is the chip hanging on a WAIT_FOR_EVENT? - * If so we can simply poke the RB_WAIT bit - * and break the hang. This should work on - * all but the second generation chipsets. - */ - return !kick_ring(ring); + for_each_ring(ring, dev_priv, i) + ring->hangcheck.deadlock = false; } -static bool i915_hangcheck_hung(struct drm_device *dev) +static enum intel_ring_hangcheck_action +ring_stuck(struct intel_ring_buffer *ring, u32 acthd) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp; - if (dev_priv->gpu_error.hangcheck_count++ > 1) { - bool hung = true; - struct intel_ring_buffer *ring; - int i; + if (ring->hangcheck.acthd != acthd) + return active; - DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); - i915_handle_error(dev, true); + if (IS_GEN2(dev)) + return hung; - for_each_ring(ring, dev_priv, i) - hung &= i915_hangcheck_ring_hung(ring); + /* Is the chip hanging on a WAIT_FOR_EVENT? + * If so we can simply poke the RB_WAIT bit + * and break the hang. This should work on + * all but the second generation chipsets. 
+ */ + tmp = I915_READ_CTL(ring); + if (tmp & RING_WAIT) { + DRM_ERROR("Kicking stuck wait on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return kick; + } - return hung; + if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { + switch (semaphore_passed(ring)) { + default: + return hung; + case 1: + DRM_ERROR("Kicking stuck semaphore on %s\n", + ring->name); + I915_WRITE_CTL(ring, tmp); + return kick; + case 0: + return wait; + } } - return false; + return hung; } /** * This is called when the chip hasn't reported back with completed - * batchbuffers in a long time. The first time this is called we simply record - * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses - * again, we assume the chip is wedged and try to fix it. + * batchbuffers in a long time. We keep track per ring seqno progress and + * if there are no progress, hangcheck score for that ring is increased. + * Further, acthd is inspected to see if the ring is stuck. On stuck case + * we kick the ring. If we see no progress on three subsequent calls + * we assume chip is wedged and try to fix it by resetting the chip. */ void i915_hangcheck_elapsed(unsigned long data) { struct drm_device *dev = (struct drm_device *)data; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_ring_buffer *ring; - bool err = false, idle; int i; - u32 seqno[I915_NUM_RINGS]; - bool work_done; + int busy_count = 0, rings_hung = 0; + bool stuck[I915_NUM_RINGS] = { 0 }; +#define BUSY 1 +#define KICK 5 +#define HUNG 20 +#define FIRE 30 if (!i915_enable_hangcheck) return; - idle = true; for_each_ring(ring, dev_priv, i) { - seqno[i] = ring->get_seqno(ring, false); - idle &= i915_hangcheck_ring_idle(ring, seqno[i], &err); - } - - /* If all work is done then ACTHD clearly hasn't advanced. */ - if (idle) { - if (err) { - if (i915_hangcheck_hung(dev)) - return; - - goto repeat; + u32 seqno, acthd; + bool busy = true; + + semaphore_clear_deadlocks(dev_priv); + + seqno = ring->get_seqno(ring, false); + acthd = intel_ring_get_active_head(ring); + + if (ring->hangcheck.seqno == seqno) { + if (ring_idle(ring, seqno)) { + if (waitqueue_active(&ring->irq_queue)) { + /* Issue a wake-up to catch stuck h/w. */ + DRM_ERROR("Hangcheck timer elapsed... %s idle\n", + ring->name); + wake_up_all(&ring->irq_queue); + ring->hangcheck.score += HUNG; + } else + busy = false; + } else { + int score; + + /* We always increment the hangcheck score + * if the ring is busy and still processing + * the same request, so that no single request + * can run indefinitely (such as a chain of + * batches). The only time we do not increment + * the hangcheck score on this ring, if this + * ring is in a legitimate wait for another + * ring. In that case the waiting ring is a + * victim and we want to be sure we catch the + * right culprit. Then every time we do kick + * the ring, add a small increment to the + * score so that we can catch a batch that is + * being repeatedly kicked and so responsible + * for stalling the machine. + */ + ring->hangcheck.action = ring_stuck(ring, + acthd); + + switch (ring->hangcheck.action) { + case wait: + score = 0; + break; + case active: + score = BUSY; + break; + case kick: + score = KICK; + break; + case hung: + score = HUNG; + stuck[i] = true; + break; + } + ring->hangcheck.score += score; + } + } else { + /* Gradually reduce the count so that we catch DoS + * attempts across multiple batches. 
+ */ + if (ring->hangcheck.score > 0) + ring->hangcheck.score--; } - dev_priv->gpu_error.hangcheck_count = 0; - return; + ring->hangcheck.seqno = seqno; + ring->hangcheck.acthd = acthd; + busy_count += busy; } - work_done = false; for_each_ring(ring, dev_priv, i) { - if (ring->hangcheck.seqno != seqno[i]) { - work_done = true; - ring->hangcheck.seqno = seqno[i]; + if (ring->hangcheck.score > FIRE) { + DRM_ERROR("%s on %s\n", + stuck[i] ? "stuck" : "no progress", + ring->name); + rings_hung++; } } - if (!work_done) { - if (i915_hangcheck_hung(dev)) - return; - } else { - dev_priv->gpu_error.hangcheck_count = 0; - } + if (rings_hung) + return i915_handle_error(dev, true); -repeat: - /* Reset timer case chip hangs without another request being added */ - mod_timer(&dev_priv->gpu_error.hangcheck_timer, - round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); + if (busy_count) + /* Reset timer case chip hangs without another request + * being added */ + mod_timer(&dev_priv->gpu_error.hangcheck_timer, + round_jiffies_up(jiffies + + DRM_I915_HANGCHECK_JIFFIES)); +} + +static void ibx_irq_preinstall(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_PCH_NOP(dev)) + return; + + /* south display irq */ + I915_WRITE(SDEIMR, 0xffffffff); + /* + * SDEIER is also touched by the interrupt handler to work around missed + * PCH interrupts. Hence we can't update it after the interrupt handler + * is enabled - instead we unconditionally enable all PCH interrupt + * sources here, but then only unmask them as needed with SDEIMR. + */ + I915_WRITE(SDEIER, 0xffffffff); + POSTING_READ(SDEIER); } /* drm_dma.h hooks @@ -2500,16 +2577,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev) I915_WRITE(GTIER, 0x0); POSTING_READ(GTIER); - /* south display irq */ - I915_WRITE(SDEIMR, 0xffffffff); - /* - * SDEIER is also touched by the interrupt handler to work around missed - * PCH interrupts. Hence we can't update it after the interrupt handler - * is enabled - instead we unconditionally enable all PCH interrupt - * sources here, but then only unmask them as needed with SDEIMR. - */ - I915_WRITE(SDEIER, 0xffffffff); - POSTING_READ(SDEIER); + ibx_irq_preinstall(dev); } static void ivybridge_irq_preinstall(struct drm_device *dev) @@ -2536,19 +2604,7 @@ static void ivybridge_irq_preinstall(struct drm_device *dev) I915_WRITE(GEN6_PMIER, 0x0); POSTING_READ(GEN6_PMIER); - if (HAS_PCH_NOP(dev)) - return; - - /* south display irq */ - I915_WRITE(SDEIMR, 0xffffffff); - /* - * SDEIER is also touched by the interrupt handler to work around missed - * PCH interrupts. Hence we can't update it after the interrupt handler - * is enabled - instead we unconditionally enable all PCH interrupt - * sources here, but then only unmask them as needed with SDEIMR. - */ - I915_WRITE(SDEIER, 0xffffffff); - POSTING_READ(SDEIER); + ibx_irq_preinstall(dev); } static void valleyview_irq_preinstall(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5a593d20036c..2102ff32ee20 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -147,15 +147,9 @@ #define VGA_MSR_MEM_EN (1<<1) #define VGA_MSR_CGA_MODE (1<<0) -/* - * SR01 is the only VGA register touched on non-UMS setups. - * VLV doesn't do UMS, so the sequencer index/data registers - * are the only VGA registers which need to include - * display_mmio_offset. 
- */ -#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4) +#define VGA_SR_INDEX 0x3c4 #define SR01 1 -#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5) +#define VGA_SR_DATA 0x3c5 #define VGA_AR_INDEX 0x3c0 #define VGA_AR_VID_EN (1<<5) @@ -1026,6 +1020,10 @@ #define IPS_CTL 0x43408 #define IPS_ENABLE (1 << 31) +#define MSG_FBC_REND_STATE 0x50380 +#define FBC_REND_NUKE (1<<2) +#define FBC_REND_CACHE_CLEAN (1<<1) + #define _HSW_PIPE_SLICE_CHICKEN_1_A 0x420B0 #define _HSW_PIPE_SLICE_CHICKEN_1_B 0x420B4 #define HSW_BYPASS_FBC_QUEUE (1<<22) @@ -1256,7 +1254,7 @@ #define DSTATE_PLL_D3_OFF (1<<3) #define DSTATE_GFX_CLOCK_GATING (1<<1) #define DSTATE_DOT_CLOCK_GATING (1<<0) -#define DSPCLK_GATE_D 0x6200 +#define DSPCLK_GATE_D (dev_priv->info->display_mmio_offset + 0x6200) # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ @@ -1369,6 +1367,8 @@ #define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) #define FW_CSPWRDWNEN (1<<15) +#define MI_ARB_VLV (VLV_DISPLAY_BASE + 0x6504) + /* * Palette regs */ @@ -3672,9 +3672,9 @@ #define _GAMMA_MODE_B 0x4ac80 #define GAMMA_MODE(pipe) _PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B) #define GAMMA_MODE_MODE_MASK (3 << 0) -#define GAMMA_MODE_MODE_8bit (0 << 0) -#define GAMMA_MODE_MODE_10bit (1 << 0) -#define GAMMA_MODE_MODE_12bit (2 << 0) +#define GAMMA_MODE_MODE_8BIT (0 << 0) +#define GAMMA_MODE_MODE_10BIT (1 << 0) +#define GAMMA_MODE_MODE_12BIT (2 << 0) #define GAMMA_MODE_MODE_SPLIT (3 << 0) /* interrupts */ @@ -3932,15 +3932,15 @@ #define _PCH_DPLL_A 0xc6014 #define _PCH_DPLL_B 0xc6018 -#define _PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) +#define PCH_DPLL(pll) (pll == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) #define _PCH_FPA0 0xc6040 #define FP_CB_TUNE (0x3<<22) #define _PCH_FPA1 0xc6044 #define _PCH_FPB0 0xc6048 #define _PCH_FPB1 0xc604c -#define _PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0) -#define _PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1) +#define PCH_FP0(pll) (pll == 0 ? _PCH_FPA0 : _PCH_FPB0) +#define PCH_FP1(pll) (pll == 0 ? _PCH_FPA1 : _PCH_FPB1) #define PCH_DPLL_TEST 0xc606c @@ -3980,15 +3980,9 @@ #define PCH_SSC4_AUX_PARMS 0xc6214 #define PCH_DPLL_SEL 0xc7000 -#define TRANSA_DPLL_ENABLE (1<<3) -#define TRANSA_DPLLB_SEL (1<<0) -#define TRANSA_DPLLA_SEL 0 -#define TRANSB_DPLL_ENABLE (1<<7) -#define TRANSB_DPLLB_SEL (1<<4) -#define TRANSB_DPLLA_SEL (0) -#define TRANSC_DPLL_ENABLE (1<<11) -#define TRANSC_DPLLB_SEL (1<<8) -#define TRANSC_DPLLA_SEL (0) +#define TRANS_DPLLB_SEL(pipe) (1 << (pipe * 4)) +#define TRANS_DPLLA_SEL(pipe) 0 +#define TRANS_DPLL_ENABLE(pipe) (1 << (pipe * 4 + 3)) /* transcoder */ diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c index 5ef30b2e6bc6..967da4772c44 100644 --- a/drivers/gpu/drm/i915/i915_ums.c +++ b/drivers/gpu/drm/i915/i915_ums.c @@ -41,7 +41,7 @@ static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) return false; if (HAS_PCH_SPLIT(dev)) - dpll_reg = _PCH_DPLL(pipe); + dpll_reg = PCH_DPLL(pipe); else dpll_reg = (pipe == PIPE_A) ? 
_DPLL_A : _DPLL_B; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 9649df806079..224ce25129ce 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -624,7 +624,7 @@ intel_ddi_calculate_wrpll(int clock /* in Hz */, clock, *p_out, *n2_out, *r2_out); } -bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock) +bool intel_ddi_pll_mode_set(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); @@ -634,6 +634,7 @@ bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock) int type = intel_encoder->type; enum pipe pipe = intel_crtc->pipe; uint32_t reg, val; + int clock = intel_crtc->config.port_clock; /* TODO: reuse PLLs when possible (compare values) */ @@ -1278,7 +1279,6 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, flags |= DRM_MODE_FLAG_NVSYNC; pipe_config->adjusted_mode.flags |= flags; - pipe_config->pixel_multiplier = 1; } static void intel_ddi_destroy(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6eb99e13c37d..b08d1f9ce0de 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -59,24 +59,6 @@ typedef struct intel_limit intel_limit_t; struct intel_limit { intel_range_t dot, vco, n, m, m1, m2, p, p1; intel_p2_t p2; - /** - * find_pll() - Find the best values for the PLL - * @limit: limits for the PLL - * @crtc: current CRTC - * @target: target frequency in kHz - * @refclk: reference clock frequency in kHz - * @match_clock: if provided, @best_clock P divider must - * match the P divider from @match_clock - * used for LVDS downclocking - * @best_clock: best PLL values found - * - * Returns true on success, false on failure. 
- */ - bool (*find_pll)(const intel_limit_t *limit, - struct drm_crtc *crtc, - int target, int refclk, - intel_clock_t *match_clock, - intel_clock_t *best_clock); }; /* FDI */ @@ -92,20 +74,6 @@ intel_pch_rawclk(struct drm_device *dev) return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; } -static bool -intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock); -static bool -intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock); - -static bool -intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock); - static inline u32 /* units of 100MHz */ intel_fdi_link_freq(struct drm_device *dev) { @@ -127,7 +95,6 @@ static const intel_limit_t intel_limits_i8xx_dvo = { .p1 = { .min = 2, .max = 33 }, .p2 = { .dot_limit = 165000, .p2_slow = 4, .p2_fast = 2 }, - .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i8xx_lvds = { @@ -141,7 +108,6 @@ static const intel_limit_t intel_limits_i8xx_lvds = { .p1 = { .min = 1, .max = 6 }, .p2 = { .dot_limit = 165000, .p2_slow = 14, .p2_fast = 7 }, - .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i9xx_sdvo = { @@ -155,7 +121,6 @@ static const intel_limit_t intel_limits_i9xx_sdvo = { .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 }, - .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_i9xx_lvds = { @@ -169,7 +134,6 @@ static const intel_limit_t intel_limits_i9xx_lvds = { .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 112000, .p2_slow = 14, .p2_fast = 7 }, - .find_pll = intel_find_best_PLL, }; @@ -186,7 +150,6 @@ static const intel_limit_t intel_limits_g4x_sdvo = { .p2_slow = 10, .p2_fast = 10 }, - .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_hdmi = { @@ -200,7 +163,6 @@ static const intel_limit_t intel_limits_g4x_hdmi = { .p1 = { .min = 1, .max = 8}, .p2 = { .dot_limit = 165000, .p2_slow = 10, .p2_fast = 5 }, - .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_single_channel_lvds = { @@ -215,7 +177,6 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = { .p2 = { .dot_limit = 0, .p2_slow = 14, .p2_fast = 14 }, - .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { @@ -230,7 +191,6 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { .p2 = { .dot_limit = 0, .p2_slow = 7, .p2_fast = 7 }, - .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_pineview_sdvo = { @@ -246,7 +206,6 @@ static const intel_limit_t intel_limits_pineview_sdvo = { .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 }, - .find_pll = intel_find_best_PLL, }; static const intel_limit_t intel_limits_pineview_lvds = { @@ -260,7 +219,6 @@ static const intel_limit_t intel_limits_pineview_lvds = { .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 112000, .p2_slow = 14, .p2_fast = 14 }, - .find_pll = intel_find_best_PLL, }; /* Ironlake / Sandybridge @@ -279,7 +237,6 @@ static const intel_limit_t intel_limits_ironlake_dac = { .p1 = { .min = 1, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 10, .p2_fast = 5 }, - .find_pll = intel_g4x_find_best_PLL, }; static const 
intel_limit_t intel_limits_ironlake_single_lvds = { @@ -293,7 +250,6 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = { .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 14, .p2_fast = 14 }, - .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_dual_lvds = { @@ -307,7 +263,6 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = { .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 7, .p2_fast = 7 }, - .find_pll = intel_g4x_find_best_PLL, }; /* LVDS 100mhz refclk limits. */ @@ -322,7 +277,6 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { .p1 = { .min = 2, .max = 8 }, .p2 = { .dot_limit = 225000, .p2_slow = 14, .p2_fast = 14 }, - .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { @@ -336,7 +290,6 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { .p1 = { .min = 2, .max = 6 }, .p2 = { .dot_limit = 225000, .p2_slow = 7, .p2_fast = 7 }, - .find_pll = intel_g4x_find_best_PLL, }; static const intel_limit_t intel_limits_vlv_dac = { @@ -350,7 +303,6 @@ static const intel_limit_t intel_limits_vlv_dac = { .p1 = { .min = 1, .max = 3 }, .p2 = { .dot_limit = 270000, .p2_slow = 2, .p2_fast = 20 }, - .find_pll = intel_vlv_find_best_pll, }; static const intel_limit_t intel_limits_vlv_hdmi = { @@ -364,7 +316,6 @@ static const intel_limit_t intel_limits_vlv_hdmi = { .p1 = { .min = 2, .max = 3 }, .p2 = { .dot_limit = 270000, .p2_slow = 2, .p2_fast = 20 }, - .find_pll = intel_vlv_find_best_pll, }; static const intel_limit_t intel_limits_vlv_dp = { @@ -378,7 +329,6 @@ static const intel_limit_t intel_limits_vlv_dp = { .p1 = { .min = 1, .max = 3 }, .p2 = { .dot_limit = 270000, .p2_slow = 2, .p2_fast = 20 }, - .find_pll = intel_vlv_find_best_pll, }; static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, @@ -475,12 +425,8 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); } -static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) +static void i9xx_clock(int refclk, intel_clock_t *clock) { - if (IS_PINEVIEW(dev)) { - pineview_clock(refclk, clock); - return; - } clock->m = i9xx_dpll_compute_m(clock); clock->p = clock->p1 * clock->p2; clock->vco = refclk * clock->m / (clock->n + 2); @@ -538,10 +484,9 @@ static bool intel_PLL_is_valid(struct drm_device *dev, } static bool -intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, +i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) - { struct drm_device *dev = crtc->dev; intel_clock_t clock; @@ -570,8 +515,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, clock.m1++) { for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { - /* m1 is always 0 in Pineview */ - if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) + if (clock.m2 >= clock.m1) break; for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) { @@ -579,7 +523,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, clock.p1 <= limit->p1.max; clock.p1++) { int this_err; - intel_clock(dev, refclk, &clock); + i9xx_clock(refclk, &clock); if (!intel_PLL_is_valid(dev, limit, &clock)) continue; @@ -601,9 +545,68 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, } static bool -intel_g4x_find_best_PLL(const intel_limit_t *limit, 
struct drm_crtc *crtc, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) +pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) +{ + struct drm_device *dev = crtc->dev; + intel_clock_t clock; + int err = target; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { + /* + * For LVDS just rely on its current settings for dual-channel. + * We haven't figured out how to reliably set up different + * single/dual channel state, if we even can. + */ + if (intel_is_dual_link_lvds(dev)) + clock.p2 = limit->p2.p2_fast; + else + clock.p2 = limit->p2.p2_slow; + } else { + if (target < limit->p2.dot_limit) + clock.p2 = limit->p2.p2_slow; + else + clock.p2 = limit->p2.p2_fast; + } + + memset(best_clock, 0, sizeof(*best_clock)); + + for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; + clock.m1++) { + for (clock.m2 = limit->m2.min; + clock.m2 <= limit->m2.max; clock.m2++) { + for (clock.n = limit->n.min; + clock.n <= limit->n.max; clock.n++) { + for (clock.p1 = limit->p1.min; + clock.p1 <= limit->p1.max; clock.p1++) { + int this_err; + + pineview_clock(refclk, &clock); + if (!intel_PLL_is_valid(dev, limit, + &clock)) + continue; + if (match_clock && + clock.p != match_clock->p) + continue; + + this_err = abs(clock.dot - target); + if (this_err < err) { + *best_clock = clock; + err = this_err; + } + } + } + } + } + + return (err != target); +} + +static bool +g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) { struct drm_device *dev = crtc->dev; intel_clock_t clock; @@ -638,7 +641,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, clock.p1 >= limit->p1.min; clock.p1--) { int this_err; - intel_clock(dev, refclk, &clock); + i9xx_clock(refclk, &clock); if (!intel_PLL_is_valid(dev, limit, &clock)) continue; @@ -658,9 +661,9 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, } static bool -intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, - int target, int refclk, intel_clock_t *match_clock, - intel_clock_t *best_clock) +vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, + int target, int refclk, intel_clock_t *match_clock, + intel_clock_t *best_clock) { u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2; u32 m, n, fastclk; @@ -906,14 +909,24 @@ static void assert_pll(struct drm_i915_private *dev_priv, #define assert_pll_enabled(d, p) assert_pll(d, p, true) #define assert_pll_disabled(d, p) assert_pll(d, p, false) +static struct intel_shared_dpll * +intel_crtc_to_shared_dpll(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + + if (crtc->config.shared_dpll < 0) + return NULL; + + return &dev_priv->shared_dplls[crtc->config.shared_dpll]; +} + /* For ILK+ */ -static void assert_pch_pll(struct drm_i915_private *dev_priv, - struct intel_pch_pll *pll, - struct intel_crtc *crtc, - bool state) +static void assert_shared_dpll(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + bool state) { - u32 val; bool cur_state; + struct intel_dpll_hw_state hw_state; if (HAS_PCH_LPT(dev_priv->dev)) { DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n"); @@ -921,36 +934,16 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv, } if (WARN (!pll, - "asserting PCH PLL %s with no PLL\n", state_string(state))) 
+ "asserting DPLL %s with no DPLL\n", state_string(state))) return; - val = I915_READ(pll->pll_reg); - cur_state = !!(val & DPLL_VCO_ENABLE); + cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); WARN(cur_state != state, - "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n", - pll->pll_reg, state_string(state), state_string(cur_state), val); - - /* Make sure the selected PLL is correctly attached to the transcoder */ - if (crtc && HAS_PCH_CPT(dev_priv->dev)) { - u32 pch_dpll; - - pch_dpll = I915_READ(PCH_DPLL_SEL); - cur_state = pll->pll_reg == _PCH_DPLL_B; - if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state, - "PLL[%d] not attached to this transcoder %c: %08x\n", - cur_state, pipe_name(crtc->pipe), pch_dpll)) { - cur_state = !!(val >> (4*crtc->pipe + 3)); - WARN(cur_state != state, - "PLL[%d] not %s on this transcoder %c: %08x\n", - pll->pll_reg == _PCH_DPLL_B, - state_string(state), - pipe_name(crtc->pipe), - val); - } - } + "%s assertion failure (expected %s, current %s)\n", + pll->name, state_string(state), state_string(cur_state)); } -#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true) -#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false) +#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) +#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) @@ -1102,12 +1095,13 @@ static void assert_plane(struct drm_i915_private *dev_priv, static void assert_planes_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct drm_device *dev = dev_priv->dev; int reg, i; u32 val; int cur_pipe; - /* Planes are fixed to pipes on ILK+ */ - if (HAS_PCH_SPLIT(dev_priv->dev) || IS_VALLEYVIEW(dev_priv->dev)) { + /* Primary planes are fixed to pipes on gen4+ */ + if (INTEL_INFO(dev)->gen >= 4) { reg = DSPCNTR(pipe); val = I915_READ(reg); WARN((val & DISPLAY_PLANE_ENABLE), @@ -1117,7 +1111,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, } /* Need to check both planes against the pipe */ - for (i = 0; i < 2; i++) { + for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) { reg = DSPCNTR(i); val = I915_READ(reg); cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> @@ -1131,19 +1125,30 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv, static void assert_sprites_disabled(struct drm_i915_private *dev_priv, enum pipe pipe) { + struct drm_device *dev = dev_priv->dev; int reg, i; u32 val; - if (!IS_VALLEYVIEW(dev_priv->dev)) - return; - - /* Need to check both planes against the pipe */ - for (i = 0; i < dev_priv->num_plane; i++) { - reg = SPCNTR(pipe, i); + if (IS_VALLEYVIEW(dev)) { + for (i = 0; i < dev_priv->num_plane; i++) { + reg = SPCNTR(pipe, i); + val = I915_READ(reg); + WARN((val & SP_ENABLE), + "sprite %c assertion failure, should be off on pipe %c but is still active\n", + sprite_name(pipe, i), pipe_name(pipe)); + } + } else if (INTEL_INFO(dev)->gen >= 7) { + reg = SPRCTL(pipe); val = I915_READ(reg); - WARN((val & SP_ENABLE), + WARN((val & SPRITE_ENABLE), "sprite %c assertion failure, should be off on pipe %c but is still active\n", - sprite_name(pipe, i), pipe_name(pipe)); + plane_name(pipe), pipe_name(pipe)); + } else if (INTEL_INFO(dev)->gen >= 5) { + reg = DVSCNTR(pipe); + val = I915_READ(reg); + WARN((val & DVS_ENABLE), + "sprite %c assertion failure, should be off on pipe %c but is still active\n", + plane_name(pipe), pipe_name(pipe)); } } @@ 
-1382,94 +1387,71 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port) } /** - * ironlake_enable_pch_pll - enable PCH PLL + * ironlake_enable_shared_dpll - enable PCH PLL * @dev_priv: i915 private structure * @pipe: pipe PLL to enable * * The PCH PLL needs to be enabled before the PCH transcoder, since it * drives the transcoder clock. */ -static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc) +static void ironlake_enable_shared_dpll(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; - struct intel_pch_pll *pll; - int reg; - u32 val; + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); /* PCH PLLs only available on ILK, SNB and IVB */ BUG_ON(dev_priv->info->gen < 5); - pll = intel_crtc->pch_pll; - if (pll == NULL) + if (WARN_ON(pll == NULL)) return; if (WARN_ON(pll->refcount == 0)) return; - DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n", - pll->pll_reg, pll->active, pll->on, - intel_crtc->base.base.id); - - /* PCH refclock must be enabled first */ - assert_pch_refclk_enabled(dev_priv); + DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n", + pll->name, pll->active, pll->on, + crtc->base.base.id); - if (pll->active++ && pll->on) { - assert_pch_pll_enabled(dev_priv, pll, NULL); + if (pll->active++) { + WARN_ON(!pll->on); + assert_shared_dpll_enabled(dev_priv, pll); return; } + WARN_ON(pll->on); - DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg); - - reg = pll->pll_reg; - val = I915_READ(reg); - val |= DPLL_VCO_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); - udelay(200); - + DRM_DEBUG_KMS("enabling %s\n", pll->name); + pll->enable(dev_priv, pll); pll->on = true; } -static void intel_disable_pch_pll(struct intel_crtc *intel_crtc) +static void intel_disable_shared_dpll(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; - struct intel_pch_pll *pll = intel_crtc->pch_pll; - int reg; - u32 val; + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); /* PCH only available on ILK+ */ BUG_ON(dev_priv->info->gen < 5); - if (pll == NULL) + if (WARN_ON(pll == NULL)) return; if (WARN_ON(pll->refcount == 0)) return; - DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n", - pll->pll_reg, pll->active, pll->on, - intel_crtc->base.base.id); + DRM_DEBUG_KMS("disable %s (active %d, on? 
%d) for crtc %d\n", + pll->name, pll->active, pll->on, + crtc->base.base.id); if (WARN_ON(pll->active == 0)) { - assert_pch_pll_disabled(dev_priv, pll, NULL); + assert_shared_dpll_disabled(dev_priv, pll); return; } - if (--pll->active) { - assert_pch_pll_enabled(dev_priv, pll, NULL); + assert_shared_dpll_enabled(dev_priv, pll); + WARN_ON(!pll->on); + if (--pll->active) return; - } - - DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg); - - /* Make sure transcoder isn't still depending on us */ - assert_pch_transcoder_disabled(dev_priv, intel_crtc->pipe); - - reg = pll->pll_reg; - val = I915_READ(reg); - val &= ~DPLL_VCO_ENABLE; - I915_WRITE(reg, val); - POSTING_READ(reg); - udelay(200); + DRM_DEBUG_KMS("disabling %s\n", pll->name); + pll->disable(dev_priv, pll); pll->on = false; } @@ -1478,15 +1460,15 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, { struct drm_device *dev = dev_priv->dev; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t reg, val, pipeconf_val; /* PCH only available on ILK+ */ BUG_ON(dev_priv->info->gen < 5); /* Make sure PCH DPLL is enabled */ - assert_pch_pll_enabled(dev_priv, - to_intel_crtc(crtc)->pch_pll, - to_intel_crtc(crtc)); + assert_shared_dpll_enabled(dev_priv, + intel_crtc_to_shared_dpll(intel_crtc)); /* FDI must be feeding us bits for PCH ports */ assert_fdi_tx_enabled(dev_priv, pipe); @@ -1943,6 +1925,9 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, dspcntr &= ~DISPPLANE_TILED; } + if (IS_G4X(dev)) + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + I915_WRITE(reg, dspcntr); linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); @@ -2212,7 +2197,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, crtc->y = y; if (old_fb) { - intel_wait_for_vblank(dev, intel_crtc->pipe); + if (intel_crtc->active && old_fb != fb) + intel_wait_for_vblank(dev, intel_crtc->pipe); intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj); } @@ -2945,31 +2931,18 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) * transcoder, and we actually should do this to not upset any PCH * transcoder that already use the clock when we share it. * - * Note that enable_pch_pll tries to do the right thing, but get_pch_pll - * unconditionally resets the pll - we need that to have the right LVDS - * enable sequence. */ - ironlake_enable_pch_pll(intel_crtc); + * Note that enable_shared_dpll tries to do the right thing, but + * get_shared_dpll unconditionally resets the pll - we need that to have + * the right LVDS enable sequence. 
*/ + ironlake_enable_shared_dpll(intel_crtc); if (HAS_PCH_CPT(dev)) { u32 sel; temp = I915_READ(PCH_DPLL_SEL); - switch (pipe) { - default: - case 0: - temp |= TRANSA_DPLL_ENABLE; - sel = TRANSA_DPLLB_SEL; - break; - case 1: - temp |= TRANSB_DPLL_ENABLE; - sel = TRANSB_DPLLB_SEL; - break; - case 2: - temp |= TRANSC_DPLL_ENABLE; - sel = TRANSC_DPLLB_SEL; - break; - } - if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B) + temp |= TRANS_DPLL_ENABLE(pipe); + sel = TRANS_DPLLB_SEL(pipe); + if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B) temp |= sel; else temp &= ~sel; @@ -3038,69 +3011,72 @@ static void lpt_pch_enable(struct drm_crtc *crtc) lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } -static void intel_put_pch_pll(struct intel_crtc *intel_crtc) +static void intel_put_shared_dpll(struct intel_crtc *crtc) { - struct intel_pch_pll *pll = intel_crtc->pch_pll; + struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); if (pll == NULL) return; if (pll->refcount == 0) { - WARN(1, "bad PCH PLL refcount\n"); + WARN(1, "bad %s refcount\n", pll->name); return; } - --pll->refcount; - intel_crtc->pch_pll = NULL; + if (--pll->refcount == 0) { + WARN_ON(pll->on); + WARN_ON(pll->active); + } + + crtc->config.shared_dpll = DPLL_ID_PRIVATE; } -static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp) +static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp) { - struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; - struct intel_pch_pll *pll; - int i; + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); + enum intel_dpll_id i; - pll = intel_crtc->pch_pll; if (pll) { - DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n", - intel_crtc->base.base.id, pll->pll_reg); - goto prepare; + DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n", + crtc->base.base.id, pll->name); + intel_put_shared_dpll(crtc); } if (HAS_PCH_IBX(dev_priv->dev)) { /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ - i = intel_crtc->pipe; - pll = &dev_priv->pch_plls[i]; + i = crtc->pipe; + pll = &dev_priv->shared_dplls[i]; - DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n", - intel_crtc->base.base.id, pll->pll_reg); + DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", + crtc->base.base.id, pll->name); goto found; } - for (i = 0; i < dev_priv->num_pch_pll; i++) { - pll = &dev_priv->pch_plls[i]; + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + pll = &dev_priv->shared_dplls[i]; /* Only want to check enabled timings first */ if (pll->refcount == 0) continue; - if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) && - fp == I915_READ(pll->fp0_reg)) { - DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n", - intel_crtc->base.base.id, - pll->pll_reg, pll->refcount, pll->active); + if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) && + fp == I915_READ(PCH_FP0(pll->id))) { + DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, ative %d)\n", + crtc->base.base.id, + pll->name, pll->refcount, pll->active); goto found; } } /* Ok no matching timings, maybe there's a free one? 
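Editor's aside: the shared-DPLL get/put/enable/disable hunks above replace the old per-register bookkeeping (pll_reg/fp0_reg) with a small refcounting scheme — refcount counts CRTCs that have claimed a PLL, active counts CRTCs that currently have it enabled, and the hardware hooks only run on the 0→1 and 1→0 transitions of active. A minimal standalone model of that scheme (not the driver's code; hw_enable/hw_disable stand in for the real pll->enable/disable hooks):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct shared_dpll_model {
        const char *name;
        int refcount;   /* CRTCs that have claimed this PLL */
        int active;     /* CRTCs that currently have it enabled */
        bool on;        /* software's idea of the hardware state */
};

static void hw_enable(struct shared_dpll_model *pll)  { pll->on = true;  printf("%s: hw on\n", pll->name); }
static void hw_disable(struct shared_dpll_model *pll) { pll->on = false; printf("%s: hw off\n", pll->name); }

/* Claim the PLL for a CRTC; the hardware is not touched yet. */
static void dpll_get(struct shared_dpll_model *pll)
{
        pll->refcount++;
}

/* Enable for one CRTC; only the first user programs the hardware. */
static void dpll_enable(struct shared_dpll_model *pll)
{
        assert(pll->refcount > 0);
        if (pll->active++)
                return;         /* already running for another CRTC */
        hw_enable(pll);
}

/* Disable for one CRTC; only the last user turns the hardware off. */
static void dpll_disable(struct shared_dpll_model *pll)
{
        assert(pll->active > 0);
        if (--pll->active)
                return;         /* still needed by another CRTC */
        hw_disable(pll);
}

/* Drop the claim; the last user must already have it off and inactive. */
static void dpll_put(struct shared_dpll_model *pll)
{
        assert(pll->refcount > 0);
        if (--pll->refcount == 0)
                assert(!pll->on && pll->active == 0);
}

int main(void)
{
        struct shared_dpll_model pll = { .name = "PCH DPLL A" };

        dpll_get(&pll);  dpll_get(&pll);        /* two CRTCs share the PLL */
        dpll_enable(&pll); dpll_enable(&pll);   /* hardware enabled once   */
        dpll_disable(&pll);                     /* still on: one user left */
        dpll_disable(&pll);                     /* last user: hardware off */
        dpll_put(&pll);  dpll_put(&pll);
        return 0;
}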
*/ - for (i = 0; i < dev_priv->num_pch_pll; i++) { - pll = &dev_priv->pch_plls[i]; + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + pll = &dev_priv->shared_dplls[i]; if (pll->refcount == 0) { - DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n", - intel_crtc->base.base.id, pll->pll_reg); + DRM_DEBUG_KMS("CRTC:%d allocated %s\n", + crtc->base.base.id, pll->name); goto found; } } @@ -3108,20 +3084,28 @@ static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u3 return NULL; found: - intel_crtc->pch_pll = pll; - pll->refcount++; - DRM_DEBUG_DRIVER("using pll %d for pipe %c\n", i, pipe_name(intel_crtc->pipe)); -prepare: /* separate function? */ - DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg); + crtc->config.shared_dpll = i; + DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, + pipe_name(crtc->pipe)); - /* Wait for the clocks to stabilize before rewriting the regs */ - I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); - POSTING_READ(pll->pll_reg); - udelay(150); + if (pll->active == 0) { + memcpy(&pll->hw_state, &crtc->config.dpll_hw_state, + sizeof(pll->hw_state)); + + DRM_DEBUG_DRIVER("setting up %s\n", pll->name); + WARN_ON(pll->on); + assert_shared_dpll_disabled(dev_priv, pll); + + /* Wait for the clocks to stabilize before rewriting the regs */ + I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE); + POSTING_READ(PCH_DPLL(pll->id)); + udelay(150); + + I915_WRITE(PCH_FP0(pll->id), fp); + I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE); + } + pll->refcount++; - I915_WRITE(pll->fp0_reg, fp); - I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE); - pll->on = false; return pll; } @@ -3160,6 +3144,28 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc) } } +static void intel_enable_planes(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct intel_plane *intel_plane; + + list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) + if (intel_plane->pipe == pipe) + intel_plane_restore(&intel_plane->base); +} + +static void intel_disable_planes(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct intel_plane *intel_plane; + + list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head) + if (intel_plane->pipe == pipe) + intel_plane_disable(&intel_plane->base); +} + static void ironlake_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -3203,7 +3209,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) if (encoder->pre_enable) encoder->pre_enable(encoder); - /* Enable panel fitting for LVDS */ ironlake_pfit_enable(intel_crtc); /* @@ -3215,6 +3220,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_enable_pipe(dev_priv, pipe, intel_crtc->config.has_pch_encoder); intel_enable_plane(dev_priv, plane, pipe); + intel_enable_planes(crtc); + intel_crtc_update_cursor(crtc, true); if (intel_crtc->config.has_pch_encoder) ironlake_pch_enable(crtc); @@ -3223,8 +3230,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); - intel_crtc_update_cursor(crtc, true); - for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); @@ -3309,7 +3314,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_ddi_enable_pipe_clock(intel_crtc); - /* Enable panel fitting for eDP */ ironlake_pfit_enable(intel_crtc); /* @@ -3324,6 +3328,8 @@ static void haswell_crtc_enable(struct drm_crtc 
*crtc) intel_enable_pipe(dev_priv, pipe, intel_crtc->config.has_pch_encoder); intel_enable_plane(dev_priv, plane, pipe); + intel_enable_planes(crtc); + intel_crtc_update_cursor(crtc, true); hsw_enable_ips(intel_crtc); @@ -3334,8 +3340,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_update_fbc(dev); mutex_unlock(&dev->struct_mutex); - intel_crtc_update_cursor(crtc, true); - for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); @@ -3384,14 +3388,17 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); - intel_crtc_update_cursor(crtc, false); - - intel_disable_plane(dev_priv, plane, pipe); if (dev_priv->cfb_plane == plane) intel_disable_fbc(dev); - intel_set_pch_fifo_underrun_reporting(dev, pipe, false); + intel_crtc_update_cursor(crtc, false); + intel_disable_planes(crtc); + intel_disable_plane(dev_priv, plane, pipe); + + if (intel_crtc->config.has_pch_encoder) + intel_set_pch_fifo_underrun_reporting(dev, pipe, false); + intel_disable_pipe(dev_priv, pipe); ironlake_pfit_disable(intel_crtc); @@ -3400,42 +3407,32 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) if (encoder->post_disable) encoder->post_disable(encoder); - ironlake_fdi_disable(crtc); - - ironlake_disable_pch_transcoder(dev_priv, pipe); - intel_set_pch_fifo_underrun_reporting(dev, pipe, true); + if (intel_crtc->config.has_pch_encoder) { + ironlake_fdi_disable(crtc); - if (HAS_PCH_CPT(dev)) { - /* disable TRANS_DP_CTL */ - reg = TRANS_DP_CTL(pipe); - temp = I915_READ(reg); - temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); - temp |= TRANS_DP_PORT_SEL_NONE; - I915_WRITE(reg, temp); + ironlake_disable_pch_transcoder(dev_priv, pipe); + intel_set_pch_fifo_underrun_reporting(dev, pipe, true); - /* disable DPLL_SEL */ - temp = I915_READ(PCH_DPLL_SEL); - switch (pipe) { - case 0: - temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); - break; - case 1: - temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); - break; - case 2: - /* C shares PLL A or B */ - temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); - break; - default: - BUG(); /* wtf */ + if (HAS_PCH_CPT(dev)) { + /* disable TRANS_DP_CTL */ + reg = TRANS_DP_CTL(pipe); + temp = I915_READ(reg); + temp &= ~(TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_PORT_SEL_MASK); + temp |= TRANS_DP_PORT_SEL_NONE; + I915_WRITE(reg, temp); + + /* disable DPLL_SEL */ + temp = I915_READ(PCH_DPLL_SEL); + temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); + I915_WRITE(PCH_DPLL_SEL, temp); } - I915_WRITE(PCH_DPLL_SEL, temp); - } - /* disable PCH DPLL */ - intel_disable_pch_pll(intel_crtc); + /* disable PCH DPLL */ + intel_disable_shared_dpll(intel_crtc); - ironlake_fdi_pll_disable(intel_crtc); + ironlake_fdi_pll_disable(intel_crtc); + } intel_crtc->active = false; intel_update_watermarks(dev); @@ -3463,7 +3460,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); - intel_crtc_update_cursor(crtc, false); /* FBC must be disabled before disabling the plane on HSW. 
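Editor's aside: across the crtc_enable/crtc_disable hunks above, the teardown order becomes the mirror image of the bring-up order — pipe, then primary plane, then the new intel_enable_planes() sprites, then cursor on enable; cursor, sprites, primary, pipe on disable. A rough sketch of that invariant only (function names are placeholders, and FBC/overlay steps are omitted):

#include <stdio.h>

static void pipe_ctl(int on)    { printf("pipe    %s\n", on ? "on" : "off"); }
static void primary_ctl(int on) { printf("primary %s\n", on ? "on" : "off"); }
static void sprites_ctl(int on) { printf("sprites %s\n", on ? "on" : "off"); }
static void cursor_ctl(int on)  { printf("cursor  %s\n", on ? "on" : "off"); }

/* Bring-up: pipe first, then the planes that scan out of it. */
static void crtc_enable_order(void)
{
        pipe_ctl(1);
        primary_ctl(1);
        sprites_ctl(1);
        cursor_ctl(1);
}

/* Teardown: the exact mirror, so nothing scans out of a dead pipe. */
static void crtc_disable_order(void)
{
        cursor_ctl(0);
        sprites_ctl(0);
        primary_ctl(0);
        pipe_ctl(0);
}

int main(void)
{
        crtc_enable_order();
        crtc_disable_order();
        return 0;
}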
*/ if (dev_priv->cfb_plane == plane) @@ -3471,6 +3467,8 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) hsw_disable_ips(intel_crtc); + intel_crtc_update_cursor(crtc, false); + intel_disable_planes(crtc); intel_disable_plane(dev_priv, plane, pipe); if (intel_crtc->config.has_pch_encoder) @@ -3504,7 +3502,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) static void ironlake_crtc_off(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - intel_put_pch_pll(intel_crtc); + intel_put_shared_dpll(intel_crtc); } static void haswell_crtc_off(struct drm_crtc *crtc) @@ -3611,19 +3609,17 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); - /* Enable panel fitting for eDP */ i9xx_pfit_enable(intel_crtc); + intel_crtc_load_lut(crtc); + intel_enable_pipe(dev_priv, pipe, false); intel_enable_plane(dev_priv, plane, pipe); + intel_enable_planes(crtc); + intel_crtc_update_cursor(crtc, true); - intel_crtc_load_lut(crtc); intel_update_fbc(dev); - /* Give the overlay scaler a chance to enable if it's on this pipe */ - intel_crtc_dpms_overlay(intel_crtc, true); - intel_crtc_update_cursor(crtc, true); - mutex_unlock(&dev_priv->dpio_lock); } @@ -3650,20 +3646,22 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) if (encoder->pre_enable) encoder->pre_enable(encoder); - /* Enable panel fitting for LVDS */ i9xx_pfit_enable(intel_crtc); + intel_crtc_load_lut(crtc); + intel_enable_pipe(dev_priv, pipe, false); intel_enable_plane(dev_priv, plane, pipe); + intel_enable_planes(crtc); + /* The fixup needs to happen before cursor is enabled */ if (IS_G4X(dev)) g4x_fixup_plane(dev_priv, pipe); - - intel_crtc_load_lut(crtc); - intel_update_fbc(dev); + intel_crtc_update_cursor(crtc, true); /* Give the overlay scaler a chance to enable if it's on this pipe */ intel_crtc_dpms_overlay(intel_crtc, true); - intel_crtc_update_cursor(crtc, true); + + intel_update_fbc(dev); for_each_encoder_on_crtc(dev, crtc, encoder) encoder->enable(encoder); @@ -3702,13 +3700,15 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) /* Give the overlay scaler a chance to disable if it's on this pipe */ intel_crtc_wait_for_pending_flips(crtc); drm_vblank_off(dev, pipe); - intel_crtc_dpms_overlay(intel_crtc, false); - intel_crtc_update_cursor(crtc, false); if (dev_priv->cfb_plane == plane) intel_disable_fbc(dev); + intel_crtc_dpms_overlay(intel_crtc, false); + intel_crtc_update_cursor(crtc, false); + intel_disable_planes(crtc); intel_disable_plane(dev_priv, plane, pipe); + intel_disable_pipe(dev_priv, pipe); i9xx_pfit_disable(intel_crtc); @@ -3985,7 +3985,7 @@ static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, { struct drm_device *dev = intel_crtc->base.dev; struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; - int target_clock, lane, link_bw; + int lane, link_bw, fdi_dotclock; bool setup_ok, needs_recompute = false; retry: @@ -3998,19 +3998,15 @@ retry: */ link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; - if (pipe_config->pixel_target_clock) - target_clock = pipe_config->pixel_target_clock; - else - target_clock = adjusted_mode->clock; + fdi_dotclock = adjusted_mode->clock; + fdi_dotclock /= pipe_config->pixel_multiplier; - lane = ironlake_get_lanes_required(target_clock, link_bw, + lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, pipe_config->pipe_bpp); pipe_config->fdi_lanes = lane; - if (pipe_config->pixel_multiplier > 1) - link_bw *= 
pipe_config->pixel_multiplier; - intel_link_compute_m_n(pipe_config->pipe_bpp, lane, target_clock, + intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, link_bw, &pipe_config->fdi_m_n); setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev, @@ -4039,12 +4035,11 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc, pipe_config->pipe_bpp == 24; } -static int intel_crtc_compute_config(struct drm_crtc *crtc, +static int intel_crtc_compute_config(struct intel_crtc *crtc, struct intel_crtc_config *pipe_config) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc->base.dev; struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); if (HAS_PCH_SPLIT(dev)) { /* FDI link clock is fixed at 2.7G */ @@ -4075,10 +4070,15 @@ static int intel_crtc_compute_config(struct drm_crtc *crtc, } if (IS_HASWELL(dev)) - hsw_compute_ips_config(intel_crtc, pipe_config); + hsw_compute_ips_config(crtc, pipe_config); + + /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old + * clock survives for now. */ + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + pipe_config->shared_dpll = crtc->config.shared_dpll; if (pipe_config->has_pch_encoder) - return ironlake_fdi_compute_config(intel_crtc, pipe_config); + return ironlake_fdi_compute_config(crtc, pipe_config); return 0; } @@ -4239,7 +4239,7 @@ static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) { - return (1 << dpll->n) << 16 | dpll->m1 << 8 | dpll->m2; + return (1 << dpll->n) << 16 | dpll->m2; } static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) @@ -4351,8 +4351,6 @@ static void vlv_update_pll(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_display_mode *adjusted_mode = - &crtc->config.adjusted_mode; struct intel_encoder *encoder; int pipe = crtc->pipe; u32 dpll, mdiv; @@ -4405,7 +4403,7 @@ static void vlv_update_pll(struct intel_crtc *crtc) vlv_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv); /* Set HBR and RBR LPF coefficients */ - if (adjusted_mode->clock == 162000 || + if (crtc->config.port_clock == 162000 || intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) vlv_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f0021); @@ -4459,11 +4457,8 @@ static void vlv_update_pll(struct intel_crtc *crtc) if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) DRM_ERROR("DPLL %d failed to lock\n", pipe); - dpll_md = 0; - if (crtc->config.pixel_multiplier > 1) { - dpll_md = (crtc->config.pixel_multiplier - 1) - << DPLL_MD_UDI_MULTIPLIER_SHIFT; - } + dpll_md = (crtc->config.pixel_multiplier - 1) + << DPLL_MD_UDI_MULTIPLIER_SHIFT; I915_WRITE(DPLL_MD(pipe), dpll_md); POSTING_READ(DPLL_MD(pipe)); @@ -4497,8 +4492,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc, else dpll |= DPLLB_MODE_DAC_SERIAL; - if ((crtc->config.pixel_multiplier > 1) && - (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))) { + if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { dpll |= (crtc->config.pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; } @@ -4561,11 +4555,8 @@ static void i9xx_update_pll(struct intel_crtc *crtc, udelay(150); if (INTEL_INFO(dev)->gen >= 4) { - u32 dpll_md = 0; - if (crtc->config.pixel_multiplier > 1) { - dpll_md = (crtc->config.pixel_multiplier - 1) - << DPLL_MD_UDI_MULTIPLIER_SHIFT; - } + u32 dpll_md = (crtc->config.pixel_multiplier - 1) + << DPLL_MD_UDI_MULTIPLIER_SHIFT; 
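Editor's aside: the ironlake_fdi_compute_config hunk above now works on an FDI dotclock — the adjusted mode clock divided by the pixel multiplier — instead of scaling the link bandwidth, and derives the lane count from that dotclock, the link bandwidth and the pipe bpp. A rough standalone estimate of that lane computation; the 5% spread-spectrum margin, the +1 rounding and the example clock numbers are illustrative assumptions, not the driver's exact constants:

#include <stdint.h>
#include <stdio.h>

/*
 * Rough model: the FDI link must carry fdi_dotclock * bpp bits per
 * second, each lane moves link_bw * 8 bits per second, and a little
 * headroom is left for spread spectrum (5% assumed here).
 */
static int fdi_lanes_required(int fdi_dotclock_khz, int link_bw_khz, int bpp)
{
        uint64_t bps = (uint64_t)fdi_dotclock_khz * bpp * 21 / 20;

        return (int)(bps / ((uint64_t)link_bw_khz * 8)) + 1;
}

int main(void)
{
        int adjusted_clock = 148500;    /* kHz, e.g. a 1080p60 mode */
        int pixel_multiplier = 1;       /* >1 only for low-dotclock SDVO modes */
        int link_bw = 270000;           /* kHz, example FDI link frequency */
        int bpp = 24;

        /* As in the hunk above: FDI runs at the mode clock / multiplier. */
        int fdi_dotclock = adjusted_clock / pixel_multiplier;

        printf("fdi lanes: %d\n", fdi_lanes_required(fdi_dotclock, link_bw, bpp));
        return 0;
}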
I915_WRITE(DPLL_MD(pipe), dpll_md); } else { /* The pixel multiplier can only be updated once the @@ -4578,7 +4569,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc, } static void i8xx_update_pll(struct intel_crtc *crtc, - struct drm_display_mode *adjusted_mode, intel_clock_t *reduced_clock, int num_connectors) { @@ -4633,14 +4623,15 @@ static void i8xx_update_pll(struct intel_crtc *crtc, I915_WRITE(DPLL(pipe), dpll); } -static void intel_set_pipe_timings(struct intel_crtc *intel_crtc, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) +static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; + struct drm_display_mode *adjusted_mode = + &intel_crtc->config.adjusted_mode; + struct drm_display_mode *mode = &intel_crtc->config.requested_mode; uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end; /* We need to be careful not to changed the adjusted mode, for otherwise @@ -4741,7 +4732,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) struct drm_i915_private *dev_priv = dev->dev_private; uint32_t pipeconf; - pipeconf = I915_READ(PIPECONF(intel_crtc->pipe)); + pipeconf = 0; if (intel_crtc->pipe == 0 && INTEL_INFO(dev)->gen < 4) { /* Enable pixel doubling when the dot clock is > 90% of the (display) @@ -4753,15 +4744,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) if (intel_crtc->config.requested_mode.clock > dev_priv->display.get_display_clock_speed(dev) * 9 / 10) pipeconf |= PIPECONF_DOUBLE_WIDE; - else - pipeconf &= ~PIPECONF_DOUBLE_WIDE; } /* only g4x and later have fancy bpc/dither controls */ if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) { - pipeconf &= ~(PIPECONF_BPC_MASK | - PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); - /* Bspec claims that we can't use dithering for 30bpp pipes. */ if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30) pipeconf |= PIPECONF_DITHER_EN | @@ -4789,23 +4775,17 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) pipeconf |= PIPECONF_CXSR_DOWNCLOCK; } else { DRM_DEBUG_KMS("disabling CxSR downclocking\n"); - pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; } } - pipeconf &= ~PIPECONF_INTERLACE_MASK; if (!IS_GEN2(dev) && intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; else pipeconf |= PIPECONF_PROGRESSIVE; - if (IS_VALLEYVIEW(dev)) { - if (intel_crtc->config.limited_color_range) - pipeconf |= PIPECONF_COLOR_RANGE_SELECT; - else - pipeconf &= ~PIPECONF_COLOR_RANGE_SELECT; - } + if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range) + pipeconf |= PIPECONF_COLOR_RANGE_SELECT; I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); POSTING_READ(PIPECONF(intel_crtc->pipe)); @@ -4818,8 +4798,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_display_mode *adjusted_mode = - &intel_crtc->config.adjusted_mode; struct drm_display_mode *mode = &intel_crtc->config.requested_mode; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; @@ -4850,9 +4828,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
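Editor's aside: the comment just above spells out the i9xx dot-clock relationship — dot = refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2 — and the renamed find_dpll hooks simply brute-force the divider ranges from the limit table, keeping the candidate whose dot clock lands closest to the target. A condensed standalone sketch of that search using the i9xx formula (Pineview uses a different m/n arrangement); the limit ranges below are invented for illustration and the intel_PLL_is_valid-style checks are omitted:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

struct dpll { int n, m1, m2, p1, p2, dot; };

/* dot = refclk * (5*(m1+2) + (m2+2)) / (n+2) / p1 / p2   (i9xx formula) */
static int dpll_dot(int refclk, const struct dpll *d)
{
        int m = 5 * (d->m1 + 2) + (d->m2 + 2);
        int vco = refclk * m / (d->n + 2);

        return vco / (d->p1 * d->p2);
}

static int find_best_dpll(int refclk, int target, struct dpll *best)
{
        /* Made-up limits for illustration; real tables are per-platform. */
        const int n_min = 1, n_max = 6, m1_min = 8, m1_max = 18;
        const int m2_min = 3, m2_max = 14, p1_min = 1, p1_max = 8;
        int err = INT_MAX;
        struct dpll d = { .p2 = 10 };   /* p2 picked up front (LVDS/dot limit) */

        for (d.n = n_min; d.n <= n_max; d.n++)
        for (d.m1 = m1_min; d.m1 <= m1_max; d.m1++)
        for (d.m2 = m2_min; d.m2 <= m2_max; d.m2++)
        for (d.p1 = p1_min; d.p1 <= p1_max; d.p1++) {
                d.dot = dpll_dot(refclk, &d);
                if (abs(d.dot - target) < err) {
                        err = abs(d.dot - target);
                        *best = d;
                }
        }
        return err != INT_MAX;
}

int main(void)
{
        struct dpll best;

        if (find_best_dpll(96000, 65000, &best))        /* kHz */
                printf("n=%d m1=%d m2=%d p1=%d p2=%d -> %d kHz\n",
                       best.n, best.m1, best.m2, best.p1, best.p2, best.dot);
        return 0;
}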
*/ limit = intel_limit(crtc, refclk); - ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, - &clock); - if (!ok) { + ok = dev_priv->display.find_dpll(limit, crtc, + intel_crtc->config.port_clock, + refclk, NULL, &clock); + if (!ok && !intel_crtc->config.clock_set) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; } @@ -4867,10 +4846,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, * by using the FP0/FP1. In such case we will disable the LVDS * downclock feature. */ - has_reduced_clock = limit->find_pll(limit, crtc, + has_reduced_clock = + dev_priv->display.find_dpll(limit, crtc, dev_priv->lvds_downclock, - refclk, - &clock, + refclk, &clock, &reduced_clock); } /* Compat-code for transition, will disappear. */ @@ -4883,7 +4862,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, } if (IS_GEN2(dev)) - i8xx_update_pll(intel_crtc, adjusted_mode, + i8xx_update_pll(intel_crtc, has_reduced_clock ? &reduced_clock : NULL, num_connectors); else if (IS_VALLEYVIEW(dev)) @@ -4903,7 +4882,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, dspcntr |= DISPPLANE_SEL_PIPE_B; } - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + intel_set_pipe_timings(intel_crtc); /* pipesrc and dspsize control the size that is scaled from, * which should always be the user's requested size. @@ -4963,6 +4942,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, uint32_t tmp; pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->shared_dpll = DPLL_ID_PRIVATE; tmp = I915_READ(PIPECONF(crtc->pipe)); if (!(tmp & PIPECONF_ENABLE)) @@ -4972,6 +4952,23 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, i9xx_get_pfit_config(crtc, pipe_config); + if (INTEL_INFO(dev)->gen >= 4) { + tmp = I915_READ(DPLL_MD(crtc->pipe)); + pipe_config->pixel_multiplier = + ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) + >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; + } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { + tmp = I915_READ(DPLL(crtc->pipe)); + pipe_config->pixel_multiplier = + ((tmp & SDVO_MULTIPLIER_MASK) + >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; + } else { + /* Note that on i915G/GM the pixel multiplier is in the sdvo + * port and will be fixed up in the encoder->get_config + * function. 
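Editor's aside: on gen4+ the pixel multiplier now round-trips through DPLL_MD — mode set writes (multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT and the new i9xx_get_pipe_config reads the field back and adds one. A tiny encode/decode sketch; the shift and mask values below are stand-ins for the i915_reg.h definitions:

#include <assert.h>
#include <stdint.h>

/* Stand-in field definition; the real shift/mask come from i915_reg.h. */
#define UDI_MULTIPLIER_SHIFT    8
#define UDI_MULTIPLIER_MASK     (0x3fu << UDI_MULTIPLIER_SHIFT)

static uint32_t dpll_md_encode(int pixel_multiplier)
{
        /* Hardware stores "multiplier - 1", so a multiplier of 1 encodes as 0. */
        return (uint32_t)(pixel_multiplier - 1) << UDI_MULTIPLIER_SHIFT;
}

static int dpll_md_decode(uint32_t dpll_md)
{
        return (int)((dpll_md & UDI_MULTIPLIER_MASK) >> UDI_MULTIPLIER_SHIFT) + 1;
}

int main(void)
{
        for (int mult = 1; mult <= 4; mult++)
                assert(dpll_md_decode(dpll_md_encode(mult)) == mult);
        return 0;
}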
*/ + pipe_config->pixel_multiplier = 1; + } + return true; } @@ -5330,9 +5327,8 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc) int pipe = intel_crtc->pipe; uint32_t val; - val = I915_READ(PIPECONF(pipe)); + val = 0; - val &= ~PIPECONF_BPC_MASK; switch (intel_crtc->config.pipe_bpp) { case 18: val |= PIPECONF_6BPC; @@ -5351,11 +5347,9 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc) BUG(); } - val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); if (intel_crtc->config.dither) val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - val &= ~PIPECONF_INTERLACE_MASK; if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= PIPECONF_INTERLACED_ILK; else @@ -5363,8 +5357,6 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc) if (intel_crtc->config.limited_color_range) val |= PIPECONF_COLOR_RANGE_SELECT; - else - val &= ~PIPECONF_COLOR_RANGE_SELECT; I915_WRITE(PIPECONF(pipe), val); POSTING_READ(PIPECONF(pipe)); @@ -5441,13 +5433,11 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; uint32_t val; - val = I915_READ(PIPECONF(cpu_transcoder)); + val = 0; - val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK); if (intel_crtc->config.dither) val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); - val &= ~PIPECONF_INTERLACE_MASK_HSW; if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) val |= PIPECONF_INTERLACED_ILK; else @@ -5455,10 +5445,12 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) I915_WRITE(PIPECONF(cpu_transcoder), val); POSTING_READ(PIPECONF(cpu_transcoder)); + + I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); + POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); } static bool ironlake_compute_clocks(struct drm_crtc *crtc, - struct drm_display_mode *adjusted_mode, intel_clock_t *clock, bool *has_reduced_clock, intel_clock_t *reduced_clock) @@ -5486,8 +5478,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ limit = intel_limit(crtc, refclk); - ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL, - clock); + ret = dev_priv->display.find_dpll(limit, crtc, + to_intel_crtc(crtc)->config.port_clock, + refclk, NULL, clock); if (!ret) return false; @@ -5498,11 +5491,11 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, * by using the FP0/FP1. In such case we will disable the LVDS * downclock feature. 
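Editor's aside: the i9xx/ironlake/haswell set_pipeconf hunks above all follow one pattern — instead of read-modify-write (read PIPECONF, clear the relevant masks, OR in new bits), the value is now built from zero so the register contents are fully determined by software state rather than by whatever was left behind. The shape of that change in the abstract; the register shadow, mask and bit values here are purely illustrative:

#include <stdint.h>

#define FIELD_MASK      0x000000f0u     /* illustrative field within the register */
#define FIELD_VALUE     0x00000030u

static uint32_t mmio_shadow;            /* stand-in for I915_READ/I915_WRITE */

static uint32_t reg_read(void)          { return mmio_shadow; }
static void     reg_write(uint32_t val) { mmio_shadow = val; }

/* Old shape: depend on whatever the register currently contains. */
static void set_field_rmw(void)
{
        uint32_t val = reg_read();

        val &= ~FIELD_MASK;
        val |= FIELD_VALUE;
        reg_write(val);
}

/* New shape: derive the whole value from software state, no hidden bits. */
static void set_field_from_state(void)
{
        uint32_t val = 0;

        val |= FIELD_VALUE;
        /* ... every other field ORed in from the CRTC configuration ... */
        reg_write(val);
}

int main(void)
{
        set_field_rmw();
        set_field_from_state();
        return reg_read() != FIELD_VALUE;
}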
*/ - *has_reduced_clock = limit->find_pll(limit, crtc, - dev_priv->lvds_downclock, - refclk, - clock, - reduced_clock); + *has_reduced_clock = + dev_priv->display.find_dpll(limit, crtc, + dev_priv->lvds_downclock, + refclk, clock, + reduced_clock); } return true; @@ -5615,10 +5608,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, else dpll |= DPLLB_MODE_DAC_SERIAL; - if (intel_crtc->config.pixel_multiplier > 1) { - dpll |= (intel_crtc->config.pixel_multiplier - 1) - << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; - } + dpll |= (intel_crtc->config.pixel_multiplier - 1) + << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; if (is_sdvo) dpll |= DPLL_DVO_HIGH_SPEED; @@ -5650,7 +5641,7 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, else dpll |= PLL_REF_INPUT_DREFCLK; - return dpll; + return dpll | DPLL_VCO_ENABLE; } static int ironlake_crtc_mode_set(struct drm_crtc *crtc, @@ -5660,9 +5651,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_display_mode *adjusted_mode = - &intel_crtc->config.adjusted_mode; - struct drm_display_mode *mode = &intel_crtc->config.requested_mode; int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; int num_connectors = 0; @@ -5671,6 +5659,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, bool ok, has_reduced_clock = false; bool is_lvds = false; struct intel_encoder *encoder; + struct intel_shared_dpll *pll; int ret; for_each_encoder_on_crtc(dev, crtc, encoder) { @@ -5686,9 +5675,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); - ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, + ok = ironlake_compute_clocks(crtc, &clock, &has_reduced_clock, &reduced_clock); - if (!ok) { + if (!ok && !intel_crtc->config.clock_set) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; } @@ -5706,8 +5695,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ if (intel_crtc->config.has_pch_encoder) { - struct intel_pch_pll *pll; - fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); if (has_reduced_clock) fp2 = i9xx_dpll_compute_fp(&reduced_clock); @@ -5716,14 +5703,21 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, &fp, &reduced_clock, has_reduced_clock ? 
&fp2 : NULL); - pll = intel_get_pch_pll(intel_crtc, dpll, fp); + intel_crtc->config.dpll_hw_state.dpll = dpll; + intel_crtc->config.dpll_hw_state.fp0 = fp; + if (has_reduced_clock) + intel_crtc->config.dpll_hw_state.fp1 = fp2; + else + intel_crtc->config.dpll_hw_state.fp1 = fp; + + pll = intel_get_shared_dpll(intel_crtc, dpll, fp); if (pll == NULL) { DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", pipe_name(pipe)); return -EINVAL; } } else - intel_put_pch_pll(intel_crtc); + intel_put_shared_dpll(intel_crtc); if (intel_crtc->config.has_dp_encoder) intel_dp_set_m_n(intel_crtc); @@ -5732,11 +5726,18 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, if (encoder->pre_pll_enable) encoder->pre_pll_enable(encoder); - if (intel_crtc->pch_pll) { - I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); + if (is_lvds && has_reduced_clock && i915_powersave) + intel_crtc->lowfreq_avail = true; + else + intel_crtc->lowfreq_avail = false; + + if (intel_crtc->config.has_pch_encoder) { + pll = intel_crtc_to_shared_dpll(intel_crtc); + + I915_WRITE(PCH_DPLL(pll->id), dpll); /* Wait for the clocks to stabilize. */ - POSTING_READ(intel_crtc->pch_pll->pll_reg); + POSTING_READ(PCH_DPLL(pll->id)); udelay(150); /* The pixel multiplier can only be updated once the @@ -5744,20 +5745,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, * * So write it again. */ - I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); - } + I915_WRITE(PCH_DPLL(pll->id), dpll); - intel_crtc->lowfreq_avail = false; - if (intel_crtc->pch_pll) { - if (is_lvds && has_reduced_clock && i915_powersave) { - I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); - intel_crtc->lowfreq_avail = true; - } else { - I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); - } + if (has_reduced_clock) + I915_WRITE(PCH_FP1(pll->id), fp2); + else + I915_WRITE(PCH_FP1(pll->id), fp); } - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + intel_set_pipe_timings(intel_crtc); if (intel_crtc->config.has_pch_encoder) { intel_cpu_transcoder_set_m_n(intel_crtc, @@ -5808,6 +5804,14 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc, if (tmp & PF_ENABLE) { pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); + + /* We currently do not free assignements of panel fitters on + * ivb/hsw (since we don't use the higher upscaling modes which + * differentiates them) so just WARN about this case for now. */ + if (IS_GEN7(dev)) { + WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != + PF_PIPE_SEL_IVB(crtc->pipe)); + } } } @@ -5819,12 +5823,15 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, uint32_t tmp; pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->shared_dpll = DPLL_ID_PRIVATE; tmp = I915_READ(PIPECONF(crtc->pipe)); if (!(tmp & PIPECONF_ENABLE)) return false; if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { + struct intel_shared_dpll *pll; + pipe_config->has_pch_encoder = true; tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); @@ -5832,6 +5839,27 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, FDI_DP_PORT_WIDTH_SHIFT) + 1; ironlake_get_fdi_m_n_config(crtc, pipe_config); + + /* XXX: Can't properly read out the pch dpll pixel multiplier + * since we don't have state tracking for pch clocks yet. 
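Editor's aside: the ironlake_get_pipe_config readout in this area recovers which shared DPLL a pipe is on — IBX hard-wires PLL to pipe, while CPT decodes the per-transcoder select bit in PCH_DPLL_SEL that ironlake_pch_enable() sets above. A small decode helper, under the assumption that the bit layout matches the 4*pipe / 4*pipe+3 shifts visible in the removed assert code:

#include <stdint.h>
#include <stdio.h>

enum pch_dpll_id { PCH_PLL_A = 0, PCH_PLL_B = 1 };

/* Bit layout assumed from the 4*pipe / 4*pipe+3 shifts in the old asserts. */
#define TRANS_DPLLB_SEL(pipe)   (1u << ((pipe) * 4))
#define TRANS_DPLL_ENABLE(pipe) (1u << ((pipe) * 4 + 3))

static enum pch_dpll_id decode_shared_dpll(uint32_t pch_dpll_sel, int pipe,
                                           int has_pch_ibx)
{
        if (has_pch_ibx)
                return (enum pch_dpll_id)pipe;  /* IBX: fixed PLL->pipe mapping */

        return (pch_dpll_sel & TRANS_DPLLB_SEL(pipe)) ? PCH_PLL_B : PCH_PLL_A;
}

int main(void)
{
        /* Pipe 1 enabled and routed to DPLL B. */
        uint32_t sel = TRANS_DPLL_ENABLE(1) | TRANS_DPLLB_SEL(1);

        printf("pipe 1 uses PCH DPLL %c\n",
               decode_shared_dpll(sel, 1, 0) == PCH_PLL_B ? 'B' : 'A');
        return 0;
}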
*/ + pipe_config->pixel_multiplier = 1; + + if (HAS_PCH_IBX(dev_priv->dev)) { + pipe_config->shared_dpll = crtc->pipe; + } else { + tmp = I915_READ(PCH_DPLL_SEL); + if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) + pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B; + else + pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A; + } + + pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; + + WARN_ON(!pll->get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state)); + } else { + pipe_config->pixel_multiplier = 1; } intel_get_pipe_timings(crtc, pipe_config); @@ -5865,31 +5893,10 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct drm_display_mode *adjusted_mode = - &intel_crtc->config.adjusted_mode; - struct drm_display_mode *mode = &intel_crtc->config.requested_mode; - int pipe = intel_crtc->pipe; int plane = intel_crtc->plane; - int num_connectors = 0; - bool is_cpu_edp = false; - struct intel_encoder *encoder; int ret; - for_each_encoder_on_crtc(dev, crtc, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_EDP: - if (enc_to_dig_port(&encoder->base)->port == PORT_A) - is_cpu_edp = true; - break; - } - - num_connectors++; - } - - WARN(num_connectors != 1, "%d connectors attached to pipe %c\n", - num_connectors, pipe_name(pipe)); - - if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) + if (!intel_ddi_pll_mode_set(crtc)) return -EINVAL; /* Ensure that the cursor is valid for the new mode before changing... */ @@ -5900,7 +5907,7 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc, intel_crtc->lowfreq_avail = false; - intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); + intel_set_pipe_timings(intel_crtc); if (intel_crtc->config.has_pch_encoder) { intel_cpu_transcoder_set_m_n(intel_crtc, @@ -5931,6 +5938,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, uint32_t tmp; pipe_config->cpu_transcoder = crtc->pipe; + pipe_config->shared_dpll = DPLL_ID_PRIVATE; + tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); if (tmp & TRANS_DDI_FUNC_ENABLE) { enum pipe trans_edp_pipe; @@ -5987,6 +5996,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && (I915_READ(IPS_CTL) & IPS_ENABLE); + pipe_config->pixel_multiplier = 1; + return true; } @@ -6300,6 +6311,9 @@ void intel_crtc_load_lut(struct drm_crtc *crtc) if (!crtc->enabled || !intel_crtc->active) return; + if (!HAS_PCH_SPLIT(dev_priv->dev)) + assert_pll_enabled(dev_priv, pipe); + /* use legacy palette for Ironlake */ if (HAS_PCH_SPLIT(dev)) palreg = LGC_PALETTE(pipe); @@ -6909,8 +6923,10 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) return 0; } - /* XXX: Handle the 100Mhz refclk */ - intel_clock(dev, 96000, &clock); + if (IS_PINEVIEW(dev)) + pineview_clock(96000, &clock); + else + i9xx_clock(96000, &clock); } else { bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); @@ -6922,9 +6938,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) { /* XXX: might not be 66MHz */ - intel_clock(dev, 66000, &clock); + i9xx_clock(66000, &clock); } else - intel_clock(dev, 48000, &clock); + i9xx_clock(48000, &clock); } else { if (dpll & PLL_P1_DIVIDE_BY_TWO) clock.p1 = 2; @@ -6937,7 +6953,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) else clock.p2 
= 2; - intel_clock(dev, 48000, &clock); + i9xx_clock(48000, &clock); } } @@ -7068,7 +7084,8 @@ void intel_mark_idle(struct drm_device *dev) } } -void intel_mark_fb_busy(struct drm_i915_gem_object *obj) +void intel_mark_fb_busy(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring) { struct drm_device *dev = obj->base.dev; struct drm_crtc *crtc; @@ -7080,8 +7097,12 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj) if (!crtc->fb) continue; - if (to_intel_framebuffer(crtc->fb)->obj == obj) - intel_increase_pllclock(crtc); + if (to_intel_framebuffer(crtc->fb)->obj != obj) + continue; + + intel_increase_pllclock(crtc); + if (ring && intel_fbc_enabled(dev)) + ring->fbc_dirty = true; } } @@ -7531,7 +7552,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, goto cleanup_pending; intel_disable_fbc(dev); - intel_mark_fb_busy(obj); + intel_mark_fb_busy(obj, NULL); mutex_unlock(&dev->struct_mutex); trace_i915_flip_request(intel_crtc->plane, obj); @@ -7562,28 +7583,6 @@ static struct drm_crtc_helper_funcs intel_helper_funcs = { .load_lut = intel_crtc_load_lut, }; -bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) -{ - struct intel_encoder *other_encoder; - struct drm_crtc *crtc = &encoder->new_crtc->base; - - if (WARN_ON(!crtc)) - return false; - - list_for_each_entry(other_encoder, - &crtc->dev->mode_config.encoder_list, - base.head) { - - if (&other_encoder->new_crtc->base != crtc || - encoder == other_encoder) - continue; - else - return true; - } - - return false; -} - static bool intel_encoder_crtc_ok(struct drm_encoder *encoder, struct drm_crtc *crtc) { @@ -7651,13 +7650,39 @@ static void intel_modeset_commit_output_state(struct drm_device *dev) } } +static void +connected_sink_compute_bpp(struct intel_connector * connector, + struct intel_crtc_config *pipe_config) +{ + int bpp = pipe_config->pipe_bpp; + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", + connector->base.base.id, + drm_get_connector_name(&connector->base)); + + /* Don't use an invalid EDID bpc value */ + if (connector->base.display_info.bpc && + connector->base.display_info.bpc * 3 < bpp) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", + bpp, connector->base.display_info.bpc*3); + pipe_config->pipe_bpp = connector->base.display_info.bpc*3; + } + + /* Clamp bpp to 8 on screens without EDID 1.4 */ + if (connector->base.display_info.bpc == 0 && bpp > 24) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", + bpp); + pipe_config->pipe_bpp = 24; + } +} + static int -pipe_config_set_bpp(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct intel_crtc_config *pipe_config) +compute_baseline_pipe_bpp(struct intel_crtc *crtc, + struct drm_framebuffer *fb, + struct intel_crtc_config *pipe_config) { - struct drm_device *dev = crtc->dev; - struct drm_connector *connector; + struct drm_device *dev = crtc->base.dev; + struct intel_connector *connector; int bpp; switch (fb->pixel_format) { @@ -7700,24 +7725,12 @@ pipe_config_set_bpp(struct drm_crtc *crtc, /* Clamp display bpp to EDID value */ list_for_each_entry(connector, &dev->mode_config.connector_list, - head) { - if (connector->encoder && connector->encoder->crtc != crtc) + base.head) { + if (!connector->new_encoder || + connector->new_encoder->new_crtc != crtc) continue; - /* Don't use an invalid EDID bpc value */ - if (connector->display_info.bpc && - connector->display_info.bpc * 3 < bpp) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of 
%d\n", - bpp, connector->display_info.bpc*3); - pipe_config->pipe_bpp = connector->display_info.bpc*3; - } - - /* Clamp bpp to 8 on screens without EDID 1.4 */ - if (connector->display_info.bpc == 0 && bpp > 24) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", - bpp); - pipe_config->pipe_bpp = 24; - } + connected_sink_compute_bpp(connector, pipe_config); } return bpp; @@ -7753,6 +7766,25 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled); } +static bool check_encoder_cloning(struct drm_crtc *crtc) +{ + int num_encoders = 0; + bool uncloneable_encoders = false; + struct intel_encoder *encoder; + + list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list, + base.head) { + if (&encoder->new_crtc->base != crtc) + continue; + + num_encoders++; + if (!encoder->cloneable) + uncloneable_encoders = true; + } + + return !(num_encoders > 1 && uncloneable_encoders); +} + static struct intel_crtc_config * intel_modeset_pipe_config(struct drm_crtc *crtc, struct drm_framebuffer *fb, @@ -7765,6 +7797,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, int plane_bpp, ret = -EINVAL; bool retry = true; + if (!check_encoder_cloning(crtc)) { + DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); + return ERR_PTR(-EINVAL); + } + pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); if (!pipe_config) return ERR_PTR(-ENOMEM); @@ -7772,12 +7809,22 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, drm_mode_copy(&pipe_config->adjusted_mode, mode); drm_mode_copy(&pipe_config->requested_mode, mode); pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe; - - plane_bpp = pipe_config_set_bpp(crtc, fb, pipe_config); + pipe_config->shared_dpll = DPLL_ID_PRIVATE; + + /* Compute a starting value for pipe_config->pipe_bpp taking the source + * plane pixel format and any sink constraints into account. Returns the + * source plane bpp so that dithering can be selected on mismatches + * after encoders and crtc also have had their say. */ + plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), + fb, pipe_config); if (plane_bpp < 0) goto fail; encoder_retry: + /* Ensure the port clock defaults are reset when retrying. */ + pipe_config->port_clock = 0; + pipe_config->pixel_multiplier = 1; + /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. @@ -7806,7 +7853,12 @@ encoder_retry: } } - ret = intel_crtc_compute_config(crtc, pipe_config); + /* Set default port clock if not overwritten by the encoder. Needs to be + * done afterwards in case the encoder adjusts the mode. 
*/ + if (!pipe_config->port_clock) + pipe_config->port_clock = pipe_config->adjusted_mode.clock; + + ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); if (ret < 0) { DRM_DEBUG_KMS("CRTC fixup failed\n"); goto fail; @@ -8002,6 +8054,15 @@ intel_pipe_config_compare(struct drm_device *dev, struct intel_crtc_config *current_config, struct intel_crtc_config *pipe_config) { +#define PIPE_CONF_CHECK_X(name) \ + if (current_config->name != pipe_config->name) { \ + DRM_ERROR("mismatch in " #name " " \ + "(expected 0x%08x, found 0x%08x)\n", \ + current_config->name, \ + pipe_config->name); \ + return false; \ + } + #define PIPE_CONF_CHECK_I(name) \ if (current_config->name != pipe_config->name) { \ DRM_ERROR("mismatch in " #name " " \ @@ -8020,6 +8081,9 @@ intel_pipe_config_compare(struct drm_device *dev, return false; \ } +#define PIPE_CONF_QUIRK(quirk) \ + ((current_config->quirks | pipe_config->quirks) & (quirk)) + PIPE_CONF_CHECK_I(cpu_transcoder); PIPE_CONF_CHECK_I(has_pch_encoder); @@ -8044,17 +8108,22 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start); PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end); + if (!HAS_PCH_SPLIT(dev)) + PIPE_CONF_CHECK_I(pixel_multiplier); + PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, DRM_MODE_FLAG_INTERLACE); - PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, - DRM_MODE_FLAG_PHSYNC); - PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, - DRM_MODE_FLAG_NHSYNC); - PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, - DRM_MODE_FLAG_PVSYNC); - PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, - DRM_MODE_FLAG_NVSYNC); + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { + PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, + DRM_MODE_FLAG_PHSYNC); + PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, + DRM_MODE_FLAG_NHSYNC); + PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, + DRM_MODE_FLAG_PVSYNC); + PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags, + DRM_MODE_FLAG_NVSYNC); + } PIPE_CONF_CHECK_I(requested_mode.hdisplay); PIPE_CONF_CHECK_I(requested_mode.vdisplay); @@ -8069,20 +8138,23 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_I(ips_enabled); + PIPE_CONF_CHECK_I(shared_dpll); + PIPE_CONF_CHECK_X(dpll_hw_state.dpll); + PIPE_CONF_CHECK_X(dpll_hw_state.fp0); + PIPE_CONF_CHECK_X(dpll_hw_state.fp1); + +#undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_FLAGS +#undef PIPE_CONF_QUIRK return true; } -void -intel_modeset_check_state(struct drm_device *dev) +static void +check_connector_state(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; - struct intel_crtc *crtc; - struct intel_encoder *encoder; struct intel_connector *connector; - struct intel_crtc_config pipe_config; list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { @@ -8093,6 +8165,13 @@ intel_modeset_check_state(struct drm_device *dev) WARN(&connector->new_encoder->base != connector->base.encoder, "connector's staged encoder doesn't match current encoder\n"); } +} + +static void +check_encoder_state(struct drm_device *dev) +{ + struct intel_encoder *encoder; + struct intel_connector *connector; list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { @@ -8144,6 +8223,15 @@ intel_modeset_check_state(struct drm_device *dev) tracked_pipe, pipe); } +} + +static void +check_crtc_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *crtc; + struct intel_encoder *encoder; + struct intel_crtc_config pipe_config; list_for_each_entry(crtc, 
&dev->mode_config.crtc_list, base.head) { @@ -8165,9 +8253,8 @@ intel_modeset_check_state(struct drm_device *dev) enabled = true; if (encoder->connectors_active) active = true; - if (encoder->get_config) - encoder->get_config(encoder, &pipe_config); } + WARN(active != crtc->active, "crtc's computed active state doesn't match tracked active state " "(expected %i, found %i)\n", active, crtc->active); @@ -8182,6 +8269,14 @@ intel_modeset_check_state(struct drm_device *dev) if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) active = crtc->active; + list_for_each_entry(encoder, &dev->mode_config.encoder_list, + base.head) { + if (encoder->base.crtc != &crtc->base) + continue; + if (encoder->get_config) + encoder->get_config(encoder, &pipe_config); + } + WARN(crtc->active != active, "crtc active state doesn't match with hw state " "(expected %i, found %i)\n", crtc->active, active); @@ -8197,6 +8292,63 @@ intel_modeset_check_state(struct drm_device *dev) } } +static void +check_shared_dpll_state(struct drm_device *dev) +{ + drm_i915_private_t *dev_priv = dev->dev_private; + struct intel_crtc *crtc; + struct intel_dpll_hw_state dpll_hw_state; + int i; + + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; + int enabled_crtcs = 0, active_crtcs = 0; + bool active; + + memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); + + DRM_DEBUG_KMS("%s\n", pll->name); + + active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); + + WARN(pll->active > pll->refcount, + "more active pll users than references: %i vs %i\n", + pll->active, pll->refcount); + WARN(pll->active && !pll->on, + "pll in active use but not on in sw tracking\n"); + WARN(pll->on != active, + "pll on state mismatch (expected %i, found %i)\n", + pll->on, active); + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll) + enabled_crtcs++; + if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) + active_crtcs++; + } + WARN(pll->active != active_crtcs, + "pll active crtcs mismatch (expected %i, found %i)\n", + pll->active, active_crtcs); + WARN(pll->refcount != enabled_crtcs, + "pll enabled crtcs mismatch (expected %i, found %i)\n", + pll->refcount, enabled_crtcs); + + WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state, + sizeof(dpll_hw_state)), + "pll hw state mismatch\n"); + } +} + +void +intel_modeset_check_state(struct drm_device *dev) +{ + check_connector_state(dev); + check_encoder_state(dev); + check_crtc_state(dev); + check_shared_dpll_state(dev); +} + static int __intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y, struct drm_framebuffer *fb) @@ -8633,23 +8785,93 @@ static void intel_cpu_pll_init(struct drm_device *dev) intel_ddi_pll_init(dev); } -static void intel_pch_pll_init(struct drm_device *dev) +static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) { - drm_i915_private_t *dev_priv = dev->dev_private; - int i; + uint32_t val; - if (dev_priv->num_pch_pll == 0) { - DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n"); - return; + val = I915_READ(PCH_DPLL(pll->id)); + hw_state->dpll = val; + hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); + hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); + + return val & DPLL_VCO_ENABLE; +} + +static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + 
uint32_t reg, val; + + /* PCH refclock must be enabled first */ + assert_pch_refclk_enabled(dev_priv); + + reg = PCH_DPLL(pll->id); + val = I915_READ(reg); + val |= DPLL_VCO_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(200); +} + +static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + struct drm_device *dev = dev_priv->dev; + struct intel_crtc *crtc; + uint32_t reg, val; + + /* Make sure no transcoder isn't still depending on us. */ + list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { + if (intel_crtc_to_shared_dpll(crtc) == pll) + assert_pch_transcoder_disabled(dev_priv, crtc->pipe); } - for (i = 0; i < dev_priv->num_pch_pll; i++) { - dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i); - dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i); - dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i); + reg = PCH_DPLL(pll->id); + val = I915_READ(reg); + val &= ~DPLL_VCO_ENABLE; + I915_WRITE(reg, val); + POSTING_READ(reg); + udelay(200); +} + +static char *ibx_pch_dpll_names[] = { + "PCH DPLL A", + "PCH DPLL B", +}; + +static void ibx_pch_dpll_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + dev_priv->num_shared_dpll = 2; + + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + dev_priv->shared_dplls[i].id = i; + dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; + dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; + dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; + dev_priv->shared_dplls[i].get_hw_state = + ibx_pch_dpll_get_hw_state; } } +static void intel_shared_dpll_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) + ibx_pch_dpll_init(dev); + else + dev_priv->num_shared_dpll = 0; + + BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); + DRM_DEBUG_KMS("%i shared PLLs initialized\n", + dev_priv->num_shared_dpll); +} + static void intel_crtc_init(struct drm_device *dev, int pipe) { drm_i915_private_t *dev_priv = dev->dev_private; @@ -8754,13 +8976,8 @@ static void intel_setup_outputs(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; bool dpd_is_edp = false; - bool has_lvds; - has_lvds = intel_lvds_init(dev); - if (!has_lvds && !HAS_PCH_SPLIT(dev)) { - /* disable the panel fitter on everything but LVDS */ - I915_WRITE(PFIT_CONTROL, 0); - } + intel_lvds_init(dev); if (!IS_ULT(dev)) intel_crt_init(dev); @@ -9015,6 +9232,15 @@ static void intel_init_display(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + if (HAS_PCH_SPLIT(dev) || IS_G4X(dev)) + dev_priv->display.find_dpll = g4x_find_best_dpll; + else if (IS_VALLEYVIEW(dev)) + dev_priv->display.find_dpll = vlv_find_best_dpll; + else if (IS_PINEVIEW(dev)) + dev_priv->display.find_dpll = pnv_find_best_dpll; + else + dev_priv->display.find_dpll = i9xx_find_best_dpll; + if (HAS_DDI(dev)) { dev_priv->display.get_pipe_config = haswell_get_pipe_config; dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; @@ -9333,7 +9559,7 @@ void intel_modeset_init(struct drm_device *dev) } intel_cpu_pll_init(dev); - intel_pch_pll_init(dev); + intel_shared_dpll_init(dev); /* Just disable it once at startup */ i915_disable_vga(dev); @@ -9534,17 +9760,14 @@ void i915_redisable_vga(struct drm_device *dev) } } -/* Scan out the current hw modeset state, sanitizes it and maps it into the drm - * and i915 state tracking structures. 
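Editor's aside: ibx_pch_dpll_init() above turns each PCH PLL into a small object with enable/disable/get_hw_state hooks, which is what lets the generic shared-DPLL code avoid touching PCH registers directly. A stripped-down model of that ops-table pattern; the types, names and trivial hook bodies are a sketch, not the driver's structures:

#include <stdbool.h>
#include <stdio.h>

struct dpll_model {
        const char *name;
        int id;
        bool on;
        /* per-implementation hooks, filled in at init time */
        void (*enable)(struct dpll_model *pll);
        void (*disable)(struct dpll_model *pll);
        bool (*get_hw_state)(struct dpll_model *pll);
};

static void pch_enable(struct dpll_model *pll)       { pll->on = true; }
static void pch_disable(struct dpll_model *pll)      { pll->on = false; }
static bool pch_get_hw_state(struct dpll_model *pll) { return pll->on; }

static void pch_dpll_init(struct dpll_model *plls, int n)
{
        static const char *names[] = { "PCH DPLL A", "PCH DPLL B" };

        for (int i = 0; i < n; i++) {
                plls[i].id = i;
                plls[i].name = names[i];
                plls[i].enable = pch_enable;
                plls[i].disable = pch_disable;
                plls[i].get_hw_state = pch_get_hw_state;
        }
}

int main(void)
{
        struct dpll_model plls[2] = { { 0 } };

        pch_dpll_init(plls, 2);
        plls[1].enable(&plls[1]);
        printf("%s hw state: %d\n", plls[1].name, plls[1].get_hw_state(&plls[1]));
        return 0;
}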
*/ -void intel_modeset_setup_hw_state(struct drm_device *dev, - bool force_restore) +static void intel_modeset_readout_hw_state(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; - struct drm_plane *plane; struct intel_crtc *crtc; struct intel_encoder *encoder; struct intel_connector *connector; + int i; list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { @@ -9560,9 +9783,26 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, crtc->active ? "enabled" : "disabled"); } + /* FIXME: Smash this into the new shared dpll infrastructure. */ if (HAS_DDI(dev)) intel_ddi_setup_hw_pll_state(dev); + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; + + pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state); + pll->active = 0; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, + base.head) { + if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) + pll->active++; + } + pll->refcount = pll->active; + + DRM_DEBUG_KMS("%s hw state readout: refcount %i\n", + pll->name, pll->refcount); + } + list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { pipe = 0; @@ -9599,6 +9839,20 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, drm_get_connector_name(&connector->base), connector->base.encoder ? "enabled" : "disabled"); } +} + +/* Scan out the current hw modeset state, sanitizes it and maps it into the drm + * and i915 state tracking structures. */ +void intel_modeset_setup_hw_state(struct drm_device *dev, + bool force_restore) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe; + struct drm_plane *plane; + struct intel_crtc *crtc; + struct intel_encoder *encoder; + + intel_modeset_readout_hw_state(dev); /* HW state is read out, now we need to sanitize this mess. */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 91a31b3b9829..98686005dcf6 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -677,7 +677,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; int bpp, mode_rate; static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; - int target_clock, link_avail, link_clock; + int link_avail, link_clock; if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) pipe_config->has_pch_encoder = true; @@ -694,8 +694,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_pch_panel_fitting(intel_crtc, pipe_config, intel_connector->panel.fitting_mode); } - /* We need to take the panel's fixed mode into account. */ - target_clock = adjusted_mode->clock; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) return false; @@ -706,12 +704,12 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. 
*/ - bpp = min_t(int, 8*3, pipe_config->pipe_bpp); + bpp = pipe_config->pipe_bpp; if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp); for (; bpp >= 6*3; bpp -= 2*3) { - mode_rate = intel_dp_link_required(target_clock, bpp); + mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); for (clock = 0; clock <= max_clock; clock++) { for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { @@ -746,18 +744,17 @@ found: intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; - adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); pipe_config->pipe_bpp = bpp; - pipe_config->pixel_target_clock = target_clock; + pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", intel_dp->link_bw, intel_dp->lane_count, - adjusted_mode->clock, bpp); + pipe_config->port_clock, bpp); DRM_DEBUG_KMS("DP link bw required %i available %i\n", mode_rate, link_avail); intel_link_compute_m_n(bpp, lane_count, - target_clock, adjusted_mode->clock, + adjusted_mode->clock, pipe_config->port_clock, &pipe_config->dp_m_n); intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); @@ -780,24 +777,28 @@ void intel_dp_init_link_config(struct intel_dp *intel_dp) } } -static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) +static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp) { - struct drm_device *dev = crtc->dev; + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); + struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 dpa_ctl; - DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); + DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock); dpa_ctl = I915_READ(DP_A); dpa_ctl &= ~DP_PLL_FREQ_MASK; - if (clock < 200000) { + if (crtc->config.port_clock == 162000) { /* For a long time we've carried around a ILK-DevA w/a for the * 160MHz clock. If we're really unlucky, it's still required. 
*/ DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); dpa_ctl |= DP_PLL_FREQ_160MHZ; + intel_dp->DP |= DP_PLL_FREQ_160MHZ; } else { dpa_ctl |= DP_PLL_FREQ_270MHZ; + intel_dp->DP |= DP_PLL_FREQ_270MHZ; } I915_WRITE(DP_A, dpa_ctl); @@ -814,8 +815,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); enum port port = dp_to_dig_port(intel_dp)->port; - struct drm_crtc *crtc = encoder->crtc; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); /* * There are four kinds of DP registers: @@ -845,7 +845,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (intel_dp->has_audio) { DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", - pipe_name(intel_crtc->pipe)); + pipe_name(crtc->pipe)); intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; intel_write_eld(encoder, adjusted_mode); } @@ -864,13 +864,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) intel_dp->DP |= DP_ENHANCED_FRAMING; - intel_dp->DP |= intel_crtc->pipe << 29; - - /* don't miss out required setting for eDP */ - if (adjusted_mode->clock < 200000) - intel_dp->DP |= DP_PLL_FREQ_160MHZ; - else - intel_dp->DP |= DP_PLL_FREQ_270MHZ; + intel_dp->DP |= crtc->pipe << 29; } else if (!HAS_PCH_CPT(dev) || port == PORT_A) { if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev)) intel_dp->DP |= intel_dp->color_range; @@ -884,22 +878,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) intel_dp->DP |= DP_ENHANCED_FRAMING; - if (intel_crtc->pipe == 1) + if (crtc->pipe == 1) intel_dp->DP |= DP_PIPEB_SELECT; - - if (port == PORT_A && !IS_VALLEYVIEW(dev)) { - /* don't miss out required setting for eDP */ - if (adjusted_mode->clock < 200000) - intel_dp->DP |= DP_PLL_FREQ_160MHZ; - else - intel_dp->DP |= DP_PLL_FREQ_270MHZ; - } } else { intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; } if (port == PORT_A && !IS_VALLEYVIEW(dev)) - ironlake_set_pll_edp(crtc, adjusted_mode->clock); + ironlake_set_pll_cpu_edp(intel_dp); } #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index fdf6303be0a9..ffe9d35b37b4 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -140,7 +140,8 @@ struct intel_encoder { * it is connected to in the pipe parameter. */ bool (*get_hw_state)(struct intel_encoder *, enum pipe *pipe); /* Reconstructs the equivalent mode flags for the current hardware - * state. */ + * state. This must be called _after_ display->get_pipe_config has + * pre-filled the pipe config. */ void (*get_config)(struct intel_encoder *, struct intel_crtc_config *pipe_config); int crtc_mask; @@ -193,6 +194,17 @@ typedef struct dpll { } intel_clock_t; struct intel_crtc_config { + /** + * quirks - bitfield with hw state readout quirks + * + * For various reasons the hw state readout code might not be able to + * completely faithfully read out the current state. These cases are + * tracked with quirk flags so that fastboot and state checker can act + * accordingly. 
+ */ +#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ + unsigned long quirks; + struct drm_display_mode requested_mode; struct drm_display_mode adjusted_mode; /* This flag must be set by the encoder's compute_config callback if it @@ -241,14 +253,21 @@ struct intel_crtc_config { * haswell. */ struct dpll dpll; + /* Selected dpll when shared or DPLL_ID_PRIVATE. */ + enum intel_dpll_id shared_dpll; + + /* Actual register state of the dpll, for shared dpll cross-checking. */ + struct intel_dpll_hw_state dpll_hw_state; + int pipe_bpp; struct intel_link_m_n dp_m_n; - /** - * This is currently used by DP and HDMI encoders since those can have a - * target pixel clock != the port link clock (which is currently stored - * in adjusted_mode->clock). + + /* + * Frequence the dpll for the port should run at. Differs from the + * adjusted dotclock e.g. for DP or 12bpc hdmi mode. */ - int pixel_target_clock; + int port_clock; + /* Used by SDVO (and if we ever fix it, HDMI). */ unsigned pixel_multiplier; @@ -304,8 +323,6 @@ struct intel_crtc { struct intel_crtc_config config; - /* We can share PLLs across outputs if the timings match */ - struct intel_pch_pll *pch_pll; uint32_t ddi_pll_sel; /* reset counter value when the last flip was submitted */ @@ -562,9 +579,10 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, extern void intel_dvo_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev); extern void intel_mark_busy(struct drm_device *dev); -extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); +extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj, + struct intel_ring_buffer *ring); extern void intel_mark_idle(struct drm_device *dev); -extern bool intel_lvds_init(struct drm_device *dev); +extern void intel_lvds_init(struct drm_device *dev); extern bool intel_is_dual_link_lvds(struct drm_device *dev); extern void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); @@ -628,11 +646,11 @@ extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_crtc_update_dpms(struct drm_crtc *crtc); extern void intel_encoder_destroy(struct drm_encoder *encoder); extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); -extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder); extern void intel_connector_dpms(struct drm_connector *, int mode); extern bool intel_connector_get_hw_state(struct intel_connector *connector); extern void intel_modeset_check_state(struct drm_device *dev); extern void intel_plane_restore(struct drm_plane *plane); +extern void intel_plane_disable(struct drm_plane *plane); static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector) @@ -767,6 +785,10 @@ extern void intel_update_fbc(struct drm_device *dev); extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); extern void intel_gpu_ips_teardown(void); +/* Power well */ +extern int i915_init_power_well(struct drm_device *dev); +extern void i915_remove_power_well(struct drm_device *dev); + extern bool intel_display_power_enabled(struct drm_device *dev, enum intel_display_power_domain domain); extern void intel_init_power_well(struct drm_device *dev); @@ -786,7 +808,7 @@ extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc); extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc); extern void intel_ddi_setup_hw_pll_state(struct 
drm_device *dev); -extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock); +extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc); extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc); extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc); extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder); diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 3b03c3c6cc5d..dff669e2387f 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c @@ -60,8 +60,9 @@ static struct fb_ops intelfb_ops = { static int intelfb_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { - struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; - struct drm_device *dev = ifbdev->helper.dev; + struct intel_fbdev *ifbdev = + container_of(helper, struct intel_fbdev, helper); + struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct fb_info *info; struct drm_framebuffer *fb; @@ -108,7 +109,7 @@ static int intelfb_create(struct drm_fb_helper *helper, goto out_unpin; } - info->par = ifbdev; + info->par = helper; ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj); if (ret) @@ -217,7 +218,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, int intel_fbdev_init(struct drm_device *dev) { struct intel_fbdev *ifbdev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); @@ -242,7 +243,7 @@ int intel_fbdev_init(struct drm_device *dev) void intel_fbdev_initial_config(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; /* Due to peculiar init order wrt to hpd handling this is separate. */ drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); @@ -250,7 +251,7 @@ void intel_fbdev_initial_config(struct drm_device *dev) void intel_fbdev_fini(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (!dev_priv->fbdev) return; @@ -261,7 +262,7 @@ void intel_fbdev_fini(struct drm_device *dev) void intel_fbdev_set_suspend(struct drm_device *dev, int state) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; @@ -274,7 +275,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state) * been restored from swap. If the object is stolen however, it will be * full of whatever garbage was left in there. 
*/ - if (!state && ifbdev->ifb.obj->stolen) + if (state == FBINFO_STATE_RUNNING && ifbdev->ifb.obj->stolen) memset_io(info->screen_base, 0, info->screen_size); fb_set_suspend(info, state); @@ -284,14 +285,14 @@ MODULE_LICENSE("GPL and additional rights"); void intel_fb_output_poll_changed(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); } void intel_fb_restore_mode(struct drm_device *dev) { int ret; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->num_pipes == 0) return; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 8062a92e6e80..bc12518a21b4 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -835,9 +835,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, desired_bpp = 12*3; /* Need to adjust the port link by 1.5x for 12bpc. */ - adjusted_mode->clock = clock_12bpc; - pipe_config->pixel_target_clock = - pipe_config->requested_mode.clock; + pipe_config->port_clock = clock_12bpc; } else { DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n"); desired_bpp = 8*3; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 0ef8b4dc835f..2abb2d3c727b 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -264,9 +264,6 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, return false; } - if (intel_encoder_check_is_cloned(&lvds_encoder->base)) - return false; - if ((I915_READ(lvds_encoder->reg) & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) lvds_bpp = 8*3; @@ -880,7 +877,7 @@ static bool intel_lvds_supported(struct drm_device *dev) * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). 
*/ -bool intel_lvds_init(struct drm_device *dev) +void intel_lvds_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds_encoder *lvds_encoder; @@ -898,35 +895,35 @@ bool intel_lvds_init(struct drm_device *dev) u8 pin; if (!intel_lvds_supported(dev)) - return false; + return; /* Skip init on machines we know falsely report LVDS */ if (dmi_check_system(intel_no_lvds)) - return false; + return; pin = GMBUS_PORT_PANEL; if (!lvds_is_present_in_vbt(dev, &pin)) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); - return false; + return; } if (HAS_PCH_SPLIT(dev)) { if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) - return false; + return; if (dev_priv->vbt.edp_support) { DRM_DEBUG_KMS("disable LVDS for eDP support\n"); - return false; + return; } } lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL); if (!lvds_encoder) - return false; + return; lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL); if (!lvds_connector) { kfree(lvds_encoder); - return false; + return; } lvds_encoder->attached_connector = lvds_connector; @@ -1097,7 +1094,7 @@ out: intel_panel_init(&intel_connector->panel, fixed_mode); intel_panel_setup_backlight(connector); - return true; + return; failed: DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); @@ -1107,5 +1104,5 @@ failed: drm_mode_destroy(dev, fixed_mode); kfree(lvds_encoder); kfree(lvds_connector); - return false; + return; } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index 5c2d6939600e..79be7cfd3152 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -312,7 +312,7 @@ static void intel_didl_outputs(struct drm_device *dev) list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { if (i >= 8) { dev_printk(KERN_ERR, &dev->pdev->dev, - "More than 8 outputs detected\n"); + "More than 8 outputs detected via ACPI\n"); return; } status = @@ -339,7 +339,7 @@ blind_set: int output_type = ACPI_OTHER_OUTPUT; if (i >= 8) { dev_printk(KERN_ERR, &dev->pdev->dev, - "More than 8 outputs detected\n"); + "More than 8 outputs in connector list\n"); return; } switch (connector->connector_type) { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 836794b68fc6..a3698812e9c7 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay, int ret; BUG_ON(overlay->last_flip_req); - ret = i915_add_request(ring, NULL, &overlay->last_flip_req); + ret = i915_add_request(ring, &overlay->last_flip_req); if (ret) return ret; @@ -286,7 +286,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, intel_ring_emit(ring, flip_addr); intel_ring_advance(ring); - return i915_add_request(ring, NULL, &overlay->last_flip_req); + return i915_add_request(ring, &overlay->last_flip_req); } static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 49a188718f9d..b27bda07f4ae 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -274,7 +274,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval) struct drm_i915_gem_object *obj = intel_fb->obj; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); + I915_WRITE(IVB_FBC_RT_BASE, 
obj->gtt_offset); I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X | IVB_DPFC_CTL_FENCE_EN | @@ -431,7 +431,7 @@ void intel_disable_fbc(struct drm_device *dev) * - no pixel mulitply/line duplication * - no alpha buffer discard * - no dual wide - * - framebuffer <= 2048 in width, 1536 in height + * - framebuffer <= max_hdisplay in width, max_vdisplay in height * * We can't assume that any compression will take place (worst case), * so the compressed buffer has to be the same size as the uncompressed @@ -449,6 +449,7 @@ void intel_update_fbc(struct drm_device *dev) struct intel_framebuffer *intel_fb; struct drm_i915_gem_object *obj; int enable_fbc; + unsigned int max_hdisplay, max_vdisplay; if (!i915_powersave) return; @@ -507,8 +508,16 @@ void intel_update_fbc(struct drm_device *dev) dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; goto out_disable; } - if ((crtc->mode.hdisplay > 2048) || - (crtc->mode.vdisplay > 1536)) { + + if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { + max_hdisplay = 4096; + max_vdisplay = 2048; + } else { + max_hdisplay = 2048; + max_vdisplay = 1536; + } + if ((crtc->mode.hdisplay > max_hdisplay) || + (crtc->mode.vdisplay > max_vdisplay)) { DRM_DEBUG_KMS("mode too large for compression, disabling\n"); dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; goto out_disable; @@ -2078,10 +2087,7 @@ static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pixel_rate, pfit_size; - if (intel_crtc->config.pixel_target_clock) - pixel_rate = intel_crtc->config.pixel_target_clock; - else - pixel_rate = intel_crtc->config.adjusted_mode.clock; + pixel_rate = intel_crtc->config.adjusted_mode.clock; /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to * adjust the pixel_rate here. */ @@ -4381,6 +4387,19 @@ static void ibx_init_clock_gating(struct drm_device *dev) I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); } +static void g4x_disable_trickle_feed(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; + + for_each_pipe(pipe) { + I915_WRITE(DSPCNTR(pipe), + I915_READ(DSPCNTR(pipe)) | + DISPPLANE_TRICKLE_FEED_DISABLE); + intel_flush_display_plane(dev_priv, pipe); + } +} + static void ironlake_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4444,6 +4463,8 @@ static void ironlake_init_clock_gating(struct drm_device *dev) I915_WRITE(CACHE_MODE_0, _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); + g4x_disable_trickle_feed(dev); + ibx_init_clock_gating(dev); } @@ -4498,7 +4519,6 @@ static void gen6_check_mch_setup(struct drm_device *dev) static void gen6_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); @@ -4574,12 +4594,7 @@ static void gen6_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | GEN6_MBCTL_ENABLE_BOOT_FETCH); - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } + g4x_disable_trickle_feed(dev); /* The default value should be 0x200 according to docs, but the two * platforms I checked have a 0 for this. (Maybe BIOS overrides?) 
*/ @@ -4640,7 +4655,6 @@ static void lpt_suspend_hw(struct drm_device *dev) static void haswell_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; I915_WRITE(WM3_LP_ILK, 0); I915_WRITE(WM2_LP_ILK, 0); @@ -4666,12 +4680,7 @@ static void haswell_init_clock_gating(struct drm_device *dev) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } + g4x_disable_trickle_feed(dev); /* WaVSRefCountFullforceMissDisable:hsw */ gen7_setup_fixed_func_scheduler(dev_priv); @@ -4697,7 +4706,6 @@ static void haswell_init_clock_gating(struct drm_device *dev) static void ivybridge_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; uint32_t snpcr; I915_WRITE(WM3_LP_ILK, 0); @@ -4766,12 +4774,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } + g4x_disable_trickle_feed(dev); /* WaMbcDriverBootEnable:ivb */ I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) | @@ -4798,13 +4801,8 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) static void valleyview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - int pipe; - - I915_WRITE(WM3_LP_ILK, 0); - I915_WRITE(WM2_LP_ILK, 0); - I915_WRITE(WM1_LP_ILK, 0); - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); + I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); /* WaDisableEarlyCull:vlv */ I915_WRITE(_3D_CHICKEN3, @@ -4875,12 +4873,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE); - for_each_pipe(pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); - intel_flush_display_plane(dev_priv, pipe); - } + I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); I915_WRITE(CACHE_MODE_1, _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE)); @@ -4922,6 +4915,8 @@ static void g4x_init_clock_gating(struct drm_device *dev) /* WaDisableRenderCachePipelinedFlush */ I915_WRITE(CACHE_MODE_0, _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE)); + + g4x_disable_trickle_feed(dev); } static void crestline_init_clock_gating(struct drm_device *dev) @@ -4933,6 +4928,8 @@ static void crestline_init_clock_gating(struct drm_device *dev) I915_WRITE(DSPCLK_GATE_D, 0); I915_WRITE(RAMCLK_GATE_D, 0); I915_WRITE16(DEUC, 0); + I915_WRITE(MI_ARB_STATE, + _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); } static void broadwater_init_clock_gating(struct drm_device *dev) @@ -4945,6 +4942,8 @@ static void broadwater_init_clock_gating(struct drm_device *dev) I965_ISC_CLOCK_GATE_DISABLE | I965_FBC_CLOCK_GATE_DISABLE); I915_WRITE(RENCLK_GATE_D2, 0); + I915_WRITE(MI_ARB_STATE, + _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); } static void gen3_init_clock_gating(struct drm_device *dev) @@ -5022,18 +5021,12 @@ bool intel_display_power_enabled(struct drm_device *dev, } } -void intel_set_power_well(struct drm_device *dev, bool enable) +static void __intel_set_power_well(struct drm_device *dev, bool enable) { struct drm_i915_private 
*dev_priv = dev->dev_private; bool is_enabled, enable_requested; uint32_t tmp; - if (!HAS_POWER_WELL(dev)) - return; - - if (!i915_disable_power_well && !enable) - return; - tmp = I915_READ(HSW_PWR_WELL_DRIVER); is_enabled = tmp & HSW_PWR_WELL_STATE; enable_requested = tmp & HSW_PWR_WELL_ENABLE; @@ -5056,6 +5049,79 @@ void intel_set_power_well(struct drm_device *dev, bool enable) } } +static struct i915_power_well *hsw_pwr; + +/* Display audio driver power well request */ +void i915_request_power_well(void) +{ + if (WARN_ON(!hsw_pwr)) + return; + + spin_lock_irq(&hsw_pwr->lock); + if (!hsw_pwr->count++ && + !hsw_pwr->i915_request) + __intel_set_power_well(hsw_pwr->device, true); + spin_unlock_irq(&hsw_pwr->lock); +} +EXPORT_SYMBOL_GPL(i915_request_power_well); + +/* Display audio driver power well release */ +void i915_release_power_well(void) +{ + if (WARN_ON(!hsw_pwr)) + return; + + spin_lock_irq(&hsw_pwr->lock); + WARN_ON(!hsw_pwr->count); + if (!--hsw_pwr->count && + !hsw_pwr->i915_request) + __intel_set_power_well(hsw_pwr->device, false); + spin_unlock_irq(&hsw_pwr->lock); +} +EXPORT_SYMBOL_GPL(i915_release_power_well); + +int i915_init_power_well(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + hsw_pwr = &dev_priv->power_well; + + hsw_pwr->device = dev; + spin_lock_init(&hsw_pwr->lock); + hsw_pwr->count = 0; + + return 0; +} + +void i915_remove_power_well(struct drm_device *dev) +{ + hsw_pwr = NULL; +} + +void intel_set_power_well(struct drm_device *dev, bool enable) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_power_well *power_well = &dev_priv->power_well; + + if (!HAS_POWER_WELL(dev)) + return; + + if (!i915_disable_power_well && !enable) + return; + + spin_lock_irq(&power_well->lock); + power_well->i915_request = enable; + + /* only reject "disable" power well request */ + if (power_well->count && !enable) { + spin_unlock_irq(&power_well->lock); + return; + } + + __intel_set_power_well(dev, enable); + spin_unlock_irq(&power_well->lock); +} + /* * Starting with Haswell, we have a "Power Down Well" that can be turned off * when not needed anymore. 
We have 4 registers that can request the power well diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 0e72da6ad0fa..e51ab552046c 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -280,6 +280,27 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) return 0; } +static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) +{ + int ret; + + if (!ring->fbc_dirty) + return 0; + + ret = intel_ring_begin(ring, 4); + if (ret) + return ret; + intel_ring_emit(ring, MI_NOOP); + /* WaFbcNukeOn3DBlt:ivb/hsw */ + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit(ring, MSG_FBC_REND_STATE); + intel_ring_emit(ring, value); + intel_ring_advance(ring); + + ring->fbc_dirty = false; + return 0; +} + static int gen7_render_ring_flush(struct intel_ring_buffer *ring, u32 invalidate_domains, u32 flush_domains) @@ -336,6 +357,9 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring, intel_ring_emit(ring, 0); intel_ring_advance(ring); + if (flush_domains) + return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); + return 0; } @@ -429,6 +453,8 @@ static int init_ring_common(struct intel_ring_buffer *ring) ring->last_retired_head = -1; } + memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); + out: if (HAS_FORCE_WAKE(dev)) gen6_gt_force_wake_put(dev_priv); @@ -1486,7 +1512,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring) /* We need to add any requests required to flush the objects and ring */ if (ring->outstanding_lazy_request) { - ret = i915_add_request(ring, NULL, NULL); + ret = i915_add_request(ring, NULL); if (ret) return ret; } @@ -1685,6 +1711,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, static int gen6_ring_flush(struct intel_ring_buffer *ring, u32 invalidate, u32 flush) { + struct drm_device *dev = ring->dev; uint32_t cmd; int ret; @@ -1707,6 +1734,10 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring, intel_ring_emit(ring, 0); intel_ring_emit(ring, MI_NOOP); intel_ring_advance(ring); + + if (IS_GEN7(dev) && flush) + return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); + return 0; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 022d07e43d12..799f04c9da45 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -37,8 +37,14 @@ struct intel_hw_status_page { #define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base)) #define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base)) +enum intel_ring_hangcheck_action { wait, active, kick, hung }; + struct intel_ring_hangcheck { + bool deadlock; u32 seqno; + u32 acthd; + int score; + enum intel_ring_hangcheck_action action; }; struct intel_ring_buffer { @@ -138,6 +144,7 @@ struct intel_ring_buffer { */ u32 outstanding_lazy_request; bool gpu_caches_dirty; + bool fbc_dirty; wait_queue_head_t irq_queue; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index c55841937705..2628d5622449 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -80,7 +80,7 @@ struct intel_sdvo { /* * Capabilities of the SDVO device returned by - * i830_sdvo_get_capabilities() + * intel_sdvo_get_capabilities() */ struct intel_sdvo_caps caps; @@ -1219,6 +1219,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder) switch (intel_crtc->config.pixel_multiplier) { default: + WARN(1, "unknown pixel mutlipler 
specified\n"); case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; @@ -1276,7 +1277,7 @@ static bool intel_sdvo_connector_get_hw_state(struct intel_connector *connector) struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(&connector->base); struct intel_sdvo *intel_sdvo = intel_attached_sdvo(&connector->base); - u16 active_outputs; + u16 active_outputs = 0; intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs); @@ -1292,7 +1293,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); - u16 active_outputs; + u16 active_outputs = 0; u32 tmp; tmp = I915_READ(intel_sdvo->sdvo_reg); @@ -1312,28 +1313,69 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder, static void intel_sdvo_get_config(struct intel_encoder *encoder, struct intel_crtc_config *pipe_config) { + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); struct intel_sdvo_dtd dtd; - u32 flags = 0; + int encoder_pixel_multiplier = 0; + u32 flags = 0, sdvox; + u8 val; bool ret; ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd); if (!ret) { + /* Some sdvo encoders are not spec compliant and don't + * implement the mandatory get_timings function. */ DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n"); - return; + pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS; + } else { + if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) + flags |= DRM_MODE_FLAG_PHSYNC; + else + flags |= DRM_MODE_FLAG_NHSYNC; + + if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) + flags |= DRM_MODE_FLAG_PVSYNC; + else + flags |= DRM_MODE_FLAG_NVSYNC; } - if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE) - flags |= DRM_MODE_FLAG_PHSYNC; - else - flags |= DRM_MODE_FLAG_NHSYNC; + pipe_config->adjusted_mode.flags |= flags; - if (dtd.part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE) - flags |= DRM_MODE_FLAG_PVSYNC; - else - flags |= DRM_MODE_FLAG_NVSYNC; + /* + * pixel multiplier readout is tricky: Only on i915g/gm it is stored in + * the sdvo port register, on all other platforms it is part of the dpll + * state. Since the general pipe state readout happens before the + * encoder->get_config we so already have a valid pixel multplier on all + * other platfroms. + */ + if (IS_I915G(dev) || IS_I915GM(dev)) { + sdvox = I915_READ(intel_sdvo->sdvo_reg); + pipe_config->pixel_multiplier = + ((sdvox & SDVO_PORT_MULTIPLY_MASK) + >> SDVO_PORT_MULTIPLY_SHIFT) + 1; + } - pipe_config->adjusted_mode.flags |= flags; + /* Cross check the port pixel multiplier with the sdvo encoder state. 
*/ + intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1); + switch (val) { + case SDVO_CLOCK_RATE_MULT_1X: + encoder_pixel_multiplier = 1; + break; + case SDVO_CLOCK_RATE_MULT_2X: + encoder_pixel_multiplier = 2; + break; + case SDVO_CLOCK_RATE_MULT_4X: + encoder_pixel_multiplier = 4; + break; + } + + if(HAS_PCH_SPLIT(dev)) + return; /* no pixel multiplier readout support yet */ + + WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier, + "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n", + pipe_config->pixel_multiplier, encoder_pixel_multiplier); } static void intel_disable_sdvo(struct intel_encoder *encoder) @@ -2819,7 +2861,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *intel_encoder; struct intel_sdvo *intel_sdvo; - u32 hotplug_mask; int i; intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL); if (!intel_sdvo) @@ -2848,18 +2889,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) } } - hotplug_mask = 0; - if (IS_G4X(dev)) { - hotplug_mask = intel_sdvo->is_sdvob ? - SDVOB_HOTPLUG_INT_STATUS_G4X : SDVOC_HOTPLUG_INT_STATUS_G4X; - } else if (IS_GEN4(dev)) { - hotplug_mask = intel_sdvo->is_sdvob ? - SDVOB_HOTPLUG_INT_STATUS_I965 : SDVOC_HOTPLUG_INT_STATUS_I965; - } else { - hotplug_mask = intel_sdvo->is_sdvob ? - SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; - } - intel_encoder->compute_config = intel_sdvo_compute_config; intel_encoder->disable = intel_disable_sdvo; intel_encoder->mode_set = intel_sdvo_mode_set; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 04d38d4d811a..1fa5612a4572 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -957,6 +957,14 @@ void intel_plane_restore(struct drm_plane *plane) intel_plane->src_w, intel_plane->src_h); } +void intel_plane_disable(struct drm_plane *plane) +{ + if (!plane->crtc || !plane->fb) + return; + + intel_disable_plane(plane); +} + static const struct drm_plane_funcs intel_plane_funcs = { .update_plane = intel_update_plane, .disable_plane = intel_disable_plane, diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 7d11a5adc985..39debd80d190 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -914,9 +914,6 @@ intel_tv_compute_config(struct intel_encoder *encoder, if (!tv_mode) return false; - if (intel_encoder_check_is_cloned(&intel_tv->base)) - return false; - pipe_config->adjusted_mode.clock = tv_mode->clock; DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); pipe_config->pipe_bpp = 8*3; diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h new file mode 100644 index 000000000000..cfdc884405b7 --- /dev/null +++ b/include/drm/i915_powerwell.h @@ -0,0 +1,36 @@ +/************************************************************************** + * + * Copyright 2013 Intel Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * + **************************************************************************/ + +#ifndef _I915_POWERWELL_H_ +#define _I915_POWERWELL_H_ + +/* For use by hda_i915 driver */ +extern void i915_request_power_well(void); +extern void i915_release_power_well(void); + +#endif /* _I915_POWERWELL_H_ */ diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig index 80a7d44bcf81..c5a872ca7703 100644 --- a/sound/pci/hda/Kconfig +++ b/sound/pci/hda/Kconfig @@ -152,6 +152,16 @@ config SND_HDA_CODEC_HDMI snd-hda-codec-hdmi. This module is automatically loaded at probing. +config SND_HDA_I915 + bool "Build Display HD-audio controller/codec power well support for i915 cards" + depends on DRM_I915 + help + Say Y here to include full HDMI and DisplayPort HD-audio controller/codec + power-well support for Intel Haswell graphics cards based on the i915 driver. + + Note that this option must be enabled for Intel Haswell C+ stepping machines, otherwise + the GPU audio controller/codecs will not be initialized or may be damaged when exiting from S3 mode. + config SND_HDA_CODEC_CIRRUS bool "Build Cirrus Logic codec support" default y diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile index 24a251497a1f..c091438286a3 100644 --- a/sound/pci/hda/Makefile +++ b/sound/pci/hda/Makefile @@ -1,4 +1,6 @@ snd-hda-intel-objs := hda_intel.o +# for haswell power well +snd-hda-intel-$(CONFIG_SND_HDA_I915) += hda_i915.o snd-hda-codec-y := hda_codec.o hda_jack.o hda_auto_parser.o snd-hda-codec-$(CONFIG_SND_HDA_GENERIC) += hda_generic.o diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c new file mode 100644 index 000000000000..76c13d5b3ca0 --- /dev/null +++ b/sound/pci/hda/hda_i915.c @@ -0,0 +1,75 @@ +/* + * hda_i915.c - routines for Haswell HDA controller power well support + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <sound/core.h> +#include <drm/i915_powerwell.h> +#include "hda_i915.h" + +static void (*get_power)(void); +static void (*put_power)(void); + +void hda_display_power(bool enable) +{ + if (!get_power || !put_power) + return; + + snd_printdd("HDA display power %s\n", + enable ? "Enable" : "Disable"); + if (enable) + get_power(); + else + put_power(); +} + +int hda_i915_init(void) +{ + int err = 0; + + get_power = symbol_request(i915_request_power_well); + if (!get_power) { + snd_printk(KERN_WARNING "hda-i915: failed to get i915 power-well symbol\n"); + return -ENODEV; + } + + put_power = symbol_request(i915_release_power_well); + if (!put_power) { + symbol_put(i915_request_power_well); + get_power = NULL; + return -ENODEV; + } + + snd_printd("HDA driver got power-well symbols from the i915 module\n"); + + return err; +} + +int hda_i915_exit(void) +{ + if (get_power) { + symbol_put(i915_request_power_well); + get_power = NULL; + } + if (put_power) { + symbol_put(i915_release_power_well); + put_power = NULL; + } + + return 0; +} diff --git a/sound/pci/hda/hda_i915.h b/sound/pci/hda/hda_i915.h new file mode 100644 index 000000000000..5a63da2c53e5 --- /dev/null +++ b/sound/pci/hda/hda_i915.h @@ -0,0 +1,35 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#ifndef __SOUND_HDA_I915_H +#define __SOUND_HDA_I915_H + +#ifdef CONFIG_SND_HDA_I915 +void hda_display_power(bool enable); +int hda_i915_init(void); +int hda_i915_exit(void); +#else +static inline void hda_display_power(bool enable) {} +static inline int hda_i915_init(void) +{ + return -ENODEV; +} +static inline int hda_i915_exit(void) +{ + return 0; +} +#endif + +#endif diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index de18722c4873..35e9f8b010a7 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -62,6 +62,7 @@ #include <linux/vga_switcheroo.h> #include <linux/firmware.h> #include "hda_codec.h" +#include "hda_i915.h" static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; @@ -541,6 +542,10 @@ struct azx { /* for pending irqs */ struct work_struct irq_pending_work; +#ifdef CONFIG_SND_HDA_I915 + struct work_struct probe_work; +#endif + /* reboot notifier (for mysterious hangup problem at power-down) */ struct notifier_block reboot_notifier; @@ -594,6 +599,7 @@ enum { #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ +#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 power well support */ /* quirks for Intel PCH */ #define AZX_DCAPS_INTEL_PCH_NOPM \ @@ -2900,6 +2906,8 @@ static int azx_suspend(struct device *dev) pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, PCI_D3hot); + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) + hda_display_power(false); return 0; } @@ -2912,6 +2920,8 @@ static int azx_resume(struct device *dev) if (chip->disabled) return 0; + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) + hda_display_power(true); pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { @@ -2944,6 +2954,8 @@ static int azx_runtime_suspend(struct device *dev) azx_stop_chip(chip); azx_clear_irq_pending(chip); + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) + hda_display_power(false); return 0; } @@ -2952,6 +2964,8 @@ static int azx_runtime_resume(struct device *dev) struct snd_card *card = dev_get_drvdata(dev); struct azx *chip = card->private_data; + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) + hda_display_power(true); azx_init_pci(chip); azx_init_chip(chip, 1); return 0; @@ -3006,7 +3020,6 @@ static void azx_notifier_unregister(struct azx *chip) unregister_reboot_notifier(&chip->reboot_notifier); } -static int azx_first_init(struct azx *chip); static int azx_probe_continue(struct azx *chip); #ifdef SUPPORT_VGA_SWITCHEROO @@ -3033,8 +3046,7 @@ static void azx_vs_set_state(struct pci_dev *pci, snd_printk(KERN_INFO SFX "%s: Start delayed initialization\n", pci_name(chip->pci)); - if (azx_first_init(chip) < 0 || - azx_probe_continue(chip) < 0) { + if (azx_probe_continue(chip) < 0) { snd_printk(KERN_ERR SFX "%s: initialization error\n", pci_name(chip->pci)); @@ -3120,8 +3132,13 @@ static int register_vga_switcheroo(struct azx *chip) */ static int azx_free(struct azx *chip) { + struct pci_dev *pci = chip->pci; int i; + if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) + && chip->running) + pm_runtime_get_noresume(&pci->dev); + azx_del_card_list(chip); azx_notifier_unregister(chip); @@ -3173,6 +3190,10 @@ static int azx_free(struct azx *chip) if (chip->fw) release_firmware(chip->fw); #endif + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { + hda_display_power(false); + hda_i915_exit(); + } kfree(chip); return 0; @@ -3398,6 
+3419,13 @@ static void azx_check_snoop_available(struct azx *chip) } } +#ifdef CONFIG_SND_HDA_I915 +static void azx_probe_work(struct work_struct *work) +{ + azx_probe_continue(container_of(work, struct azx, probe_work)); +} +#endif + /* * constructor */ @@ -3473,7 +3501,13 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci, return err; } +#ifdef CONFIG_SND_HDA_I915 + /* continue probing in a work context, as it may trigger a module request */ + INIT_WORK(&chip->probe_work, azx_probe_work); +#endif + *rchip = chip; + return 0; } @@ -3730,11 +3764,6 @@ static int azx_probe(struct pci_dev *pci, } probe_now = !chip->disabled; - if (probe_now) { - err = azx_first_init(chip); - if (err < 0) - goto out_free; - } #ifdef CONFIG_SND_HDA_PATCH_LOADER if (patch[dev] && *patch[dev]) { @@ -3749,15 +3778,22 @@ static int azx_probe(struct pci_dev *pci, } #endif /* CONFIG_SND_HDA_PATCH_LOADER */ + /* continue probing in work context, avoid request_module deadlock */ + if (probe_now && (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)) { +#ifdef CONFIG_SND_HDA_I915 + probe_now = false; + schedule_work(&chip->probe_work); +#else + snd_printk(KERN_ERR SFX "Haswell requires CONFIG_SND_HDA_I915 to be built in\n"); +#endif + } + if (probe_now) { err = azx_probe_continue(chip); if (err < 0) goto out_free; } - if (pci_dev_run_wake(pci)) - pm_runtime_put_noidle(&pci->dev); - dev++; complete_all(&chip->probe_wait); return 0; @@ -3770,9 +3806,24 @@ out_free: static int azx_probe_continue(struct azx *chip) { + struct pci_dev *pci = chip->pci; int dev = chip->dev_index; int err; + /* Request power well for Haswell HDA controller and codec */ + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { + err = hda_i915_init(); + if (err < 0) { + snd_printk(KERN_ERR SFX "Error requesting power well from i915\n"); + goto out_free; + } + hda_display_power(true); + } + + err = azx_first_init(chip); + if (err < 0) + goto out_free; + #ifdef CONFIG_SND_HDA_INPUT_BEEP chip->beep_mode = beep_mode[dev]; #endif @@ -3817,6 +3868,8 @@ static int azx_probe_continue(struct azx *chip) power_down_all_codecs(chip); azx_notifier_register(chip); azx_add_card_list(chip); + if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME) + pm_runtime_put_noidle(&pci->dev); return 0; @@ -3829,9 +3882,6 @@ static void azx_remove(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); - if (pci_dev_run_wake(pci)) - pm_runtime_get_noresume(&pci->dev); - if (card) snd_card_free(card); pci_set_drvdata(pci, NULL); @@ -3864,11 +3914,14 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, /* Haswell */ { PCI_DEVICE(0x8086, 0x0a0c), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH | + AZX_DCAPS_I915_POWERWELL }, { PCI_DEVICE(0x8086, 0x0c0c), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH | + AZX_DCAPS_I915_POWERWELL }, { PCI_DEVICE(0x8086, 0x0d0c), - .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH }, + .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH | + AZX_DCAPS_I915_POWERWELL }, /* 5 Series/3400 */ { PCI_DEVICE(0x8086, 0x3b56), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_INTEL_PCH_NOPM }, |