author     Thomas Hellstrom <thellstrom@vmware.com>    2015-01-14 02:33:39 -0800
committer  Thomas Hellstrom <thellstrom@vmware.com>    2015-01-19 03:02:13 -0800
commit     496eb6fd2c3fd13f4b914e537598e5c86ce4f52a (patch)
tree       d2c1fc2d9384e197f54da6196223d7e640c5e0e9 /drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
parent     79305ec6e60d320832505e95c1a028d309fcd2b6 (diff)
drm/vmwgfx: Replace the hw mutex with a hw spinlock
Fixes a case where we call vmw_fifo_idle() from within a wait function with
task state !TASK_RUNNING, which is illegal.

In addition, make the locking fine-grained, so that it is performed once for
every read and write operation. This is of course more costly, but we don't
perform much register access in the timing-critical paths anyway. Instead we
have the extra benefit of being sure that we don't forget the hw lock around
register accesses. I think the kms code is currently quite buggy w.r.t. this.

This fixes Red Hat Bugzilla Bug 1180796.

Cc: stable@vger.kernel.org
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
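To illustrate the first problem above: once a task has set a sleeping state with
prepare_to_wait(), nothing it calls before schedule() may block. The snippet below
is an illustrative sketch only, not the driver's verbatim wait code; the helper name
vmw_wait_seqno_sketch and the vmw_fifo_idle() calling convention are assumptions for
the example. With the old hw_mutex, the register read inside vmw_fifo_idle() could
sleep at exactly this point; with the hw spinlock it cannot.

        /* Illustrative sketch (needs <linux/wait.h> and <linux/sched.h>). */
        static long vmw_wait_seqno_sketch(struct vmw_private *dev_priv,
                                          u32 seqno, long timeout)
        {
                DEFINE_WAIT(wait);

                prepare_to_wait(&dev_priv->fence_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * Task state is now !TASK_RUNNING.  With the old code,
                 * vmw_fifo_idle() ended up in vmw_read(), which took
                 * hw_mutex and could sleep -- illegal here.  With the
                 * hw spinlock the register read can no longer block.
                 */
                if (!vmw_fifo_idle(dev_priv, seqno))
                        timeout = schedule_timeout(timeout);
                finish_wait(&dev_priv->fence_queue, &wait);
                return timeout;
        }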
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_drv.h')
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_drv.h   25
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
 	uint32_t memory_size;
 	bool has_gmr;
 	bool has_mob;
-	struct mutex hw_mutex;
+	spinlock_t hw_lock;
+	spinlock_t cap_lock;
 
 	/*
 	 * VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
 	atomic_t marker_seq;
 	wait_queue_head_t fence_queue;
 	wait_queue_head_t fifo_queue;
-	int fence_queue_waiters; /* Protected by hw_mutex */
-	int goal_queue_waiters; /* Protected by hw_mutex */
+	spinlock_t waiter_lock;
+	int fence_queue_waiters; /* Protected by waiter_lock */
+	int goal_queue_waiters; /* Protected by waiter_lock */
 	atomic_t fifo_queue_waiters;
 	uint32_t last_read_seqno;
 	spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
 	return (struct vmw_master *) master->driver_priv;
 }
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
 static inline void vmw_write(struct vmw_private *dev_priv,
 			     unsigned int offset, uint32_t value)
 {
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 				unsigned int offset)
 {
-	uint32_t val;
+	unsigned long irq_flags;
+	u32 val;
 
+	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
 	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
 	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
 	return val;
 }
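The waiter counts shown in the second hunk are now protected by the new waiter_lock
rather than hw_mutex, and register updates can simply rely on vmw_write()/vmw_read()
taking hw_lock internally. The following is a minimal sketch of that pattern for a
helper in the spirit of vmw_seqno_waiter_add(); it assumes the driver's existing
SVGA_REG_IRQMASK and SVGA_IRQFLAG_ANY_FENCE definitions and is a simplified
illustration, not the verbatim vmwgfx_irq.c change from this commit.

        /* Minimal sketch, not the driver's verbatim vmwgfx_irq.c code. */
        void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
        {
                spin_lock(&dev_priv->waiter_lock);
                if (dev_priv->fence_queue_waiters++ == 0) {
                        /*
                         * First waiter: unmask the fence IRQ.  vmw_write()
                         * and vmw_read() take hw_lock internally now, so no
                         * outer hw lock is needed around register access.
                         */
                        vmw_write(dev_priv, SVGA_REG_IRQMASK,
                                  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
                                  SVGA_IRQFLAG_ANY_FENCE);
                }
                spin_unlock(&dev_priv->waiter_lock);
        }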