author	Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>	2006-10-30 22:07:08 -0800
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-31 08:06:59 -0800
commit	33f775eea185e8df7701c4afc2c8fcee85c83282 (patch)
tree	5cbba5e09cba9ab84ddba06cf23e1950ffbec222 /arch/um
parent	d7fb2c3865ca0f95d92e2864c3dc9220789d83f5 (diff)
[PATCH] uml ubd driver: ubd_io_lock usage fixup
Add some comments about the requirements for ubd_io_lock and expand its use.

When an irq signals that the "controller" (i.e. another thread on the host,
which does the actual requests and is the only one blocked on I/O on the host)
has done some work, we call the request function again ourselves
(do_ubd_request). We now do that with ubd_io_lock held - that is useful to
protect against concurrent calls to elv_next_request and so on.

XXX: Maybe we shouldn't call the request function at all. Input needed on
this. Are we supposed to plug and unplug the queue? That code "indirectly"
does that by setting a flag, called do_ubd, which makes the request function
return (it's a residual of the 2.4 block layer interface). Meanwhile, however,
merge this patch, which improves things.

Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Cc: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
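The locking rationale is worth spelling out: ubd_io_lock is the spinlock the
driver hands to the block layer as the request-queue lock, so the block layer
already holds it whenever it invokes do_ubd_request() itself. Below is a
minimal sketch of that registration contract, assuming the 2.6-era
blk_init_queue() interface; the init function shown is illustrative, not the
driver's exact code:

/* The block layer takes the spinlock passed to blk_init_queue() before
 * calling the request function (do_ubd_request here), so a driver that
 * re-runs its own request function, as ubd_handler() now does, must take
 * that same lock first. */
static int __init example_ubd_init(void)	/* hypothetical name */
{
	ubd_queue = blk_init_queue(do_ubd_request, &ubd_io_lock);
	if (ubd_queue == NULL)
		return -ENOMEM;
	return 0;
}

Given that contract, calling do_ubd_request() from ubd_handler() without the
lock would race against the block layer's own locked calls into
elv_next_request(); the last hunk below closes exactly that race.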
Diffstat (limited to 'arch/um')
-rw-r--r--	arch/um/drivers/ubd_kern.c	8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index a3061ae39b3b..6cd8988e8fd0 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -106,6 +106,8 @@ static inline void ubd_set_bit(__u64 bit, unsigned char *data)
 
 #define DRIVER_NAME "uml-blkdev"
 
+/* Can be taken in interrupt context, and is passed to the block layer to lock
+ * the request queue. Kernel side code knows that. */
 static DEFINE_SPINLOCK(ubd_io_lock);
 
 static DEFINE_MUTEX(ubd_lock);
@@ -497,6 +499,8 @@ static void __ubd_finish(struct request *req, int error)
 	end_request(req, 1);
 }
 
+/* Callable only from interrupt context - otherwise you need to do
+ * spin_lock_irq()/spin_lock_irqsave() */
 static inline void ubd_finish(struct request *req, int error)
 {
 	spin_lock(&ubd_io_lock);
@@ -504,7 +508,7 @@ static inline void ubd_finish(struct request *req, int error)
 	spin_unlock(&ubd_io_lock);
 }
 
-/* Called without ubd_io_lock held */
+/* Called without ubd_io_lock held, and only in interrupt context. */
 static void ubd_handler(void)
 {
 	struct io_thread_req req;
@@ -525,7 +529,9 @@ static void ubd_handler(void)
 
 	ubd_finish(rq, req.error);
 	reactivate_fd(thread_fd, UBD_IRQ);
+	spin_lock(&ubd_io_lock);
 	do_ubd_request(ubd_queue);
+	spin_unlock(&ubd_io_lock);
 }
 
 static irqreturn_t ubd_intr(int irq, void *dev)
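The new comments also pin down the calling context: ubd_finish() may use a
plain spin_lock() only because it runs in interrupt context. As a hedged
sketch, a hypothetical process-context caller (not part of the driver) would
need the irqsave variant instead:

/* Since ubd_io_lock is also taken from the irq path, a process-context
 * caller must disable local interrupts while holding it; otherwise
 * ubd_intr() could fire on the same CPU and spin forever on a lock that
 * CPU already holds. */
static void example_finish_from_process(struct request *req, int error)
{
	unsigned long flags;

	spin_lock_irqsave(&ubd_io_lock, flags);
	__ubd_finish(req, error);
	spin_unlock_irqrestore(&ubd_io_lock, flags);
}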