author    NeilBrown <neilb@suse.de>  2007-01-26 00:57:11 -0800
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-01-26 13:51:00 -0800
commit    2a2275d630b982e5f90206f9bc497f6695a3ec5d (patch)
tree      88cddee709b2107b74e5424810d4ffb6e3772382
parent    a0ad13ef643a5829d63c456ab6143bbda60b44a9 (diff)
[PATCH] md: fix potential memalloc deadlock in md
If a GFP_KERNEL allocation is attempted in md while the mddev_lock is held, it is possible for a deadlock to eventuate.  This happens if the array was marked 'clean', and the memalloc triggers a write-out to the md device.  For the writeout to succeed, the array must be marked 'dirty', and that requires getting the mddev_lock.

So, before attempting a GFP_KERNEL allocation while holding the lock, make sure the array is marked 'dirty' (unless it is currently read-only).

Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
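For illustration only (not part of the patch): a minimal sketch of the caller pattern the fix establishes.  The surrounding function, its name and the buffer size are hypothetical; md_allow_write(), mddev_t, mddev_lock and the GFP_KERNEL allocation are taken from the patch below.

    #include <linux/slab.h>        /* kmalloc, kfree, GFP_KERNEL */
    #include <linux/raid/md.h>     /* mddev_t, md_allow_write() */

    /* Hypothetical caller; assumed to run with mddev_lock already held. */
    static int example_alloc_under_mddev_lock(mddev_t *mddev)
    {
            char *buf;

            /*
             * Mark the array 'active' first.  If the GFP_KERNEL allocation
             * below triggers a write-out to this md device, the write path
             * then does not need to take mddev_lock to flip the array from
             * 'clean' to 'dirty', so it cannot deadlock against this caller.
             */
            md_allow_write(mddev);

            buf = kmalloc(512, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* ... use buf ... */
            kfree(buf);
            return 0;
    }

The ordering is the whole point: md_allow_write() marks the array dirty (and writes out the superblock) while it is still safe to do so, so any memory-reclaim writeback started by the later allocation finds the array already writable.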
-rw-r--r--  drivers/md/md.c          | 29
-rw-r--r--  drivers/md/raid1.c       |  2
-rw-r--r--  drivers/md/raid5.c       |  3
-rw-r--r--  include/linux/raid/md.h  |  2
4 files changed, 35 insertions(+), 1 deletion(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ec3d8e8a0bd3..e8807ea5377d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3564,6 +3564,8 @@ static int get_bitmap_file(mddev_t * mddev, void __user * arg)
char *ptr, *buf = NULL;
int err = -ENOMEM;
+ md_allow_write(mddev);
+
file = kmalloc(sizeof(*file), GFP_KERNEL);
if (!file)
goto out;
@@ -5032,6 +5034,33 @@ void md_write_end(mddev_t *mddev)
}
}
+/* md_allow_write(mddev)
+ * Calling this ensures that the array is marked 'active' so that writes
+ * may proceed without blocking. It is important to call this before
+ * attempting a GFP_KERNEL allocation while holding the mddev lock.
+ * Must be called with mddev_lock held.
+ */
+void md_allow_write(mddev_t *mddev)
+{
+ if (!mddev->pers)
+ return;
+ if (mddev->ro)
+ return;
+
+ spin_lock_irq(&mddev->write_lock);
+ if (mddev->in_sync) {
+ mddev->in_sync = 0;
+ set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+ if (mddev->safemode_delay &&
+ mddev->safemode == 0)
+ mddev->safemode = 1;
+ spin_unlock_irq(&mddev->write_lock);
+ md_update_sb(mddev, 0);
+ } else
+ spin_unlock_irq(&mddev->write_lock);
+}
+EXPORT_SYMBOL_GPL(md_allow_write);
+
static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
#define SYNC_MARKS 10
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ab74d40cac98..97ee870b265d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2104,6 +2104,8 @@ static int raid1_reshape(mddev_t *mddev)
return -EINVAL;
}
+ md_allow_write(mddev);
+
raid_disks = mddev->raid_disks + mddev->delta_disks;
if (raid_disks < conf->raid_disks) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index be008f034ada..8a30b297ac3a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -405,6 +405,8 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
if (newsize <= conf->pool_size)
return 0; /* never bother to shrink */
+ md_allow_write(conf->mddev);
+
/* Step 1 */
sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
@@ -3250,6 +3252,7 @@ raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
else
break;
}
+ md_allow_write(mddev);
while (new > conf->max_nr_stripes) {
if (grow_one_stripe(conf))
conf->max_nr_stripes++;
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 866a1e2b0ce0..fbaeda79b2e9 100644
--- a/include/linux/raid/md.h
+++ b/include/linux/raid/md.h
@@ -94,7 +94,7 @@ extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
struct page *page, int rw);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
-
+extern void md_allow_write(mddev_t *mddev);
#endif /* CONFIG_MD */
#endif