Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c | 88
1 file changed, 67 insertions(+), 21 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index a64798ef481e..4d710b7a133b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -10,6 +10,7 @@
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
@@ -17,6 +18,7 @@
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
+#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
static const char *_name = DM_NAME;
@@ -69,6 +71,7 @@ struct mapped_device {
request_queue_t *queue;
struct gendisk *disk;
+ char name[16];
void *interface_ptr;
@@ -101,6 +104,9 @@ struct mapped_device {
*/
struct super_block *frozen_sb;
struct block_device *suspended_bdev;
+
+ /* forced geometry settings */
+ struct hd_geometry geometry;
};
#define MIN_IOS 256
@@ -226,6 +232,13 @@ static int dm_blk_close(struct inode *inode, struct file *file)
return 0;
}
+static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct mapped_device *md = bdev->bd_disk->private_data;
+
+ return dm_get_geometry(md, geo);
+}
+
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
return mempool_alloc(md->io_pool, GFP_NOIO);
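[Annotation, not part of the patch] With .getgeo wired into dm_blk_dops (see the final hunk), the block layer answers the standard HDIO_GETGEO ioctl by calling dm_blk_getgeo(). A minimal userspace sketch of that path; the /dev/dm-0 node is illustrative only:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hdreg.h>

int main(void)
{
	struct hd_geometry geo;
	int fd = open("/dev/dm-0", O_RDONLY);	/* example device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Dispatched through block_device_operations .getgeo, i.e. dm_blk_getgeo(). */
	if (ioctl(fd, HDIO_GETGEO, &geo) < 0) {
		perror("HDIO_GETGEO");
		close(fd);
		return 1;
	}

	printf("heads=%d sectors=%d cylinders=%d start=%lu\n",
	       geo.heads, geo.sectors, geo.cylinders, geo.start);
	close(fd);
	return 0;
}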
@@ -312,6 +325,33 @@ struct dm_table *dm_get_table(struct mapped_device *md)
return t;
}
+/*
+ * Get the geometry associated with a dm device
+ */
+int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
+{
+ *geo = md->geometry;
+
+ return 0;
+}
+
+/*
+ * Set the geometry of a device.
+ */
+int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
+{
+ sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
+
+ if (geo->start > sz) {
+ DMWARN("Start sector is beyond the geometry limits.");
+ return -EINVAL;
+ }
+
+ md->geometry = *geo;
+
+ return 0;
+}
+
/*-----------------------------------------------------------------
* CRUD START:
* A more elegant soln is in the works that uses the queue
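[Annotation, not part of the patch] Worked example of the bounds check in dm_set_geometry(), with illustrative values: for 255 heads, 63 sectors per track and 1024 cylinders, sz = 1024 * 255 * 63 = 16,450,560 sectors, so any geo->start beyond that offset is rejected with -EINVAL before the geometry is stored.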
@@ -704,14 +744,14 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
/*-----------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
*---------------------------------------------------------------*/
-static DECLARE_MUTEX(_minor_lock);
+static DEFINE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);
static void free_minor(unsigned int minor)
{
- down(&_minor_lock);
+ mutex_lock(&_minor_lock);
idr_remove(&_minor_idr, minor);
- up(&_minor_lock);
+ mutex_unlock(&_minor_lock);
}
/*
@@ -724,7 +764,7 @@ static int specific_minor(struct mapped_device *md, unsigned int minor)
if (minor >= (1 << MINORBITS))
return -EINVAL;
- down(&_minor_lock);
+ mutex_lock(&_minor_lock);
if (idr_find(&_minor_idr, minor)) {
r = -EBUSY;
@@ -749,7 +789,7 @@ static int specific_minor(struct mapped_device *md, unsigned int minor)
}
out:
- up(&_minor_lock);
+ mutex_unlock(&_minor_lock);
return r;
}
@@ -758,7 +798,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor)
int r;
unsigned int m;
- down(&_minor_lock);
+ mutex_lock(&_minor_lock);
r = idr_pre_get(&_minor_idr, GFP_KERNEL);
if (!r) {
@@ -780,7 +820,7 @@ static int next_free_minor(struct mapped_device *md, unsigned int *minor)
*minor = m;
out:
- up(&_minor_lock);
+ mutex_unlock(&_minor_lock);
return r;
}
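[Annotation, not part of the patch] The _minor_lock changes above are the standard semaphore-to-mutex conversion. A minimal sketch of the idiom, with an illustrative lock name that is not from the patch:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* replaces DECLARE_MUTEX(example_lock) */

static void example_critical_section(void)
{
	mutex_lock(&example_lock);	/* was down(&example_lock) */
	/* ... manipulate the shared IDR or other protected state ... */
	mutex_unlock(&example_lock);	/* was up(&example_lock) */
}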
@@ -842,6 +882,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
add_disk(md->disk);
+ format_dev_t(md->name, MKDEV(_major, minor));
atomic_set(&md->pending, 0);
init_waitqueue_head(&md->wait);
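[Annotation, not part of the patch] format_dev_t() renders the device number into the new name[16] field as "major:minor" text, e.g. "253:0" when the dynamically allocated major happens to be 253 (the value is illustrative).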
@@ -904,6 +945,13 @@ static int __bind(struct mapped_device *md, struct dm_table *t)
sector_t size;
size = dm_table_get_size(t);
+
+ /*
+ * Wipe any geometry if the size of the table changed.
+ */
+ if (size != get_capacity(md->disk))
+ memset(&md->geometry, 0, sizeof(md->geometry));
+
__set_size(md, size);
if (size == 0)
return 0;
@@ -967,13 +1015,13 @@ static struct mapped_device *dm_find_md(dev_t dev)
if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
return NULL;
- down(&_minor_lock);
+ mutex_lock(&_minor_lock);
md = idr_find(&_minor_idr, minor);
if (!md || (dm_disk(md)->first_minor != minor))
md = NULL;
- up(&_minor_lock);
+ mutex_unlock(&_minor_lock);
return md;
}
@@ -988,15 +1036,9 @@ struct mapped_device *dm_get_md(dev_t dev)
return md;
}
-void *dm_get_mdptr(dev_t dev)
+void *dm_get_mdptr(struct mapped_device *md)
{
- struct mapped_device *md;
- void *mdptr = NULL;
-
- md = dm_find_md(dev);
- if (md)
- mdptr = md->interface_ptr;
- return mdptr;
+ return md->interface_ptr;
}
void dm_set_mdptr(struct mapped_device *md, void *ptr)
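[Annotation, not part of the patch] dm_get_mdptr() now takes the mapped_device directly instead of a dev_t, so callers resolve the device first (e.g. via dm_get_md()) and the helper simply returns the stored interface pointer.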
@@ -1011,18 +1053,18 @@ void dm_get(struct mapped_device *md)
void dm_put(struct mapped_device *md)
{
- struct dm_table *map = dm_get_table(md);
+ struct dm_table *map;
if (atomic_dec_and_test(&md->holders)) {
+ map = dm_get_table(md);
if (!dm_suspended(md)) {
dm_table_presuspend_targets(map);
dm_table_postsuspend_targets(map);
}
__unbind(md);
+ dm_table_put(map);
free_dev(md);
}
-
- dm_table_put(map);
}
/*
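[Annotation, not part of the patch] In the reworked dm_put(), the table reference is taken only when the last holder drops the device and is released before free_dev(), instead of unconditionally at entry and after the device had already been freed.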
@@ -1107,6 +1149,7 @@ int dm_suspend(struct mapped_device *md, int do_lockfs)
{
struct dm_table *map = NULL;
DECLARE_WAITQUEUE(wait, current);
+ struct bio *def;
int r = -EINVAL;
down(&md->suspend_lock);
@@ -1166,9 +1209,11 @@ int dm_suspend(struct mapped_device *md, int do_lockfs)
/* were we interrupted ? */
r = -EINTR;
if (atomic_read(&md->pending)) {
+ clear_bit(DMF_BLOCK_IO, &md->flags);
+ def = bio_list_get(&md->deferred);
+ __flush_deferred_io(md, def);
up_write(&md->io_lock);
unlock_fs(md);
- clear_bit(DMF_BLOCK_IO, &md->flags);
goto out;
}
up_write(&md->io_lock);
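[Annotation, not part of the patch] When the suspend is aborted because I/O is still pending, the bios queued on the deferred list while DMF_BLOCK_IO was set are now pulled off with bio_list_get() and reissued via __flush_deferred_io() before returning, rather than being left on the list.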
@@ -1262,6 +1307,7 @@ int dm_suspended(struct mapped_device *md)
static struct block_device_operations dm_blk_dops = {
.open = dm_blk_open,
.release = dm_blk_close,
+ .getgeo = dm_blk_getgeo,
.owner = THIS_MODULE
};