Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/Kconfig | 12
-rw-r--r-- | drivers/block/Makefile | 1
-rw-r--r-- | drivers/block/aoe/aoe.h | 93
-rw-r--r-- | drivers/block/aoe/aoeblk.c | 91
-rw-r--r-- | drivers/block/aoe/aoechr.c | 13
-rw-r--r-- | drivers/block/aoe/aoecmd.c | 1233
-rw-r--r-- | drivers/block/aoe/aoedev.c | 265
-rw-r--r-- | drivers/block/aoe/aoemain.c | 10
-rw-r--r-- | drivers/block/aoe/aoenet.c | 61
-rw-r--r-- | drivers/block/cciss_scsi.c | 1
-rw-r--r-- | drivers/block/floppy.c | 5
-rw-r--r-- | drivers/block/loop.c | 4
-rw-r--r-- | drivers/block/mtip32xx/mtip32xx.c | 38
-rw-r--r-- | drivers/block/mtip32xx/mtip32xx.h | 10
-rw-r--r-- | drivers/block/nbd.c | 32
-rw-r--r-- | drivers/block/nvme.c | 155
-rw-r--r-- | drivers/block/rbd.c | 1789
-rw-r--r-- | drivers/block/rbd_types.h | 27
-rw-r--r-- | drivers/block/ub.c | 2474
-rw-r--r-- | drivers/block/virtio_blk.c | 306
-rw-r--r-- | drivers/block/xen-blkback/blkback.c | 3
-rw-r--r-- | drivers/block/xen-blkfront.c | 4 |
22 files changed, 2802 insertions, 3825 deletions
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index a796407123c7..f529407db93f 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -353,18 +353,6 @@ config BLK_DEV_SX8 Use devices /dev/sx8/$N and /dev/sx8/$Np$M. -config BLK_DEV_UB - tristate "Low Performance USB Block driver (deprecated)" - depends on USB - help - This driver supports certain USB attached storage devices - such as flash keys. - - If you enable this driver, it is recommended to avoid conflicts - with usb-storage by enabling USB_LIBUSUAL. - - If unsure, say N. - config BLK_DEV_RAM tristate "RAM block device support" ---help--- diff --git a/drivers/block/Makefile b/drivers/block/Makefile index 5b795059f8fb..17e82df3df74 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o obj-$(CONFIG_VIODASD) += viodasd.o obj-$(CONFIG_BLK_DEV_SX8) += sx8.o -obj-$(CONFIG_BLK_DEV_UB) += ub.o obj-$(CONFIG_BLK_DEV_HD) += hd.o obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index db195abad698..d2ed7f18d1ac 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -1,5 +1,5 @@ -/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ -#define VERSION "47" +/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ +#define VERSION "50" #define AOE_MAJOR 152 #define DEVICE_NAME "aoe" @@ -10,9 +10,6 @@ #define AOE_PARTITIONS (16) #endif -#define SYSMINOR(aoemajor, aoeminor) ((aoemajor) * NPERSHELF + (aoeminor)) -#define AOEMAJOR(sysminor) ((sysminor) / NPERSHELF) -#define AOEMINOR(sysminor) ((sysminor) % NPERSHELF) #define WHITESPACE " \t\v\f\n" enum { @@ -75,72 +72,67 @@ enum { DEVFL_UP = 1, /* device is installed in system and ready for AoE->ATA commands */ DEVFL_TKILL = (1<<1), /* flag for timer to know when to kill self */ DEVFL_EXT = (1<<2), /* device accepts lba48 commands */ - DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */ - DEVFL_GDALLOC = (1<<4), /* need to alloc gendisk */ - DEVFL_KICKME = (1<<5), /* slow polling network card catch */ - DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */ - - BUFFL_FAIL = 1, + DEVFL_GDALLOC = (1<<3), /* need to alloc gendisk */ + DEVFL_KICKME = (1<<4), /* slow polling network card catch */ + DEVFL_NEWSIZE = (1<<5), /* need to update dev size in block layer */ }; enum { DEFAULTBCNT = 2 * 512, /* 2 sectors */ - NPERSHELF = 16, /* number of slots per shelf address */ - FREETAG = -1, MIN_BUFS = 16, NTARGETS = 8, NAOEIFS = 8, - NSKBPOOLMAX = 128, + NSKBPOOLMAX = 256, + NFACTIVE = 61, TIMERTICK = HZ / 10, MINTIMER = HZ >> 2, MAXTIMER = HZ << 1, - HELPWAIT = 20, }; struct buf { - struct list_head bufs; - ulong stime; /* for disk stats */ - ulong flags; ulong nframesout; ulong resid; ulong bv_resid; - ulong bv_off; sector_t sector; struct bio *bio; struct bio_vec *bv; + struct request *rq; }; struct frame { - int tag; + struct list_head head; + u32 tag; ulong waited; + struct aoetgt *t; /* parent target I belong to */ + sector_t lba; + struct sk_buff *skb; /* command skb freed on module exit */ + struct sk_buff *r_skb; /* response skb for async processing */ struct buf *buf; - char *bufaddr; + struct bio_vec *bv; ulong bcnt; - sector_t lba; - struct sk_buff *skb; + ulong bv_off; }; struct aoeif { struct net_device *nd; - unsigned char lost; - unsigned char lostjumbo; - ushort maxbcnt; + ulong lost; + int bcnt; }; struct aoetgt { unsigned char addr[6]; ushort nframes; - struct 
frame *frames; + struct aoedev *d; /* parent device I belong to */ + struct list_head ffree; /* list of free frames */ struct aoeif ifs[NAOEIFS]; struct aoeif *ifp; /* current aoeif in use */ ushort nout; ushort maxout; - u16 lasttag; /* last tag sent */ - u16 useme; + ulong falloc; ulong lastwadj; /* last window adjustment */ + int minbcnt; int wpkts, rpkts; - int dataref; }; struct aoedev { @@ -153,6 +145,9 @@ struct aoedev { u16 rttavg; /* round trip average of requests/responses */ u16 mintimer; u16 fw_ver; /* version of blade's firmware */ + u16 lasttag; /* last tag sent */ + u16 useme; + ulong ref; struct work_struct work;/* disk create work struct */ struct gendisk *gd; struct request_queue *blkq; @@ -160,16 +155,31 @@ struct aoedev { sector_t ssize; struct timer_list timer; spinlock_t lock; - struct sk_buff_head sendq; struct sk_buff_head skbpool; mempool_t *bufpool; /* for deadlock-free Buf allocation */ - struct list_head bufq; /* queue of bios to work on */ - struct buf *inprocess; /* the one we're currently working on */ + struct { /* pointers to work in progress */ + struct buf *buf; + struct bio *nxbio; + struct request *rq; + } ip; + ulong maxbcnt; + struct list_head factive[NFACTIVE]; /* hash of active frames */ struct aoetgt *targets[NTARGETS]; struct aoetgt **tgt; /* target in use when working */ - struct aoetgt **htgt; /* target needing rexmit assistance */ + struct aoetgt *htgt; /* target needing rexmit assistance */ + ulong ntargets; + ulong kicked; }; +/* kthread tracking */ +struct ktstate { + struct completion rendez; + struct task_struct *task; + wait_queue_head_t *waitq; + int (*fn) (void); + char *name; + spinlock_t *lock; +}; int aoeblk_init(void); void aoeblk_exit(void); @@ -182,22 +192,29 @@ void aoechr_error(char *); void aoecmd_work(struct aoedev *d); void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); -void aoecmd_ata_rsp(struct sk_buff *); +struct sk_buff *aoecmd_ata_rsp(struct sk_buff *); void aoecmd_cfg_rsp(struct sk_buff *); void aoecmd_sleepwork(struct work_struct *); void aoecmd_cleanslate(struct aoedev *); +void aoecmd_exit(void); +int aoecmd_init(void); struct sk_buff *aoecmd_ata_id(struct aoedev *); +void aoe_freetframe(struct frame *); +void aoe_flush_iocq(void); +void aoe_end_request(struct aoedev *, struct request *, int); +int aoe_ktstart(struct ktstate *k); +void aoe_ktstop(struct ktstate *k); int aoedev_init(void); void aoedev_exit(void); -struct aoedev *aoedev_by_aoeaddr(int maj, int min); -struct aoedev *aoedev_by_sysminor_m(ulong sysminor); +struct aoedev *aoedev_by_aoeaddr(ulong maj, int min, int do_alloc); void aoedev_downdev(struct aoedev *d); int aoedev_flush(const char __user *str, size_t size); +void aoe_failbuf(struct aoedev *, struct buf *); +void aoedev_put(struct aoedev *); int aoenet_init(void); void aoenet_exit(void); void aoenet_xmit(struct sk_buff_head *); int is_aoe_netif(struct net_device *ifp); int set_aoe_iflist(const char __user *str, size_t size); - diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 321de7b6c442..00dfc5008ad4 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ +/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. 
*/ /* * aoeblk.c * block device routines @@ -161,68 +161,22 @@ aoeblk_release(struct gendisk *disk, fmode_t mode) } static void -aoeblk_make_request(struct request_queue *q, struct bio *bio) +aoeblk_request(struct request_queue *q) { - struct sk_buff_head queue; struct aoedev *d; - struct buf *buf; - ulong flags; - - blk_queue_bounce(q, &bio); - - if (bio == NULL) { - printk(KERN_ERR "aoe: bio is NULL\n"); - BUG(); - return; - } - d = bio->bi_bdev->bd_disk->private_data; - if (d == NULL) { - printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); - BUG(); - bio_endio(bio, -ENXIO); - return; - } else if (bio->bi_io_vec == NULL) { - printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); - BUG(); - bio_endio(bio, -ENXIO); - return; - } - buf = mempool_alloc(d->bufpool, GFP_NOIO); - if (buf == NULL) { - printk(KERN_INFO "aoe: buf allocation failure\n"); - bio_endio(bio, -ENOMEM); - return; - } - memset(buf, 0, sizeof(*buf)); - INIT_LIST_HEAD(&buf->bufs); - buf->stime = jiffies; - buf->bio = bio; - buf->resid = bio->bi_size; - buf->sector = bio->bi_sector; - buf->bv = &bio->bi_io_vec[bio->bi_idx]; - buf->bv_resid = buf->bv->bv_len; - WARN_ON(buf->bv_resid == 0); - buf->bv_off = buf->bv->bv_offset; - - spin_lock_irqsave(&d->lock, flags); + struct request *rq; + d = q->queuedata; if ((d->flags & DEVFL_UP) == 0) { pr_info_ratelimited("aoe: device %ld.%d is not up\n", d->aoemajor, d->aoeminor); - spin_unlock_irqrestore(&d->lock, flags); - mempool_free(buf, d->bufpool); - bio_endio(bio, -ENXIO); + while ((rq = blk_peek_request(q))) { + blk_start_request(rq); + aoe_end_request(d, rq, 1); + } return; } - - list_add_tail(&buf->bufs, &d->bufq); - aoecmd_work(d); - __skb_queue_head_init(&queue); - skb_queue_splice_init(&d->sendq, &queue); - - spin_unlock_irqrestore(&d->lock, flags); - aoenet_xmit(&queue); } static int @@ -254,41 +208,54 @@ aoeblk_gdalloc(void *vp) { struct aoedev *d = vp; struct gendisk *gd; + mempool_t *mp; + struct request_queue *q; + enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, }; ulong flags; gd = alloc_disk(AOE_PARTITIONS); if (gd == NULL) { - printk(KERN_ERR - "aoe: cannot allocate disk structure for %ld.%d\n", + pr_err("aoe: cannot allocate disk structure for %ld.%d\n", d->aoemajor, d->aoeminor); goto err; } - d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); - if (d->bufpool == NULL) { + mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab, + buf_pool_cache); + if (mp == NULL) { printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n", d->aoemajor, d->aoeminor); goto err_disk; } + q = blk_init_queue(aoeblk_request, &d->lock); + if (q == NULL) { + pr_err("aoe: cannot allocate block queue for %ld.%d\n", + d->aoemajor, d->aoeminor); + mempool_destroy(mp); + goto err_disk; + } d->blkq = blk_alloc_queue(GFP_KERNEL); if (!d->blkq) goto err_mempool; - blk_queue_make_request(d->blkq, aoeblk_make_request); d->blkq->backing_dev_info.name = "aoe"; if (bdi_init(&d->blkq->backing_dev_info)) goto err_blkq; spin_lock_irqsave(&d->lock, flags); + blk_queue_max_hw_sectors(d->blkq, BLK_DEF_MAX_SECTORS); + q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE; + d->bufpool = mp; + d->blkq = gd->queue = q; + q->queuedata = d; + d->gd = gd; gd->major = AOE_MAJOR; - gd->first_minor = d->sysminor * AOE_PARTITIONS; + gd->first_minor = d->sysminor; gd->fops = &aoe_bdops; gd->private_data = d; set_capacity(gd, d->ssize); snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", d->aoemajor, d->aoeminor); - gd->queue = d->blkq; - d->gd = gd; d->flags &= 
~DEVFL_GDALLOC; d->flags |= DEVFL_UP; diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c index e86d2062a164..ed57a890c643 100644 --- a/drivers/block/aoe/aoechr.c +++ b/drivers/block/aoe/aoechr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ +/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ /* * aoechr.c * AoE character device driver @@ -86,34 +86,34 @@ revalidate(const char __user *str, size_t size) if (copy_from_user(buf, str, size)) return -EFAULT; - /* should be e%d.%d format */ n = sscanf(buf, "e%d.%d", &major, &minor); if (n != 2) { - printk(KERN_ERR "aoe: invalid device specification\n"); + pr_err("aoe: invalid device specification %s\n", buf); return -EINVAL; } - d = aoedev_by_aoeaddr(major, minor); + d = aoedev_by_aoeaddr(major, minor, 0); if (!d) return -EINVAL; spin_lock_irqsave(&d->lock, flags); aoecmd_cleanslate(d); + aoecmd_cfg(major, minor); loop: skb = aoecmd_ata_id(d); spin_unlock_irqrestore(&d->lock, flags); /* try again if we are able to sleep a bit, * otherwise give up this revalidation */ - if (!skb && !msleep_interruptible(200)) { + if (!skb && !msleep_interruptible(250)) { spin_lock_irqsave(&d->lock, flags); goto loop; } + aoedev_put(d); if (skb) { struct sk_buff_head queue; __skb_queue_head_init(&queue); __skb_queue_tail(&queue, skb); aoenet_xmit(&queue); } - aoecmd_cfg(major, minor); return 0; } @@ -174,6 +174,7 @@ aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp break; case MINOR_FLUSH: ret = aoedev_flush(buf, cnt); + break; } if (ret == 0) ret = cnt; diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index de0435e63b02..3804a0af3ef1 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ +/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. 
*/ /* * aoecmd.c * Filesystem request handling methods @@ -12,10 +12,19 @@ #include <linux/netdevice.h> #include <linux/genhd.h> #include <linux/moduleparam.h> +#include <linux/workqueue.h> +#include <linux/kthread.h> #include <net/net_namespace.h> #include <asm/unaligned.h> +#include <linux/uio.h> #include "aoe.h" +#define MAXIOC (8192) /* default meant to avoid most soft lockups */ + +static void ktcomplete(struct frame *, struct sk_buff *); + +static struct buf *nextbuf(struct aoedev *); + static int aoe_deadsecs = 60 * 3; module_param(aoe_deadsecs, int, 0644); MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev."); @@ -25,6 +34,15 @@ module_param(aoe_maxout, int, 0644); MODULE_PARM_DESC(aoe_maxout, "Only aoe_maxout outstanding packets for every MAC on eX.Y."); +static wait_queue_head_t ktiowq; +static struct ktstate kts; + +/* io completion queue */ +static struct { + struct list_head head; + spinlock_t lock; +} iocq; + static struct sk_buff * new_skb(ulong len) { @@ -35,20 +53,27 @@ new_skb(ulong len) skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); + skb_checksum_none_assert(skb); } return skb; } static struct frame * -getframe(struct aoetgt *t, int tag) +getframe(struct aoedev *d, u32 tag) { - struct frame *f, *e; + struct frame *f; + struct list_head *head, *pos, *nx; + u32 n; - f = t->frames; - e = f + t->nframes; - for (; f<e; f++) - if (f->tag == tag) + n = tag % NFACTIVE; + head = &d->factive[n]; + list_for_each_safe(pos, nx, head) { + f = list_entry(pos, struct frame, head); + if (f->tag == tag) { + list_del(pos); return f; + } + } return NULL; } @@ -58,18 +83,18 @@ getframe(struct aoetgt *t, int tag) * This driver reserves tag -1 to mean "unused frame." */ static int -newtag(struct aoetgt *t) +newtag(struct aoedev *d) { register ulong n; n = jiffies & 0xffff; - return n |= (++t->lasttag & 0x7fff) << 16; + return n |= (++d->lasttag & 0x7fff) << 16; } -static int +static u32 aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h) { - u32 host_tag = newtag(t); + u32 host_tag = newtag(d); memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); memcpy(h->dst, t->addr, sizeof h->dst); @@ -94,16 +119,18 @@ put_lba(struct aoe_atahdr *ah, sector_t lba) ah->lba5 = lba >>= 8; } -static void +static struct aoeif * ifrotate(struct aoetgt *t) { - t->ifp++; - if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL) - t->ifp = t->ifs; - if (t->ifp->nd == NULL) { - printk(KERN_INFO "aoe: no interface to rotate to\n"); - BUG(); - } + struct aoeif *ifp; + + ifp = t->ifp; + ifp++; + if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL) + ifp = t->ifs; + if (ifp->nd == NULL) + return NULL; + return t->ifp = ifp; } static void @@ -128,78 +155,128 @@ skb_pool_get(struct aoedev *d) return NULL; } -/* freeframe is where we do our load balancing so it's a little hairy. 
*/ +void +aoe_freetframe(struct frame *f) +{ + struct aoetgt *t; + + t = f->t; + f->buf = NULL; + f->bv = NULL; + f->r_skb = NULL; + list_add(&f->head, &t->ffree); +} + static struct frame * -freeframe(struct aoedev *d) +newtframe(struct aoedev *d, struct aoetgt *t) { - struct frame *f, *e, *rf; - struct aoetgt **t; + struct frame *f; struct sk_buff *skb; + struct list_head *pos; + + if (list_empty(&t->ffree)) { + if (t->falloc >= NSKBPOOLMAX*2) + return NULL; + f = kcalloc(1, sizeof(*f), GFP_ATOMIC); + if (f == NULL) + return NULL; + t->falloc++; + f->t = t; + } else { + pos = t->ffree.next; + list_del(pos); + f = list_entry(pos, struct frame, head); + } + + skb = f->skb; + if (skb == NULL) { + f->skb = skb = new_skb(ETH_ZLEN); + if (!skb) { +bail: aoe_freetframe(f); + return NULL; + } + } + + if (atomic_read(&skb_shinfo(skb)->dataref) != 1) { + skb = skb_pool_get(d); + if (skb == NULL) + goto bail; + skb_pool_put(d, f->skb); + f->skb = skb; + } + + skb->truesize -= skb->data_len; + skb_shinfo(skb)->nr_frags = skb->data_len = 0; + skb_trim(skb, 0); + return f; +} + +static struct frame * +newframe(struct aoedev *d) +{ + struct frame *f; + struct aoetgt *t, **tt; + int totout = 0; if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */ printk(KERN_ERR "aoe: NULL TARGETS!\n"); return NULL; } - t = d->tgt; - t++; - if (t >= &d->targets[NTARGETS] || !*t) - t = d->targets; + tt = d->tgt; /* last used target */ for (;;) { - if ((*t)->nout < (*t)->maxout + tt++; + if (tt >= &d->targets[NTARGETS] || !*tt) + tt = d->targets; + t = *tt; + totout += t->nout; + if (t->nout < t->maxout && t != d->htgt - && (*t)->ifp->nd) { - rf = NULL; - f = (*t)->frames; - e = f + (*t)->nframes; - for (; f < e; f++) { - if (f->tag != FREETAG) - continue; - skb = f->skb; - if (!skb - && !(f->skb = skb = new_skb(ETH_ZLEN))) - continue; - if (atomic_read(&skb_shinfo(skb)->dataref) - != 1) { - if (!rf) - rf = f; - continue; - } -gotone: skb_shinfo(skb)->nr_frags = skb->data_len = 0; - skb_trim(skb, 0); - d->tgt = t; - ifrotate(*t); + && t->ifp->nd) { + f = newtframe(d, t); + if (f) { + ifrotate(t); + d->tgt = tt; return f; } - /* Work can be done, but the network layer is - holding our precious packets. Try to grab - one from the pool. 
*/ - f = rf; - if (f == NULL) { /* more paranoia */ - printk(KERN_ERR - "aoe: freeframe: %s.\n", - "unexpected null rf"); - d->flags |= DEVFL_KICKME; - return NULL; - } - skb = skb_pool_get(d); - if (skb) { - skb_pool_put(d, f->skb); - f->skb = skb; - goto gotone; - } - (*t)->dataref++; - if ((*t)->nout == 0) - d->flags |= DEVFL_KICKME; } - if (t == d->tgt) /* we've looped and found nada */ + if (tt == d->tgt) /* we've looped and found nada */ break; - t++; - if (t >= &d->targets[NTARGETS] || !*t) - t = d->targets; + } + if (totout == 0) { + d->kicked++; + d->flags |= DEVFL_KICKME; } return NULL; } +static void +skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt) +{ + int frag = 0; + ulong fcnt; +loop: + fcnt = bv->bv_len - (off - bv->bv_offset); + if (fcnt > cnt) + fcnt = cnt; + skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt); + cnt -= fcnt; + if (cnt <= 0) + return; + bv++; + off = bv->bv_offset; + goto loop; +} + +static void +fhash(struct frame *f) +{ + struct aoedev *d = f->t->d; + u32 n; + + n = f->tag % NFACTIVE; + list_add_tail(&f->head, &d->factive[n]); +} + static int aoecmd_ata_rw(struct aoedev *d) { @@ -207,26 +284,47 @@ aoecmd_ata_rw(struct aoedev *d) struct aoe_hdr *h; struct aoe_atahdr *ah; struct buf *buf; - struct bio_vec *bv; struct aoetgt *t; struct sk_buff *skb; - ulong bcnt; + struct sk_buff_head queue; + ulong bcnt, fbcnt; char writebit, extbit; writebit = 0x10; extbit = 0x4; - f = freeframe(d); + buf = nextbuf(d); + if (buf == NULL) + return 0; + f = newframe(d); if (f == NULL) return 0; t = *d->tgt; - buf = d->inprocess; - bv = buf->bv; - bcnt = t->ifp->maxbcnt; + bcnt = d->maxbcnt; if (bcnt == 0) bcnt = DEFAULTBCNT; - if (bcnt > buf->bv_resid) - bcnt = buf->bv_resid; + if (bcnt > buf->resid) + bcnt = buf->resid; + fbcnt = bcnt; + f->bv = buf->bv; + f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid); + do { + if (fbcnt < buf->bv_resid) { + buf->bv_resid -= fbcnt; + buf->resid -= fbcnt; + break; + } + fbcnt -= buf->bv_resid; + buf->resid -= buf->bv_resid; + if (buf->resid == 0) { + d->ip.buf = NULL; + break; + } + buf->bv++; + buf->bv_resid = buf->bv->bv_len; + WARN_ON(buf->bv_resid == 0); + } while (fbcnt); + /* initialize the headers & frame */ skb = f->skb; h = (struct aoe_hdr *) skb_mac_header(skb); @@ -234,10 +332,10 @@ aoecmd_ata_rw(struct aoedev *d) skb_put(skb, sizeof *h + sizeof *ah); memset(h, 0, skb->len); f->tag = aoehdr_atainit(d, t, h); + fhash(f); t->nout++; f->waited = 0; f->buf = buf; - f->bufaddr = page_address(bv->bv_page) + buf->bv_off; f->bcnt = bcnt; f->lba = buf->sector; @@ -252,10 +350,11 @@ aoecmd_ata_rw(struct aoedev *d) ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ } if (bio_data_dir(buf->bio) == WRITE) { - skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt); + skb_fillup(skb, f->bv, f->bv_off, bcnt); ah->aflags |= AOEAFL_WRITE; skb->len += bcnt; skb->data_len = bcnt; + skb->truesize += bcnt; t->wpkts++; } else { t->rpkts++; @@ -266,23 +365,15 @@ aoecmd_ata_rw(struct aoedev *d) /* mark all tracking fields and load out */ buf->nframesout += 1; - buf->bv_off += bcnt; - buf->bv_resid -= bcnt; - buf->resid -= bcnt; buf->sector += bcnt >> 9; - if (buf->resid == 0) { - d->inprocess = NULL; - } else if (buf->bv_resid == 0) { - buf->bv = ++bv; - buf->bv_resid = bv->bv_len; - WARN_ON(buf->bv_resid == 0); - buf->bv_off = bv->bv_offset; - } skb->dev = t->ifp->nd; skb = skb_clone(skb, GFP_ATOMIC); - if (skb) - __skb_queue_tail(&d->sendq, skb); + if (skb) { + __skb_queue_head_init(&queue); + 
__skb_queue_tail(&queue, skb); + aoenet_xmit(&queue); + } return 1; } @@ -329,17 +420,25 @@ cont: } static void -resend(struct aoedev *d, struct aoetgt *t, struct frame *f) +resend(struct aoedev *d, struct frame *f) { struct sk_buff *skb; + struct sk_buff_head queue; struct aoe_hdr *h; struct aoe_atahdr *ah; + struct aoetgt *t; char buf[128]; u32 n; - ifrotate(t); - n = newtag(t); + t = f->t; + n = newtag(d); skb = f->skb; + if (ifrotate(t) == NULL) { + /* probably can't happen, but set it up to fail anyway */ + pr_info("aoe: resend: no interfaces to rotate to.\n"); + ktcomplete(f, NULL); + return; + } h = (struct aoe_hdr *) skb_mac_header(skb); ah = (struct aoe_atahdr *) (h+1); @@ -350,39 +449,22 @@ resend(struct aoedev *d, struct aoetgt *t, struct frame *f) aoechr_error(buf); f->tag = n; + fhash(f); h->tag = cpu_to_be32(n); memcpy(h->dst, t->addr, sizeof h->dst); memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); - switch (ah->cmdstat) { - default: - break; - case ATA_CMD_PIO_READ: - case ATA_CMD_PIO_READ_EXT: - case ATA_CMD_PIO_WRITE: - case ATA_CMD_PIO_WRITE_EXT: - put_lba(ah, f->lba); - - n = f->bcnt; - if (n > DEFAULTBCNT) - n = DEFAULTBCNT; - ah->scnt = n >> 9; - if (ah->aflags & AOEAFL_WRITE) { - skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr), - offset_in_page(f->bufaddr), n); - skb->len = sizeof *h + sizeof *ah + n; - skb->data_len = n; - } - } skb->dev = t->ifp->nd; skb = skb_clone(skb, GFP_ATOMIC); if (skb == NULL) return; - __skb_queue_tail(&d->sendq, skb); + __skb_queue_head_init(&queue); + __skb_queue_tail(&queue, skb); + aoenet_xmit(&queue); } static int -tsince(int tag) +tsince(u32 tag) { int n; @@ -406,58 +488,65 @@ getif(struct aoetgt *t, struct net_device *nd) return NULL; } -static struct aoeif * -addif(struct aoetgt *t, struct net_device *nd) -{ - struct aoeif *p; - - p = getif(t, NULL); - if (!p) - return NULL; - p->nd = nd; - p->maxbcnt = DEFAULTBCNT; - p->lost = 0; - p->lostjumbo = 0; - return p; -} - static void ejectif(struct aoetgt *t, struct aoeif *ifp) { struct aoeif *e; + struct net_device *nd; ulong n; + nd = ifp->nd; e = t->ifs + NAOEIFS - 1; n = (e - ifp) * sizeof *ifp; memmove(ifp, ifp+1, n); e->nd = NULL; + dev_put(nd); } static int sthtith(struct aoedev *d) { - struct frame *f, *e, *nf; + struct frame *f, *nf; + struct list_head *nx, *pos, *head; struct sk_buff *skb; - struct aoetgt *ht = *d->htgt; - - f = ht->frames; - e = f + ht->nframes; - for (; f < e; f++) { - if (f->tag == FREETAG) - continue; - nf = freeframe(d); - if (!nf) - return 0; - skb = nf->skb; - *nf = *f; - f->skb = skb; - f->tag = FREETAG; - nf->waited = 0; - ht->nout--; - (*d->tgt)->nout++; - resend(d, *d->tgt, nf); + struct aoetgt *ht = d->htgt; + int i; + + for (i = 0; i < NFACTIVE; i++) { + head = &d->factive[i]; + list_for_each_safe(pos, nx, head) { + f = list_entry(pos, struct frame, head); + if (f->t != ht) + continue; + + nf = newframe(d); + if (!nf) + return 0; + + /* remove frame from active list */ + list_del(pos); + + /* reassign all pertinent bits to new outbound frame */ + skb = nf->skb; + nf->skb = f->skb; + nf->buf = f->buf; + nf->bcnt = f->bcnt; + nf->lba = f->lba; + nf->bv = f->bv; + nf->bv_off = f->bv_off; + nf->waited = 0; + f->skb = skb; + aoe_freetframe(f); + ht->nout--; + nf->t->nout++; + resend(d, nf); + } } - /* he's clean, he's useless. take away his interfaces */ + /* We've cleaned up the outstanding so take away his + * interfaces so he won't be used. We should remove him from + * the target array here, but cleaning up a target is + * involved. PUNT! 
+ */ memset(ht->ifs, 0, sizeof ht->ifs); d->htgt = NULL; return 1; @@ -476,13 +565,15 @@ ata_scnt(unsigned char *packet) { static void rexmit_timer(ulong vp) { - struct sk_buff_head queue; struct aoedev *d; struct aoetgt *t, **tt, **te; struct aoeif *ifp; - struct frame *f, *e; + struct frame *f; + struct list_head *head, *pos, *nx; + LIST_HEAD(flist); register long timeout; ulong flags, n; + int i; d = (struct aoedev *) vp; @@ -496,58 +587,22 @@ rexmit_timer(ulong vp) spin_unlock_irqrestore(&d->lock, flags); return; } - tt = d->targets; - te = tt + NTARGETS; - for (; tt < te && *tt; tt++) { - t = *tt; - f = t->frames; - e = f + t->nframes; - for (; f < e; f++) { - if (f->tag == FREETAG - || tsince(f->tag) < timeout) - continue; - n = f->waited += timeout; - n /= HZ; - if (n > aoe_deadsecs) { - /* waited too long. device failure. */ - aoedev_downdev(d); - break; - } - - if (n > HELPWAIT /* see if another target can help */ - && (tt != d->targets || d->targets[1])) - d->htgt = tt; - - if (t->nout == t->maxout) { - if (t->maxout > 1) - t->maxout--; - t->lastwadj = jiffies; - } - - ifp = getif(t, f->skb->dev); - if (ifp && ++ifp->lost > (t->nframes << 1) - && (ifp != t->ifs || t->ifs[1].nd)) { - ejectif(t, ifp); - ifp = NULL; - } - if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512 - && ifp && ++ifp->lostjumbo > (t->nframes << 1) - && ifp->maxbcnt != DEFAULTBCNT) { - printk(KERN_INFO - "aoe: e%ld.%d: " - "too many lost jumbo on " - "%s:%pm - " - "falling back to %d frames.\n", - d->aoemajor, d->aoeminor, - ifp->nd->name, t->addr, - DEFAULTBCNT); - ifp->maxbcnt = 0; - } - resend(d, t, f); + /* collect all frames to rexmit into flist */ + for (i = 0; i < NFACTIVE; i++) { + head = &d->factive[i]; + list_for_each_safe(pos, nx, head) { + f = list_entry(pos, struct frame, head); + if (tsince(f->tag) < timeout) + break; /* end of expired frames */ + /* move to flist for later processing */ + list_move_tail(pos, &flist); } - - /* window check */ + } + /* window check */ + tt = d->targets; + te = tt + d->ntargets; + for (; tt < te && (t = *tt); tt++) { if (t->nout == t->maxout && t->maxout < t->nframes && (jiffies - t->lastwadj)/HZ > 10) { @@ -556,45 +611,173 @@ rexmit_timer(ulong vp) } } - if (!skb_queue_empty(&d->sendq)) { + if (!list_empty(&flist)) { /* retransmissions necessary */ n = d->rttavg <<= 1; if (n > MAXTIMER) d->rttavg = MAXTIMER; } - if (d->flags & DEVFL_KICKME || d->htgt) { - d->flags &= ~DEVFL_KICKME; - aoecmd_work(d); + /* process expired frames */ + while (!list_empty(&flist)) { + pos = flist.next; + f = list_entry(pos, struct frame, head); + n = f->waited += timeout; + n /= HZ; + if (n > aoe_deadsecs) { + /* Waited too long. Device failure. + * Hang all frames on first hash bucket for downdev + * to clean up. 
+ */ + list_splice(&flist, &d->factive[0]); + aoedev_downdev(d); + break; + } + list_del(pos); + + t = f->t; + if (n > aoe_deadsecs/2) + d->htgt = t; /* see if another target can help */ + + if (t->nout == t->maxout) { + if (t->maxout > 1) + t->maxout--; + t->lastwadj = jiffies; + } + + ifp = getif(t, f->skb->dev); + if (ifp && ++ifp->lost > (t->nframes << 1) + && (ifp != t->ifs || t->ifs[1].nd)) { + ejectif(t, ifp); + ifp = NULL; + } + resend(d, f); } - __skb_queue_head_init(&queue); - skb_queue_splice_init(&d->sendq, &queue); + if ((d->flags & DEVFL_KICKME || d->htgt) && d->blkq) { + d->flags &= ~DEVFL_KICKME; + d->blkq->request_fn(d->blkq); + } d->timer.expires = jiffies + TIMERTICK; add_timer(&d->timer); spin_unlock_irqrestore(&d->lock, flags); +} - aoenet_xmit(&queue); +static unsigned long +rqbiocnt(struct request *r) +{ + struct bio *bio; + unsigned long n = 0; + + __rq_for_each_bio(bio, r) + n++; + return n; +} + +/* This can be removed if we are certain that no users of the block + * layer will ever use zero-count pages in bios. Otherwise we have to + * protect against the put_page sometimes done by the network layer. + * + * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for + * discussion. + * + * We cannot use get_page in the workaround, because it insists on a + * positive page count as a precondition. So we use _count directly. + */ +static void +bio_pageinc(struct bio *bio) +{ + struct bio_vec *bv; + struct page *page; + int i; + + bio_for_each_segment(bv, bio, i) { + page = bv->bv_page; + /* Non-zero page count for non-head members of + * compound pages is no longer allowed by the kernel, + * but this has never been seen here. + */ + if (unlikely(PageCompound(page))) + if (compound_trans_head(page) != page) { + pr_crit("page tail used for block I/O\n"); + BUG(); + } + atomic_inc(&page->_count); + } +} + +static void +bio_pagedec(struct bio *bio) +{ + struct bio_vec *bv; + int i; + + bio_for_each_segment(bv, bio, i) + atomic_dec(&bv->bv_page->_count); +} + +static void +bufinit(struct buf *buf, struct request *rq, struct bio *bio) +{ + struct bio_vec *bv; + + memset(buf, 0, sizeof(*buf)); + buf->rq = rq; + buf->bio = bio; + buf->resid = bio->bi_size; + buf->sector = bio->bi_sector; + bio_pageinc(bio); + buf->bv = bv = &bio->bi_io_vec[bio->bi_idx]; + buf->bv_resid = bv->bv_len; + WARN_ON(buf->bv_resid == 0); +} + +static struct buf * +nextbuf(struct aoedev *d) +{ + struct request *rq; + struct request_queue *q; + struct buf *buf; + struct bio *bio; + + q = d->blkq; + if (q == NULL) + return NULL; /* initializing */ + if (d->ip.buf) + return d->ip.buf; + rq = d->ip.rq; + if (rq == NULL) { + rq = blk_peek_request(q); + if (rq == NULL) + return NULL; + blk_start_request(rq); + d->ip.rq = rq; + d->ip.nxbio = rq->bio; + rq->special = (void *) rqbiocnt(rq); + } + buf = mempool_alloc(d->bufpool, GFP_ATOMIC); + if (buf == NULL) { + pr_err("aoe: nextbuf: unable to mempool_alloc!\n"); + return NULL; + } + bio = d->ip.nxbio; + bufinit(buf, rq, bio); + bio = bio->bi_next; + d->ip.nxbio = bio; + if (bio == NULL) + d->ip.rq = NULL; + return d->ip.buf = buf; } /* enters with d->lock held */ void aoecmd_work(struct aoedev *d) { - struct buf *buf; -loop: if (d->htgt && !sthtith(d)) return; - if (d->inprocess == NULL) { - if (list_empty(&d->bufq)) - return; - buf = container_of(d->bufq.next, struct buf, bufs); - list_del(d->bufq.next); - d->inprocess = buf; - } - if (aoecmd_ata_rw(d)) - goto loop; + while (aoecmd_ata_rw(d)) + ; } /* this function performs work that has been deferred 
until sleeping is OK @@ -603,28 +786,25 @@ void aoecmd_sleepwork(struct work_struct *work) { struct aoedev *d = container_of(work, struct aoedev, work); + struct block_device *bd; + u64 ssize; if (d->flags & DEVFL_GDALLOC) aoeblk_gdalloc(d); if (d->flags & DEVFL_NEWSIZE) { - struct block_device *bd; - unsigned long flags; - u64 ssize; - ssize = get_capacity(d->gd); bd = bdget_disk(d->gd, 0); - if (bd) { mutex_lock(&bd->bd_inode->i_mutex); i_size_write(bd->bd_inode, (loff_t)ssize<<9); mutex_unlock(&bd->bd_inode->i_mutex); bdput(bd); } - spin_lock_irqsave(&d->lock, flags); + spin_lock_irq(&d->lock); d->flags |= DEVFL_UP; d->flags &= ~DEVFL_NEWSIZE; - spin_unlock_irqrestore(&d->lock, flags); + spin_unlock_irq(&d->lock); } } @@ -717,163 +897,299 @@ gettgt(struct aoedev *d, char *addr) return NULL; } -static inline void -diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector) +static void +bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt) +{ + ulong fcnt; + char *p; + int soff = 0; +loop: + fcnt = bv->bv_len - (off - bv->bv_offset); + if (fcnt > cnt) + fcnt = cnt; + p = page_address(bv->bv_page) + off; + skb_copy_bits(skb, soff, p, fcnt); + soff += fcnt; + cnt -= fcnt; + if (cnt <= 0) + return; + bv++; + off = bv->bv_offset; + goto loop; +} + +void +aoe_end_request(struct aoedev *d, struct request *rq, int fastfail) +{ + struct bio *bio; + int bok; + struct request_queue *q; + + q = d->blkq; + if (rq == d->ip.rq) + d->ip.rq = NULL; + do { + bio = rq->bio; + bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); + } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size)); + + /* cf. http://lkml.org/lkml/2006/10/31/28 */ + if (!fastfail) + q->request_fn(q); +} + +static void +aoe_end_buf(struct aoedev *d, struct buf *buf) +{ + struct request *rq; + unsigned long n; + + if (buf == d->ip.buf) + d->ip.buf = NULL; + rq = buf->rq; + bio_pagedec(buf->bio); + mempool_free(buf, d->bufpool); + n = (unsigned long) rq->special; + rq->special = (void *) --n; + if (n == 0) + aoe_end_request(d, rq, 0); +} + +static void +ktiocomplete(struct frame *f) { - unsigned long n_sect = bio->bi_size >> 9; - const int rw = bio_data_dir(bio); - struct hd_struct *part; - int cpu; + struct aoe_hdr *hin, *hout; + struct aoe_atahdr *ahin, *ahout; + struct buf *buf; + struct sk_buff *skb; + struct aoetgt *t; + struct aoeif *ifp; + struct aoedev *d; + long n; + + if (f == NULL) + return; + + t = f->t; + d = t->d; + + hout = (struct aoe_hdr *) skb_mac_header(f->skb); + ahout = (struct aoe_atahdr *) (hout+1); + buf = f->buf; + skb = f->r_skb; + if (skb == NULL) + goto noskb; /* just fail the buf. */ + + hin = (struct aoe_hdr *) skb->data; + skb_pull(skb, sizeof(*hin)); + ahin = (struct aoe_atahdr *) skb->data; + skb_pull(skb, sizeof(*ahin)); + if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */ + pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n", + ahout->cmdstat, ahin->cmdstat, + d->aoemajor, d->aoeminor); +noskb: if (buf) + clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + goto badrsp; + } - cpu = part_stat_lock(); - part = disk_map_sector_rcu(disk, sector); + n = ahout->scnt << 9; + switch (ahout->cmdstat) { + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + if (skb->len < n) { + pr_err("aoe: runt data size in read. 
skb->len=%d need=%ld\n", + skb->len, n); + clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + break; + } + bvcpy(f->bv, f->bv_off, skb, n); + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + spin_lock_irq(&d->lock); + ifp = getif(t, skb->dev); + if (ifp) + ifp->lost = 0; + if (d->htgt == t) /* I'll help myself, thank you. */ + d->htgt = NULL; + spin_unlock_irq(&d->lock); + break; + case ATA_CMD_ID_ATA: + if (skb->len < 512) { + pr_info("aoe: runt data size in ataid. skb->len=%d\n", + skb->len); + break; + } + if (skb_linearize(skb)) + break; + spin_lock_irq(&d->lock); + ataid_complete(d, t, skb->data); + spin_unlock_irq(&d->lock); + break; + default: + pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n", + ahout->cmdstat, + be16_to_cpu(get_unaligned(&hin->major)), + hin->minor); + } +badrsp: + spin_lock_irq(&d->lock); + + aoe_freetframe(f); + + if (buf && --buf->nframesout == 0 && buf->resid == 0) + aoe_end_buf(d, buf); + + aoecmd_work(d); + + spin_unlock_irq(&d->lock); + aoedev_put(d); + dev_kfree_skb(skb); +} + +/* Enters with iocq.lock held. + * Returns true iff responses needing processing remain. + */ +static int +ktio(void) +{ + struct frame *f; + struct list_head *pos; + int i; - part_stat_inc(cpu, part, ios[rw]); - part_stat_add(cpu, part, ticks[rw], duration); - part_stat_add(cpu, part, sectors[rw], n_sect); - part_stat_add(cpu, part, io_ticks, duration); + for (i = 0; ; ++i) { + if (i == MAXIOC) + return 1; + if (list_empty(&iocq.head)) + return 0; + pos = iocq.head.next; + list_del(pos); + spin_unlock_irq(&iocq.lock); + f = list_entry(pos, struct frame, head); + ktiocomplete(f); + spin_lock_irq(&iocq.lock); + } +} - part_stat_unlock(); +static int +kthread(void *vp) +{ + struct ktstate *k; + DECLARE_WAITQUEUE(wait, current); + int more; + + k = vp; + current->flags |= PF_NOFREEZE; + set_user_nice(current, -10); + complete(&k->rendez); /* tell spawner we're running */ + do { + spin_lock_irq(k->lock); + more = k->fn(); + if (!more) { + add_wait_queue(k->waitq, &wait); + __set_current_state(TASK_INTERRUPTIBLE); + } + spin_unlock_irq(k->lock); + if (!more) { + schedule(); + remove_wait_queue(k->waitq, &wait); + } else + cond_resched(); + } while (!kthread_should_stop()); + complete(&k->rendez); /* tell spawner we're stopping */ + return 0; } void +aoe_ktstop(struct ktstate *k) +{ + kthread_stop(k->task); + wait_for_completion(&k->rendez); +} + +int +aoe_ktstart(struct ktstate *k) +{ + struct task_struct *task; + + init_completion(&k->rendez); + task = kthread_run(kthread, k, k->name); + if (task == NULL || IS_ERR(task)) + return -ENOMEM; + k->task = task; + wait_for_completion(&k->rendez); /* allow kthread to start */ + init_completion(&k->rendez); /* for waiting for exit later */ + return 0; +} + +/* pass it off to kthreads for processing */ +static void +ktcomplete(struct frame *f, struct sk_buff *skb) +{ + ulong flags; + + f->r_skb = skb; + spin_lock_irqsave(&iocq.lock, flags); + list_add_tail(&f->head, &iocq.head); + spin_unlock_irqrestore(&iocq.lock, flags); + wake_up(&ktiowq); +} + +struct sk_buff * aoecmd_ata_rsp(struct sk_buff *skb) { - struct sk_buff_head queue; struct aoedev *d; - struct aoe_hdr *hin, *hout; - struct aoe_atahdr *ahin, *ahout; + struct aoe_hdr *h; struct frame *f; - struct buf *buf; struct aoetgt *t; - struct aoeif *ifp; - register long n; + u32 n; ulong flags; char ebuf[128]; u16 aoemajor; - hin = (struct aoe_hdr *) skb_mac_header(skb); - aoemajor = get_unaligned_be16(&hin->major); - d = aoedev_by_aoeaddr(aoemajor, hin->minor); + h = (struct 
aoe_hdr *) skb->data; + aoemajor = be16_to_cpu(get_unaligned(&h->major)); + d = aoedev_by_aoeaddr(aoemajor, h->minor, 0); if (d == NULL) { snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response " "for unknown device %d.%d\n", - aoemajor, hin->minor); + aoemajor, h->minor); aoechr_error(ebuf); - return; + return skb; } spin_lock_irqsave(&d->lock, flags); - n = get_unaligned_be32(&hin->tag); - t = gettgt(d, hin->src); - if (t == NULL) { - printk(KERN_INFO "aoe: can't find target e%ld.%d:%pm\n", - d->aoemajor, d->aoeminor, hin->src); - spin_unlock_irqrestore(&d->lock, flags); - return; - } - f = getframe(t, n); + n = be32_to_cpu(get_unaligned(&h->tag)); + f = getframe(d, n); if (f == NULL) { calc_rttavg(d, -tsince(n)); spin_unlock_irqrestore(&d->lock, flags); + aoedev_put(d); snprintf(ebuf, sizeof ebuf, "%15s e%d.%d tag=%08x@%08lx\n", "unexpected rsp", - get_unaligned_be16(&hin->major), - hin->minor, - get_unaligned_be32(&hin->tag), + get_unaligned_be16(&h->major), + h->minor, + get_unaligned_be32(&h->tag), jiffies); aoechr_error(ebuf); - return; + return skb; } - + t = f->t; calc_rttavg(d, tsince(f->tag)); - - ahin = (struct aoe_atahdr *) (hin+1); - hout = (struct aoe_hdr *) skb_mac_header(f->skb); - ahout = (struct aoe_atahdr *) (hout+1); - buf = f->buf; - - if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */ - printk(KERN_ERR - "aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n", - ahout->cmdstat, ahin->cmdstat, - d->aoemajor, d->aoeminor); - if (buf) - buf->flags |= BUFFL_FAIL; - } else { - if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */ - d->htgt = NULL; - n = ahout->scnt << 9; - switch (ahout->cmdstat) { - case ATA_CMD_PIO_READ: - case ATA_CMD_PIO_READ_EXT: - if (skb->len - sizeof *hin - sizeof *ahin < n) { - printk(KERN_ERR - "aoe: %s. skb->len=%d need=%ld\n", - "runt data size in read", skb->len, n); - /* fail frame f? just returning will rexmit. */ - spin_unlock_irqrestore(&d->lock, flags); - return; - } - memcpy(f->bufaddr, ahin+1, n); - case ATA_CMD_PIO_WRITE: - case ATA_CMD_PIO_WRITE_EXT: - ifp = getif(t, skb->dev); - if (ifp) { - ifp->lost = 0; - if (n > DEFAULTBCNT) - ifp->lostjumbo = 0; - } - if (f->bcnt -= n) { - f->lba += n >> 9; - f->bufaddr += n; - resend(d, t, f); - goto xmit; - } - break; - case ATA_CMD_ID_ATA: - if (skb->len - sizeof *hin - sizeof *ahin < 512) { - printk(KERN_INFO - "aoe: runt data size in ataid. skb->len=%d\n", - skb->len); - spin_unlock_irqrestore(&d->lock, flags); - return; - } - ataid_complete(d, t, (char *) (ahin+1)); - break; - default: - printk(KERN_INFO - "aoe: unrecognized ata command %2.2Xh for %d.%d\n", - ahout->cmdstat, - get_unaligned_be16(&hin->major), - hin->minor); - } - } - - if (buf && --buf->nframesout == 0 && buf->resid == 0) { - diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector); - if (buf->flags & BUFFL_FAIL) - bio_endio(buf->bio, -EIO); - else { - bio_flush_dcache_pages(buf->bio); - bio_endio(buf->bio, 0); - } - mempool_free(buf, d->bufpool); - } - - f->buf = NULL; - f->tag = FREETAG; t->nout--; - aoecmd_work(d); -xmit: - __skb_queue_head_init(&queue); - skb_queue_splice_init(&d->sendq, &queue); spin_unlock_irqrestore(&d->lock, flags); - aoenet_xmit(&queue); + + ktcomplete(f, skb); + + /* + * Note here that we do not perform an aoedev_put, as we are + * leaving this reference for the ktio to release. 
+ */ + return NULL; } void @@ -895,7 +1211,7 @@ aoecmd_ata_id(struct aoedev *d) struct sk_buff *skb; struct aoetgt *t; - f = freeframe(d); + f = newframe(d); if (f == NULL) return NULL; @@ -908,6 +1224,7 @@ aoecmd_ata_id(struct aoedev *d) skb_put(skb, sizeof *h + sizeof *ah); memset(h, 0, skb->len); f->tag = aoehdr_atainit(d, t, h); + fhash(f); t->nout++; f->waited = 0; @@ -928,7 +1245,6 @@ static struct aoetgt * addtgt(struct aoedev *d, char *addr, ulong nframes) { struct aoetgt *t, **tt, **te; - struct frame *f, *e; tt = d->targets; te = tt + NTARGETS; @@ -940,26 +1256,73 @@ addtgt(struct aoedev *d, char *addr, ulong nframes) "aoe: device addtgt failure; too many targets\n"); return NULL; } - t = kcalloc(1, sizeof *t, GFP_ATOMIC); - f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); - if (!t || !f) { - kfree(f); - kfree(t); + t = kzalloc(sizeof(*t), GFP_ATOMIC); + if (!t) { printk(KERN_INFO "aoe: cannot allocate memory to add target\n"); return NULL; } + d->ntargets++; t->nframes = nframes; - t->frames = f; - e = f + nframes; - for (; f < e; f++) - f->tag = FREETAG; + t->d = d; memcpy(t->addr, addr, sizeof t->addr); t->ifp = t->ifs; t->maxout = t->nframes; + INIT_LIST_HEAD(&t->ffree); return *tt = t; } +static void +setdbcnt(struct aoedev *d) +{ + struct aoetgt **t, **e; + int bcnt = 0; + + t = d->targets; + e = t + NTARGETS; + for (; t < e && *t; t++) + if (bcnt == 0 || bcnt > (*t)->minbcnt) + bcnt = (*t)->minbcnt; + if (bcnt != d->maxbcnt) { + d->maxbcnt = bcnt; + pr_info("aoe: e%ld.%d: setting %d byte data frames\n", + d->aoemajor, d->aoeminor, bcnt); + } +} + +static void +setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt) +{ + struct aoedev *d; + struct aoeif *p, *e; + int minbcnt; + + d = t->d; + minbcnt = bcnt; + p = t->ifs; + e = p + NAOEIFS; + for (; p < e; p++) { + if (p->nd == NULL) + break; /* end of the valid interfaces */ + if (p->nd == nd) { + p->bcnt = bcnt; /* we're updating */ + nd = NULL; + } else if (minbcnt > p->bcnt) + minbcnt = p->bcnt; /* find the min interface */ + } + if (nd) { + if (p == e) { + pr_err("aoe: device setifbcnt failure; too many interfaces.\n"); + return; + } + dev_hold(nd); + p->nd = nd; + p->bcnt = bcnt; + } + t->minbcnt = minbcnt; + setdbcnt(d); +} + void aoecmd_cfg_rsp(struct sk_buff *skb) { @@ -967,11 +1330,12 @@ aoecmd_cfg_rsp(struct sk_buff *skb) struct aoe_hdr *h; struct aoe_cfghdr *ch; struct aoetgt *t; - struct aoeif *ifp; - ulong flags, sysminor, aoemajor; + ulong flags, aoemajor; struct sk_buff *sl; + struct sk_buff_head queue; u16 n; + sl = NULL; h = (struct aoe_hdr *) skb_mac_header(skb); ch = (struct aoe_cfghdr *) (h+1); @@ -985,10 +1349,13 @@ aoecmd_cfg_rsp(struct sk_buff *skb) "Check shelf dip switches.\n"); return; } - - sysminor = SYSMINOR(aoemajor, h->minor); - if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) { - printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n", + if (aoemajor == 0xffff) { + pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n", + aoemajor, (int) h->minor); + return; + } + if (h->minor == 0xff) { + pr_info("aoe: e%ld.%d: broadcast slot number invalid\n", aoemajor, (int) h->minor); return; } @@ -997,9 +1364,9 @@ aoecmd_cfg_rsp(struct sk_buff *skb) if (n > aoe_maxout) /* keep it reasonable */ n = aoe_maxout; - d = aoedev_by_sysminor_m(sysminor); + d = aoedev_by_aoeaddr(aoemajor, h->minor, 1); if (d == NULL) { - printk(KERN_INFO "aoe: device sysminor_m failure\n"); + pr_info("aoe: device allocation failure\n"); return; } @@ -1008,52 +1375,26 @@ aoecmd_cfg_rsp(struct sk_buff *skb) t = 
gettgt(d, h->src); if (!t) { t = addtgt(d, h->src, n); - if (!t) { - spin_unlock_irqrestore(&d->lock, flags); - return; - } - } - ifp = getif(t, skb->dev); - if (!ifp) { - ifp = addif(t, skb->dev); - if (!ifp) { - printk(KERN_INFO - "aoe: device addif failure; " - "too many interfaces?\n"); - spin_unlock_irqrestore(&d->lock, flags); - return; - } - } - if (ifp->maxbcnt) { - n = ifp->nd->mtu; - n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr); - n /= 512; - if (n > ch->scnt) - n = ch->scnt; - n = n ? n * 512 : DEFAULTBCNT; - if (n != ifp->maxbcnt) { - printk(KERN_INFO - "aoe: e%ld.%d: setting %d%s%s:%pm\n", - d->aoemajor, d->aoeminor, n, - " byte data frames on ", ifp->nd->name, - t->addr); - ifp->maxbcnt = n; - } + if (!t) + goto bail; } + n = skb->dev->mtu; + n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr); + n /= 512; + if (n > ch->scnt) + n = ch->scnt; + n = n ? n * 512 : DEFAULTBCNT; + setifbcnt(t, skb->dev, n); /* don't change users' perspective */ - if (d->nopen) { - spin_unlock_irqrestore(&d->lock, flags); - return; + if (d->nopen == 0) { + d->fw_ver = be16_to_cpu(ch->fwver); + sl = aoecmd_ata_id(d); } - d->fw_ver = be16_to_cpu(ch->fwver); - - sl = aoecmd_ata_id(d); - +bail: spin_unlock_irqrestore(&d->lock, flags); - + aoedev_put(d); if (sl) { - struct sk_buff_head queue; __skb_queue_head_init(&queue); __skb_queue_tail(&queue, sl); aoenet_xmit(&queue); @@ -1064,20 +1405,74 @@ void aoecmd_cleanslate(struct aoedev *d) { struct aoetgt **t, **te; - struct aoeif *p, *e; d->mintimer = MINTIMER; + d->maxbcnt = 0; t = d->targets; te = t + NTARGETS; - for (; t < te && *t; t++) { + for (; t < te && *t; t++) (*t)->maxout = (*t)->nframes; - p = (*t)->ifs; - e = p + NAOEIFS; - for (; p < e; p++) { - p->lostjumbo = 0; - p->lost = 0; - p->maxbcnt = DEFAULTBCNT; +} + +void +aoe_failbuf(struct aoedev *d, struct buf *buf) +{ + if (buf == NULL) + return; + buf->resid = 0; + clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); + if (buf->nframesout == 0) + aoe_end_buf(d, buf); +} + +void +aoe_flush_iocq(void) +{ + struct frame *f; + struct aoedev *d; + LIST_HEAD(flist); + struct list_head *pos; + struct sk_buff *skb; + ulong flags; + + spin_lock_irqsave(&iocq.lock, flags); + list_splice_init(&iocq.head, &flist); + spin_unlock_irqrestore(&iocq.lock, flags); + while (!list_empty(&flist)) { + pos = flist.next; + list_del(pos); + f = list_entry(pos, struct frame, head); + d = f->t->d; + skb = f->r_skb; + spin_lock_irqsave(&d->lock, flags); + if (f->buf) { + f->buf->nframesout--; + aoe_failbuf(d, f->buf); } + aoe_freetframe(f); + spin_unlock_irqrestore(&d->lock, flags); + dev_kfree_skb(skb); + aoedev_put(d); } } + +int __init +aoecmd_init(void) +{ + INIT_LIST_HEAD(&iocq.head); + spin_lock_init(&iocq.lock); + init_waitqueue_head(&ktiowq); + kts.name = "aoe_ktio"; + kts.fn = ktio; + kts.waitq = &ktiowq; + kts.lock = &iocq.lock; + return aoe_ktstart(&kts); +} + +void +aoecmd_exit(void) +{ + aoe_ktstop(&kts); + aoe_flush_iocq(); +} diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 6b5110a47458..90e5b537f94b 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ +/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ /* * aoedev.c * AoE device utility functions; maintains device list. 
@@ -9,6 +9,9 @@ #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> +#include <linux/bitmap.h> +#include <linux/kdev_t.h> +#include <linux/moduleparam.h> #include "aoe.h" static void dummy_timer(ulong); @@ -16,23 +19,121 @@ static void aoedev_freedev(struct aoedev *); static void freetgt(struct aoedev *d, struct aoetgt *t); static void skbpoolfree(struct aoedev *d); +static int aoe_dyndevs = 1; +module_param(aoe_dyndevs, int, 0644); +MODULE_PARM_DESC(aoe_dyndevs, "Use dynamic minor numbers for devices."); + static struct aoedev *devlist; static DEFINE_SPINLOCK(devlist_lock); -struct aoedev * -aoedev_by_aoeaddr(int maj, int min) +/* Because some systems will have one, many, or no + * - partitions, + * - slots per shelf, + * - or shelves, + * we need some flexibility in the way the minor numbers + * are allocated. So they are dynamic. + */ +#define N_DEVS ((1U<<MINORBITS)/AOE_PARTITIONS) + +static DEFINE_SPINLOCK(used_minors_lock); +static DECLARE_BITMAP(used_minors, N_DEVS); + +static int +minor_get_dyn(ulong *sysminor) { - struct aoedev *d; ulong flags; + ulong n; + int error = 0; + + spin_lock_irqsave(&used_minors_lock, flags); + n = find_first_zero_bit(used_minors, N_DEVS); + if (n < N_DEVS) + set_bit(n, used_minors); + else + error = -1; + spin_unlock_irqrestore(&used_minors_lock, flags); + + *sysminor = n * AOE_PARTITIONS; + return error; +} - spin_lock_irqsave(&devlist_lock, flags); +static int +minor_get_static(ulong *sysminor, ulong aoemaj, int aoemin) +{ + ulong flags; + ulong n; + int error = 0; + enum { + /* for backwards compatibility when !aoe_dyndevs, + * a static number of supported slots per shelf */ + NPERSHELF = 16, + }; + + n = aoemaj * NPERSHELF + aoemin; + if (aoemin >= NPERSHELF || n >= N_DEVS) { + pr_err("aoe: %s with e%ld.%d\n", + "cannot use static minor device numbers", + aoemaj, aoemin); + error = -1; + } else { + spin_lock_irqsave(&used_minors_lock, flags); + if (test_bit(n, used_minors)) { + pr_err("aoe: %s %lu\n", + "existing device already has static minor number", + n); + error = -1; + } else + set_bit(n, used_minors); + spin_unlock_irqrestore(&used_minors_lock, flags); + } - for (d=devlist; d; d=d->next) - if (d->aoemajor == maj && d->aoeminor == min) - break; + *sysminor = n; + return error; +} + +static int +minor_get(ulong *sysminor, ulong aoemaj, int aoemin) +{ + if (aoe_dyndevs) + return minor_get_dyn(sysminor); + else + return minor_get_static(sysminor, aoemaj, aoemin); +} + +static void +minor_free(ulong minor) +{ + ulong flags; + + minor /= AOE_PARTITIONS; + BUG_ON(minor >= N_DEVS); + + spin_lock_irqsave(&used_minors_lock, flags); + BUG_ON(!test_bit(minor, used_minors)); + clear_bit(minor, used_minors); + spin_unlock_irqrestore(&used_minors_lock, flags); +} + +/* + * Users who grab a pointer to the device with aoedev_by_aoeaddr + * automatically get a reference count and must be responsible + * for performing a aoedev_put. With the addition of async + * kthread processing I'm no longer confident that we can + * guarantee consistency in the face of device flushes. + * + * For the time being, we only bother to add extra references for + * frames sitting on the iocq. When the kthreads finish processing + * these frames, they will aoedev_put the device. 
+ */ + +void +aoedev_put(struct aoedev *d) +{ + ulong flags; + spin_lock_irqsave(&devlist_lock, flags); + d->ref--; spin_unlock_irqrestore(&devlist_lock, flags); - return d; } static void @@ -47,54 +148,74 @@ dummy_timer(ulong vp) add_timer(&d->timer); } +static void +aoe_failip(struct aoedev *d) +{ + struct request *rq; + struct bio *bio; + unsigned long n; + + aoe_failbuf(d, d->ip.buf); + + rq = d->ip.rq; + if (rq == NULL) + return; + while ((bio = d->ip.nxbio)) { + clear_bit(BIO_UPTODATE, &bio->bi_flags); + d->ip.nxbio = bio->bi_next; + n = (unsigned long) rq->special; + rq->special = (void *) --n; + } + if ((unsigned long) rq->special == 0) + aoe_end_request(d, rq, 0); +} + void aoedev_downdev(struct aoedev *d) { - struct aoetgt **t, **te; - struct frame *f, *e; - struct buf *buf; - struct bio *bio; + struct aoetgt *t, **tt, **te; + struct frame *f; + struct list_head *head, *pos, *nx; + struct request *rq; + int i; - t = d->targets; - te = t + NTARGETS; - for (; t < te && *t; t++) { - f = (*t)->frames; - e = f + (*t)->nframes; - for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) { - if (f->tag == FREETAG || f->buf == NULL) - continue; - buf = f->buf; - bio = buf->bio; - if (--buf->nframesout == 0 - && buf != d->inprocess) { - mempool_free(buf, d->bufpool); - bio_endio(bio, -EIO); + d->flags &= ~DEVFL_UP; + + /* clean out active buffers */ + for (i = 0; i < NFACTIVE; i++) { + head = &d->factive[i]; + list_for_each_safe(pos, nx, head) { + f = list_entry(pos, struct frame, head); + list_del(pos); + if (f->buf) { + f->buf->nframesout--; + aoe_failbuf(d, f->buf); } + aoe_freetframe(f); } - (*t)->maxout = (*t)->nframes; - (*t)->nout = 0; } - buf = d->inprocess; - if (buf) { - bio = buf->bio; - mempool_free(buf, d->bufpool); - bio_endio(bio, -EIO); + /* reset window dressings */ + tt = d->targets; + te = tt + NTARGETS; + for (; tt < te && (t = *tt); tt++) { + t->maxout = t->nframes; + t->nout = 0; } - d->inprocess = NULL; + + /* clean out the in-process request (if any) */ + aoe_failip(d); d->htgt = NULL; - while (!list_empty(&d->bufq)) { - buf = container_of(d->bufq.next, struct buf, bufs); - list_del(d->bufq.next); - bio = buf->bio; - mempool_free(buf, d->bufpool); - bio_endio(bio, -EIO); + /* fast fail all pending I/O */ + if (d->blkq) { + while ((rq = blk_peek_request(d->blkq))) { + blk_start_request(rq); + aoe_end_request(d, rq, 1); + } } if (d->gd) set_capacity(d->gd, 0); - - d->flags &= ~DEVFL_UP; } static void @@ -107,6 +228,7 @@ aoedev_freedev(struct aoedev *d) aoedisk_rm_sysfs(d); del_gendisk(d->gd); put_disk(d->gd); + blk_cleanup_queue(d->blkq); } t = d->targets; e = t + NTARGETS; @@ -115,7 +237,7 @@ aoedev_freedev(struct aoedev *d) if (d->bufpool) mempool_destroy(d->bufpool); skbpoolfree(d); - blk_cleanup_queue(d->blkq); + minor_free(d->sysminor); kfree(d); } @@ -142,7 +264,8 @@ aoedev_flush(const char __user *str, size_t cnt) spin_lock(&d->lock); if ((!all && (d->flags & DEVFL_UP)) || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) - || d->nopen) { + || d->nopen + || d->ref) { spin_unlock(&d->lock); dd = &d->next; continue; @@ -163,12 +286,15 @@ aoedev_flush(const char __user *str, size_t cnt) return 0; } -/* I'm not really sure that this is a realistic problem, but if the -network driver goes gonzo let's just leak memory after complaining. */ +/* This has been confirmed to occur once with Tms=3*1000 due to the + * driver changing link and not processing its transmit ring. The + * problem is hard enough to solve by returning an error that I'm + * still punting on "solving" this. 
+ */ static void skbfree(struct sk_buff *skb) { - enum { Sms = 100, Tms = 3*1000}; + enum { Sms = 250, Tms = 30 * 1000}; int i = Tms / Sms; if (skb == NULL) @@ -182,6 +308,7 @@ skbfree(struct sk_buff *skb) "cannot free skb -- memory leaked."); return; } + skb->truesize -= skb->data_len; skb_shinfo(skb)->nr_frags = skb->data_len = 0; skb_trim(skb, 0); dev_kfree_skb(skb); @@ -198,26 +325,29 @@ skbpoolfree(struct aoedev *d) __skb_queue_head_init(&d->skbpool); } -/* find it or malloc it */ +/* find it or allocate it */ struct aoedev * -aoedev_by_sysminor_m(ulong sysminor) +aoedev_by_aoeaddr(ulong maj, int min, int do_alloc) { struct aoedev *d; + int i; ulong flags; + ulong sysminor; spin_lock_irqsave(&devlist_lock, flags); for (d=devlist; d; d=d->next) - if (d->sysminor == sysminor) + if (d->aoemajor == maj && d->aoeminor == min) { + d->ref++; break; - if (d) + } + if (d || !do_alloc || minor_get(&sysminor, maj, min) < 0) goto out; d = kcalloc(1, sizeof *d, GFP_ATOMIC); if (!d) goto out; INIT_WORK(&d->work, aoecmd_sleepwork); spin_lock_init(&d->lock); - skb_queue_head_init(&d->sendq); skb_queue_head_init(&d->skbpool); init_timer(&d->timer); d->timer.data = (ulong) d; @@ -226,10 +356,12 @@ aoedev_by_sysminor_m(ulong sysminor) add_timer(&d->timer); d->bufpool = NULL; /* defer to aoeblk_gdalloc */ d->tgt = d->targets; - INIT_LIST_HEAD(&d->bufq); + d->ref = 1; + for (i = 0; i < NFACTIVE; i++) + INIT_LIST_HEAD(&d->factive[i]); d->sysminor = sysminor; - d->aoemajor = AOEMAJOR(sysminor); - d->aoeminor = AOEMINOR(sysminor); + d->aoemajor = maj; + d->aoeminor = min; d->mintimer = MINTIMER; d->next = devlist; devlist = d; @@ -241,13 +373,23 @@ aoedev_by_sysminor_m(ulong sysminor) static void freetgt(struct aoedev *d, struct aoetgt *t) { - struct frame *f, *e; + struct frame *f; + struct list_head *pos, *nx, *head; + struct aoeif *ifp; - f = t->frames; - e = f + t->nframes; - for (; f < e; f++) + for (ifp = t->ifs; ifp < &t->ifs[NAOEIFS]; ++ifp) { + if (!ifp->nd) + break; + dev_put(ifp->nd); + } + + head = &t->ffree; + list_for_each_safe(pos, nx, head) { + list_del(pos); + f = list_entry(pos, struct frame, head); skbfree(f->skb); - kfree(t->frames); + kfree(f); + } kfree(t); } @@ -257,6 +399,7 @@ aoedev_exit(void) struct aoedev *d; ulong flags; + aoe_flush_iocq(); while ((d = devlist)) { devlist = d->next; diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c index 7f83ad90e76f..04793c2c701b 100644 --- a/drivers/block/aoe/aoemain.c +++ b/drivers/block/aoe/aoemain.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ +/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. 
*/ /* * aoemain.c * Module initialization routines, discover timer @@ -61,6 +61,7 @@ aoe_exit(void) aoenet_exit(); unregister_blkdev(AOE_MAJOR, DEVICE_NAME); + aoecmd_exit(); aoechr_exit(); aoedev_exit(); aoeblk_exit(); /* free cache after de-allocating bufs */ @@ -83,17 +84,20 @@ aoe_init(void) ret = aoenet_init(); if (ret) goto net_fail; + ret = aoecmd_init(); + if (ret) + goto cmd_fail; ret = register_blkdev(AOE_MAJOR, DEVICE_NAME); if (ret < 0) { printk(KERN_ERR "aoe: can't register major\n"); goto blkreg_fail; } - printk(KERN_INFO "aoe: AoE v%s initialised.\n", VERSION); discover_timer(TINIT); return 0; - blkreg_fail: + aoecmd_exit(); + cmd_fail: aoenet_exit(); net_fail: aoeblk_exit(); diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index 4d3bc0d49df5..162c6471275c 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ +/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */ /* * aoenet.c * Ethernet portion of AoE driver @@ -33,6 +33,9 @@ static char aoe_iflist[IFLISTSZ]; module_param_string(aoe_iflist, aoe_iflist, IFLISTSZ, 0600); MODULE_PARM_DESC(aoe_iflist, "aoe_iflist=\"dev1 [dev2 ...]\""); +static wait_queue_head_t txwq; +static struct ktstate kts; + #ifndef MODULE static int __init aoe_iflist_setup(char *str) { @@ -44,6 +47,23 @@ static int __init aoe_iflist_setup(char *str) __setup("aoe_iflist=", aoe_iflist_setup); #endif +static spinlock_t txlock; +static struct sk_buff_head skbtxq; + +/* enters with txlock held */ +static int +tx(void) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(&skbtxq))) { + spin_unlock_irq(&txlock); + dev_queue_xmit(skb); + spin_lock_irq(&txlock); + } + return 0; +} + int is_aoe_netif(struct net_device *ifp) { @@ -88,10 +108,14 @@ void aoenet_xmit(struct sk_buff_head *queue) { struct sk_buff *skb, *tmp; + ulong flags; skb_queue_walk_safe(queue, skb, tmp) { __skb_unlink(skb, queue); - dev_queue_xmit(skb); + spin_lock_irqsave(&txlock, flags); + skb_queue_tail(&skbtxq, skb); + spin_unlock_irqrestore(&txlock, flags); + wake_up(&txwq); } } @@ -102,7 +126,9 @@ static int aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, struct net_device *orig_dev) { struct aoe_hdr *h; + struct aoe_atahdr *ah; u32 n; + int sn; if (dev_net(ifp) != &init_net) goto exit; @@ -110,13 +136,16 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, skb = skb_share_check(skb, GFP_ATOMIC); if (skb == NULL) return 0; - if (skb_linearize(skb)) - goto exit; if (!is_aoe_netif(ifp)) goto exit; skb_push(skb, ETH_HLEN); /* (1) */ - - h = (struct aoe_hdr *) skb_mac_header(skb); + sn = sizeof(*h) + sizeof(*ah); + if (skb->len >= sn) { + sn -= skb_headlen(skb); + if (sn > 0 && !__pskb_pull_tail(skb, sn)) + goto exit; + } + h = (struct aoe_hdr *) skb->data; n = get_unaligned_be32(&h->tag); if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) goto exit; @@ -137,7 +166,8 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, switch (h->cmd) { case AOECMD_ATA: - aoecmd_ata_rsp(skb); + /* ata_rsp may keep skb for later processing or give it back */ + skb = aoecmd_ata_rsp(skb); break; case AOECMD_CFG: aoecmd_cfg_rsp(skb); @@ -145,8 +175,12 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, default: if (h->cmd >= AOECMD_VEND_MIN) break; /* don't complain about vendor commands */ - printk(KERN_INFO "aoe: unknown cmd %d\n", h->cmd); + pr_info("aoe: unknown 
AoE command type 0x%02x\n", h->cmd); + break; } + + if (!skb) + return 0; exit: dev_kfree_skb(skb); return 0; @@ -160,6 +194,15 @@ static struct packet_type aoe_pt __read_mostly = { int __init aoenet_init(void) { + skb_queue_head_init(&skbtxq); + init_waitqueue_head(&txwq); + spin_lock_init(&txlock); + kts.lock = &txlock; + kts.fn = tx; + kts.waitq = &txwq; + kts.name = "aoe_tx"; + if (aoe_ktstart(&kts)) + return -EAGAIN; dev_add_pack(&aoe_pt); return 0; } @@ -167,6 +210,8 @@ aoenet_init(void) void aoenet_exit(void) { + aoe_ktstop(&kts); + skb_queue_purge(&skbtxq); dev_remove_pack(&aoe_pt); } diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 38aa6dda6b81..da3311129a0c 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c @@ -795,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, } break; case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "%p has protocol error\n", c); break; diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index a7d6347aaa79..17c675c52295 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -672,7 +672,6 @@ static void __reschedule_timeout(int drive, const char *message) if (drive == current_reqD) drive = current_drive; - __cancel_delayed_work(&fd_timeout); if (drive < 0 || drive >= N_DRIVE) { delay = 20UL * HZ; @@ -680,7 +679,7 @@ static void __reschedule_timeout(int drive, const char *message) } else delay = UDP->timeout; - queue_delayed_work(floppy_wq, &fd_timeout, delay); + mod_delayed_work(floppy_wq, &fd_timeout, delay); if (UDP->flags & FD_DEBUG) DPRINT("reschedule timeout %s\n", message); timeout_message = message; @@ -891,7 +890,7 @@ static void unlock_fdc(void) raw_cmd = NULL; command_status = FD_COMMAND_NONE; - __cancel_delayed_work(&fd_timeout); + cancel_delayed_work(&fd_timeout); do_floppy = NULL; cont = NULL; clear_bit(0, &fdc_busy); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 3bba65510d23..e9d594fd12cb 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1038,10 +1038,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info) { int err; struct loop_func_table *xfer; - uid_t uid = current_uid(); + kuid_t uid = current_uid(); if (lo->lo_encrypt_key_size && - lo->lo_key_owner != uid && + !uid_eq(lo->lo_key_owner, uid) && !capable(CAP_SYS_ADMIN)) return -EPERM; if (lo->lo_state != Lo_bound) diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index a8fddeb3d638..f946d31d6917 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -1148,11 +1148,15 @@ static bool mtip_pause_ncq(struct mtip_port *port, reply = port->rxfis + RX_FIS_D2H_REG; task_file_data = readl(port->mmio+PORT_TFDATA); - if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT)) + if (fis->command == ATA_CMD_SEC_ERASE_UNIT) + clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); + + if ((task_file_data & 1)) return false; if (fis->command == ATA_CMD_SEC_ERASE_PREP) { set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); + set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); port->ic_pause_timer = jiffies; return true; } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) && @@ -1900,7 +1904,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, int rv = 0, xfer_sz = command[3]; if (xfer_sz) { - if (user_buffer) + if (!user_buffer) return -EFAULT; buf = dmam_alloc_coherent(&port->dd->pdev->dev, @@ -2043,7 +2047,7 @@ static void 
mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout) *timeout = 240000; /* 4 minutes */ break; case ATA_CMD_STANDBYNOW1: - *timeout = 10000; /* 10 seconds */ + *timeout = 120000; /* 2 minutes */ break; case 0xF7: case 0xFA: @@ -2588,9 +2592,6 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, if (!len || size) return 0; - if (size < 0) - return -EINVAL; - size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) @@ -2660,9 +2661,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, if (!len || size) return 0; - if (size < 0) - return -EINVAL; - size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", dd->port->flags); size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", @@ -3214,8 +3212,8 @@ static int mtip_hw_init(struct driver_data *dd) "Unable to check write protect progress\n"); else dev_info(&dd->pdev->dev, - "Write protect progress: %d%% (%d blocks)\n", - attr242.cur, attr242.data); + "Write protect progress: %u%% (%u blocks)\n", + attr242.cur, le32_to_cpu(attr242.data)); return rv; out3: @@ -3619,6 +3617,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) bio_endio(bio, -ENODATA); return; } + if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) { + bio_endio(bio, -ENODATA); + return; + } } if (unlikely(!bio_has_data(bio))) { @@ -4168,7 +4170,13 @@ static void mtip_pci_shutdown(struct pci_dev *pdev) /* Table of device ids supported by this driver. */ static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = { - { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) }, { 0 } }; @@ -4199,12 +4207,12 @@ static int __init mtip_init(void) { int error; - printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); + pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); /* Allocate a major block device number to use with this driver. 
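 * Passing 0 to register_blkdev() asks the block layer to choose a
 * free major on our behalf; the chosen number comes back as the
 * positive return value, so anything <= 0 is treated as failure.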
*/ error = register_blkdev(0, MTIP_DRV_NAME); if (error <= 0) { - printk(KERN_ERR "Unable to register block device (%d)\n", + pr_err("Unable to register block device (%d)\n", error); return -EBUSY; } @@ -4213,7 +4221,7 @@ static int __init mtip_init(void) if (!dfs_parent) { dfs_parent = debugfs_create_dir("rssd", NULL); if (IS_ERR_OR_NULL(dfs_parent)) { - printk(KERN_WARNING "Error creating debugfs parent\n"); + pr_warn("Error creating debugfs parent\n"); dfs_parent = NULL; } } diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index f51fc23d17bb..18627a1d04c5 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -76,7 +76,13 @@ /* Micron Vendor ID & P320x SSD Device ID */ #define PCI_VENDOR_ID_MICRON 0x1344 -#define P320_DEVICE_ID 0x5150 +#define P320H_DEVICE_ID 0x5150 +#define P320M_DEVICE_ID 0x5151 +#define P320S_DEVICE_ID 0x5152 +#define P325M_DEVICE_ID 0x5153 +#define P420H_DEVICE_ID 0x5160 +#define P420M_DEVICE_ID 0x5161 +#define P425M_DEVICE_ID 0x5163 /* Driver name and version strings */ #define MTIP_DRV_NAME "mtip32xx" @@ -131,10 +137,12 @@ enum { MTIP_PF_SVC_THD_STOP_BIT = 8, /* below are bit numbers in 'dd_flag' defined in driver_data */ + MTIP_DDF_SEC_LOCK_BIT = 0, MTIP_DDF_REMOVE_PENDING_BIT = 1, MTIP_DDF_OVER_TEMP_BIT = 2, MTIP_DDF_WRITE_PROTECT_BIT = 3, MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ + (1 << MTIP_DDF_SEC_LOCK_BIT) | \ (1 << MTIP_DDF_OVER_TEMP_BIT) | \ (1 << MTIP_DDF_WRITE_PROTECT_BIT)), diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index d07c9f7fded6..043ddcca4abf 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -78,6 +78,8 @@ static const char *ioctl_cmd_to_ascii(int cmd) case NBD_SET_SOCK: return "set-sock"; case NBD_SET_BLKSIZE: return "set-blksize"; case NBD_SET_SIZE: return "set-size"; + case NBD_SET_TIMEOUT: return "set-timeout"; + case NBD_SET_FLAGS: return "set-flags"; case NBD_DO_IT: return "do-it"; case NBD_CLEAR_SOCK: return "clear-sock"; case NBD_CLEAR_QUE: return "clear-que"; @@ -96,6 +98,7 @@ static const char *nbdcmd_to_ascii(int cmd) case NBD_CMD_READ: return "read"; case NBD_CMD_WRITE: return "write"; case NBD_CMD_DISC: return "disconnect"; + case NBD_CMD_TRIM: return "trim/discard"; } return "invalid"; } @@ -449,6 +452,14 @@ static void nbd_clear_que(struct nbd_device *nbd) req->errors++; nbd_end_request(req); } + + while (!list_empty(&nbd->waiting_queue)) { + req = list_entry(nbd->waiting_queue.next, struct request, + queuelist); + list_del_init(&req->queuelist); + req->errors++; + nbd_end_request(req); + } } @@ -459,8 +470,12 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) nbd_cmd(req) = NBD_CMD_READ; if (rq_data_dir(req) == WRITE) { - nbd_cmd(req) = NBD_CMD_WRITE; - if (nbd->flags & NBD_READ_ONLY) { + if ((req->cmd_flags & REQ_DISCARD)) { + WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM)); + nbd_cmd(req) = NBD_CMD_TRIM; + } else + nbd_cmd(req) = NBD_CMD_WRITE; + if (nbd->flags & NBD_FLAG_READ_ONLY) { dev_err(disk_to_dev(nbd->disk), "Write on read-only\n"); goto error_out; @@ -598,6 +613,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd->file = NULL; nbd_clear_que(nbd); BUG_ON(!list_empty(&nbd->queue_head)); + BUG_ON(!list_empty(&nbd->waiting_queue)); if (file) fput(file); return 0; @@ -642,6 +658,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd->xmit_timeout = arg * HZ; return 0; + case NBD_SET_FLAGS: + nbd->flags = arg; + return 0; + case 
NBD_SET_SIZE_BLOCKS: nbd->bytesize = ((u64) arg) * nbd->blksize; bdev->bd_inode->i_size = nbd->bytesize; @@ -661,6 +681,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, mutex_unlock(&nbd->tx_lock); + if (nbd->flags & NBD_FLAG_SEND_TRIM) + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, + nbd->disk->queue); + thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name); if (IS_ERR(thread)) { mutex_lock(&nbd->tx_lock); @@ -678,6 +702,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd->file = NULL; nbd_clear_que(nbd); dev_warn(disk_to_dev(nbd->disk), "queue cleared\n"); + queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); if (file) fput(file); nbd->bytesize = 0; @@ -796,6 +821,9 @@ static int __init nbd_init(void) * Tell the block layer that we are not a rotational device */ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); + disk->queue->limits.discard_granularity = 512; + disk->queue->limits.max_discard_sectors = UINT_MAX; + disk->queue->limits.discard_zeroes_data = 0; } if (register_blkdev(NBD_MAJOR, "nbd")) { diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c index 38a2d0631882..931769e133e5 100644 --- a/drivers/block/nvme.c +++ b/drivers/block/nvme.c @@ -79,6 +79,7 @@ struct nvme_dev { char serial[20]; char model[40]; char firmware_rev[8]; + u32 max_hw_sectors; }; /* @@ -835,15 +836,15 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, } static int nvme_get_features(struct nvme_dev *dev, unsigned fid, - unsigned dword11, dma_addr_t dma_addr) + unsigned nsid, dma_addr_t dma_addr) { struct nvme_command c; memset(&c, 0, sizeof(c)); c.features.opcode = nvme_admin_get_features; + c.features.nsid = cpu_to_le32(nsid); c.features.prp1 = cpu_to_le64(dma_addr); c.features.fid = cpu_to_le32(fid); - c.features.dword11 = cpu_to_le32(dword11); return nvme_submit_admin_cmd(dev, &c, NULL); } @@ -862,11 +863,51 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid, return nvme_submit_admin_cmd(dev, &c, result); } +/** + * nvme_cancel_ios - Cancel outstanding I/Os + * @queue: The queue to cancel I/Os on + * @timeout: True to only cancel I/Os which have timed out + */ +static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) +{ + int depth = nvmeq->q_depth - 1; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + unsigned long now = jiffies; + int cmdid; + + for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { + void *ctx; + nvme_completion_fn fn; + static struct nvme_completion cqe = { + .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, + }; + + if (timeout && !time_after(now, info[cmdid].timeout)) + continue; + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); + ctx = cancel_cmdid(nvmeq, cmdid, &fn); + fn(nvmeq->dev, ctx, &cqe); + } +} + +static void nvme_free_queue_mem(struct nvme_queue *nvmeq) +{ + dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), + (void *)nvmeq->cqes, nvmeq->cq_dma_addr); + dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), + nvmeq->sq_cmds, nvmeq->sq_dma_addr); + kfree(nvmeq); +} + static void nvme_free_queue(struct nvme_dev *dev, int qid) { struct nvme_queue *nvmeq = dev->queues[qid]; int vector = dev->entry[nvmeq->cq_vector].vector; + spin_lock_irq(&nvmeq->q_lock); + nvme_cancel_ios(nvmeq, false); + spin_unlock_irq(&nvmeq->q_lock); + irq_set_affinity_hint(vector, NULL); free_irq(vector, nvmeq); @@ -876,18 +917,15 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid) adapter_delete_cq(dev, qid); } - 
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), - (void *)nvmeq->cqes, nvmeq->cq_dma_addr); - dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), - nvmeq->sq_cmds, nvmeq->sq_dma_addr); - kfree(nvmeq); + nvme_free_queue_mem(nvmeq); } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth, int vector) { struct device *dmadev = &dev->pci_dev->dev; - unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info)); + unsigned extra = DIV_ROUND_UP(depth, 8) + (depth * + sizeof(struct nvme_cmd_info)); struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); if (!nvmeq) return NULL; @@ -975,7 +1013,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev) { - int result; + int result = 0; u32 aqa; u64 cap; unsigned long timeout; @@ -1005,17 +1043,22 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev) timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; dev->db_stride = NVME_CAP_STRIDE(cap); - while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { + while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { msleep(100); if (fatal_signal_pending(current)) - return -EINTR; + result = -EINTR; if (time_after(jiffies, timeout)) { dev_err(&dev->pci_dev->dev, "Device not ready; aborting initialisation\n"); - return -ENODEV; + result = -ENODEV; } } + if (result) { + nvme_free_queue_mem(nvmeq); + return result; + } + result = queue_request_irq(dev, nvmeq, "nvme admin"); dev->queues[0] = nvmeq; return result; @@ -1037,6 +1080,8 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, offset = offset_in_page(addr); count = DIV_ROUND_UP(offset + length, PAGE_SIZE); pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); + if (!pages) + return ERR_PTR(-ENOMEM); err = get_user_pages_fast(addr, count, 1, pages); if (err < count) { @@ -1146,14 +1191,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) return status; } -static int nvme_user_admin_cmd(struct nvme_ns *ns, +static int nvme_user_admin_cmd(struct nvme_dev *dev, struct nvme_admin_cmd __user *ucmd) { - struct nvme_dev *dev = ns->dev; struct nvme_admin_cmd cmd; struct nvme_command c; int status, length; - struct nvme_iod *iod; + struct nvme_iod *uninitialized_var(iod); if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -1204,7 +1248,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, case NVME_IOCTL_ID: return ns->ns_id; case NVME_IOCTL_ADMIN_CMD: - return nvme_user_admin_cmd(ns, (void __user *)arg); + return nvme_user_admin_cmd(ns->dev, (void __user *)arg); case NVME_IOCTL_SUBMIT_IO: return nvme_submit_io(ns, (void __user *)arg); default: @@ -1218,26 +1262,6 @@ static const struct block_device_operations nvme_fops = { .compat_ioctl = nvme_ioctl, }; -static void nvme_timeout_ios(struct nvme_queue *nvmeq) -{ - int depth = nvmeq->q_depth - 1; - struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); - unsigned long now = jiffies; - int cmdid; - - for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { - void *ctx; - nvme_completion_fn fn; - static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, }; - - if (!time_after(now, info[cmdid].timeout)) - continue; - dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid); - ctx = cancel_cmdid(nvmeq, cmdid, &fn); - fn(nvmeq->dev, ctx, &cqe); - } -} - static void nvme_resubmit_bios(struct nvme_queue *nvmeq) { while 
(bio_list_peek(&nvmeq->sq_cong)) { @@ -1269,7 +1293,7 @@ static int nvme_kthread(void *data) spin_lock_irq(&nvmeq->q_lock); if (nvme_process_cq(nvmeq)) printk("process_cq did something\n"); - nvme_timeout_ios(nvmeq); + nvme_cancel_ios(nvmeq, true); nvme_resubmit_bios(nvmeq); spin_unlock_irq(&nvmeq->q_lock); } @@ -1339,6 +1363,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, ns->disk = disk; lbaf = id->flbas & 0xf; ns->lba_shift = id->lbaf[lbaf].ds; + blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); + if (dev->max_hw_sectors) + blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); disk->major = nvme_major; disk->minors = NVME_MINORS; @@ -1383,7 +1410,7 @@ static int set_queue_count(struct nvme_dev *dev, int count) static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) { - int result, cpu, i, nr_io_queues, db_bar_size; + int result, cpu, i, nr_io_queues, db_bar_size, q_depth; nr_io_queues = num_online_cpus(); result = set_queue_count(dev, nr_io_queues); @@ -1429,9 +1456,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) cpu = cpumask_next(cpu, cpu_online_mask); } + q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1, + NVME_Q_DEPTH); for (i = 0; i < nr_io_queues; i++) { - dev->queues[i + 1] = nvme_create_queue(dev, i + 1, - NVME_Q_DEPTH, i); + dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i); if (IS_ERR(dev->queues[i + 1])) return PTR_ERR(dev->queues[i + 1]); dev->queue_count++; @@ -1480,6 +1508,10 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev) memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); + if (ctrl->mdts) { + int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; + dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); + } id_ns = mem; for (i = 1; i <= nn; i++) { @@ -1523,8 +1555,6 @@ static int nvme_dev_remove(struct nvme_dev *dev) list_del(&dev->node); spin_unlock(&dev_list_lock); - /* TODO: wait all I/O finished or cancel them */ - list_for_each_entry_safe(ns, next, &dev->namespaces, list) { list_del(&ns->list); del_gendisk(ns->disk); @@ -1560,15 +1590,33 @@ static void nvme_release_prp_pools(struct nvme_dev *dev) dma_pool_destroy(dev->prp_small_pool); } -/* XXX: Use an ida or something to let remove / add work correctly */ -static void nvme_set_instance(struct nvme_dev *dev) +static DEFINE_IDA(nvme_instance_ida); + +static int nvme_set_instance(struct nvme_dev *dev) { - static int instance; - dev->instance = instance++; + int instance, error; + + do { + if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) + return -ENODEV; + + spin_lock(&dev_list_lock); + error = ida_get_new(&nvme_instance_ida, &instance); + spin_unlock(&dev_list_lock); + } while (error == -EAGAIN); + + if (error) + return -ENODEV; + + dev->instance = instance; + return 0; } static void nvme_release_instance(struct nvme_dev *dev) { + spin_lock(&dev_list_lock); + ida_remove(&nvme_instance_ida, dev->instance); + spin_unlock(&dev_list_lock); } static int __devinit nvme_probe(struct pci_dev *pdev, @@ -1601,7 +1649,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - nvme_set_instance(dev); + result = nvme_set_instance(dev); + if (result) + goto disable; + dev->entry[0].vector = pdev->irq; result = nvme_setup_prp_pools(dev); @@ -1675,7 +1726,7 @@ static void __devexit nvme_remove(struct 
pci_dev *pdev) #define nvme_suspend NULL #define nvme_resume NULL -static struct pci_error_handlers nvme_err_handler = { +static const struct pci_error_handlers nvme_err_handler = { .error_detected = nvme_error_detected, .mmio_enabled = nvme_dump_registers, .link_reset = nvme_link_reset, @@ -1704,15 +1755,17 @@ static struct pci_driver nvme_driver = { static int __init nvme_init(void) { - int result = -EBUSY; + int result; nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); if (IS_ERR(nvme_thread)) return PTR_ERR(nvme_thread); - nvme_major = register_blkdev(nvme_major, "nvme"); - if (nvme_major <= 0) + result = register_blkdev(nvme_major, "nvme"); + if (result < 0) goto kill_kthread; + else if (result > 0) + nvme_major = result; result = pci_register_driver(&nvme_driver); if (result) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 9917943a3572..bb3d9be3b1b4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -41,6 +41,8 @@ #include "rbd_types.h" +#define RBD_DEBUG /* Activate rbd_assert() calls */ + /* * The basic unit of block I/O is a sector. It is interpreted in a * number of contexts in Linux (blk, bio, genhd), but the default is @@ -50,16 +52,24 @@ #define SECTOR_SHIFT 9 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) +/* It might be useful to have this defined elsewhere too */ + +#define U64_MAX ((u64) (~0ULL)) + #define RBD_DRV_NAME "rbd" #define RBD_DRV_NAME_LONG "rbd (rados block device)" #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */ #define RBD_MAX_SNAP_NAME_LEN 32 +#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */ #define RBD_MAX_OPT_LEN 1024 #define RBD_SNAP_HEAD_NAME "-" +#define RBD_IMAGE_ID_LEN_MAX 64 +#define RBD_OBJ_PREFIX_LEN_MAX 64 + /* * An RBD device name will be "rbd#", where the "rbd" comes from * RBD_DRV_NAME above, and # is a unique integer identifier. @@ -69,21 +79,22 @@ #define DEV_NAME_LEN 32 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1) -#define RBD_NOTIFY_TIMEOUT_DEFAULT 10 +#define RBD_READ_ONLY_DEFAULT false /* * block device image metadata (in-memory version) */ struct rbd_image_header { - u64 image_size; + /* These four fields never change for a given rbd image */ char *object_prefix; + u64 features; __u8 obj_order; __u8 crypt_type; __u8 comp_type; - struct ceph_snap_context *snapc; - size_t snap_names_len; - u32 total_snaps; + /* The remaining fields need to be updated occasionally */ + u64 image_size; + struct ceph_snap_context *snapc; char *snap_names; u64 *snap_sizes; @@ -91,7 +102,7 @@ struct rbd_image_header { }; struct rbd_options { - int notify_timeout; + bool read_only; }; /* @@ -99,7 +110,6 @@ struct rbd_options { */ struct rbd_client { struct ceph_client *client; - struct rbd_options *rbd_opts; struct kref kref; struct list_head node; }; @@ -141,6 +151,16 @@ struct rbd_snap { u64 size; struct list_head node; u64 id; + u64 features; +}; + +struct rbd_mapping { + char *snap_name; + u64 snap_id; + u64 size; + u64 features; + bool snap_exists; + bool read_only; }; /* @@ -151,8 +171,9 @@ struct rbd_device { int major; /* blkdev assigned major */ struct gendisk *disk; /* blkdev's gendisk and rq */ - struct request_queue *q; + u32 image_format; /* Either 1 or 2 */ + struct rbd_options rbd_opts; struct rbd_client *rbd_client; char name[DEV_NAME_LEN]; /* blkdev name, e.g. 
rbd3 */ @@ -160,6 +181,8 @@ struct rbd_device { spinlock_t lock; /* queue lock */ struct rbd_image_header header; + char *image_id; + size_t image_id_len; char *image_name; size_t image_name_len; char *header_name; @@ -171,13 +194,8 @@ struct rbd_device { /* protects updating the header */ struct rw_semaphore header_rwsem; - /* name of the snapshot this device reads from */ - char *snap_name; - /* id of the snapshot this device reads from */ - u64 snap_id; /* current snapshot id */ - /* whether the snap_id this device reads from still exists */ - bool snap_exists; - int read_only; + + struct rbd_mapping mapping; struct list_head node; @@ -196,12 +214,10 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock); static LIST_HEAD(rbd_client_list); /* clients */ static DEFINE_SPINLOCK(rbd_client_list_lock); -static int __rbd_init_snaps_header(struct rbd_device *rbd_dev); +static int rbd_dev_snaps_update(struct rbd_device *rbd_dev); +static int rbd_dev_snaps_register(struct rbd_device *rbd_dev); + static void rbd_dev_release(struct device *dev); -static ssize_t rbd_snap_add(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count); static void __rbd_remove_snap_dev(struct rbd_snap *snap); static ssize_t rbd_add(struct bus_type *bus, const char *buf, @@ -229,6 +245,18 @@ static struct device rbd_root_dev = { .release = rbd_root_dev_release, }; +#ifdef RBD_DEBUG +#define rbd_assert(expr) \ + if (unlikely(!(expr))) { \ + printk(KERN_ERR "\nAssertion failure in %s() " \ + "at line %d:\n\n" \ + "\trbd_assert(%s);\n\n", \ + __func__, __LINE__, #expr); \ + BUG(); \ + } +#else /* !RBD_DEBUG */ +# define rbd_assert(expr) ((void) 0) +#endif /* !RBD_DEBUG */ static struct device *rbd_get_dev(struct rbd_device *rbd_dev) { @@ -246,13 +274,12 @@ static int rbd_open(struct block_device *bdev, fmode_t mode) { struct rbd_device *rbd_dev = bdev->bd_disk->private_data; - rbd_get_dev(rbd_dev); - - set_device_ro(bdev, rbd_dev->read_only); - - if ((mode & FMODE_WRITE) && rbd_dev->read_only) + if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only) return -EROFS; + rbd_get_dev(rbd_dev); + set_device_ro(bdev, rbd_dev->mapping.read_only); + return 0; } @@ -275,8 +302,7 @@ static const struct block_device_operations rbd_bd_ops = { * Initialize an rbd client instance. * We own *ceph_opts. */ -static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts, - struct rbd_options *rbd_opts) +static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) { struct rbd_client *rbdc; int ret = -ENOMEM; @@ -300,8 +326,6 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts, if (ret < 0) goto out_err; - rbdc->rbd_opts = rbd_opts; - spin_lock(&rbd_client_list_lock); list_add_tail(&rbdc->node, &rbd_client_list); spin_unlock(&rbd_client_list_lock); @@ -323,36 +347,52 @@ out_opt: } /* - * Find a ceph client with specific addr and configuration. + * Find a ceph client with specific addr and configuration. If + * found, bump its reference count. 
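+ * Both the list walk and the kref_get() happen under
+ * rbd_client_list_lock, so a client found here cannot disappear
+ * before the caller drops the reference it is handed.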
*/ -static struct rbd_client *__rbd_client_find(struct ceph_options *ceph_opts) +static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) { struct rbd_client *client_node; + bool found = false; if (ceph_opts->flags & CEPH_OPT_NOSHARE) return NULL; - list_for_each_entry(client_node, &rbd_client_list, node) - if (!ceph_compare_options(ceph_opts, client_node->client)) - return client_node; - return NULL; + spin_lock(&rbd_client_list_lock); + list_for_each_entry(client_node, &rbd_client_list, node) { + if (!ceph_compare_options(ceph_opts, client_node->client)) { + kref_get(&client_node->kref); + found = true; + break; + } + } + spin_unlock(&rbd_client_list_lock); + + return found ? client_node : NULL; } /* * mount options */ enum { - Opt_notify_timeout, Opt_last_int, /* int args above */ Opt_last_string, /* string args above */ + Opt_read_only, + Opt_read_write, + /* Boolean args above */ + Opt_last_bool, }; static match_table_t rbd_opts_tokens = { - {Opt_notify_timeout, "notify_timeout=%d"}, /* int args above */ /* string args above */ + {Opt_read_only, "mapping.read_only"}, + {Opt_read_only, "ro"}, /* Alternate spelling */ + {Opt_read_write, "read_write"}, + {Opt_read_write, "rw"}, /* Alternate spelling */ + /* Boolean args above */ {-1, NULL} }; @@ -377,16 +417,22 @@ static int parse_rbd_opts_token(char *c, void *private) } else if (token > Opt_last_int && token < Opt_last_string) { dout("got string token %d val %s\n", token, argstr[0].from); + } else if (token > Opt_last_string && token < Opt_last_bool) { + dout("got Boolean token %d\n", token); } else { dout("got token %d\n", token); } switch (token) { - case Opt_notify_timeout: - rbd_opts->notify_timeout = intval; + case Opt_read_only: + rbd_opts->read_only = true; + break; + case Opt_read_write: + rbd_opts->read_only = false; break; default: - BUG_ON(token); + rbd_assert(false); + break; } return 0; } @@ -395,48 +441,33 @@ static int parse_rbd_opts_token(char *c, void *private) * Get a ceph client with specific addr and configuration, if one does * not exist create it. 
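 * On success, rbd_dev->rbd_client is set and 0 is returned; any
 * failure is reported as a negative errno.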
*/ -static struct rbd_client *rbd_get_client(const char *mon_addr, - size_t mon_addr_len, - char *options) +static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr, + size_t mon_addr_len, char *options) { - struct rbd_client *rbdc; + struct rbd_options *rbd_opts = &rbd_dev->rbd_opts; struct ceph_options *ceph_opts; - struct rbd_options *rbd_opts; - - rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL); - if (!rbd_opts) - return ERR_PTR(-ENOMEM); + struct rbd_client *rbdc; - rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT; + rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; ceph_opts = ceph_parse_options(options, mon_addr, mon_addr + mon_addr_len, parse_rbd_opts_token, rbd_opts); - if (IS_ERR(ceph_opts)) { - kfree(rbd_opts); - return ERR_CAST(ceph_opts); - } + if (IS_ERR(ceph_opts)) + return PTR_ERR(ceph_opts); - spin_lock(&rbd_client_list_lock); - rbdc = __rbd_client_find(ceph_opts); + rbdc = rbd_client_find(ceph_opts); if (rbdc) { /* using an existing client */ - kref_get(&rbdc->kref); - spin_unlock(&rbd_client_list_lock); - ceph_destroy_options(ceph_opts); - kfree(rbd_opts); - - return rbdc; + } else { + rbdc = rbd_client_create(ceph_opts); + if (IS_ERR(rbdc)) + return PTR_ERR(rbdc); } - spin_unlock(&rbd_client_list_lock); - - rbdc = rbd_client_create(ceph_opts, rbd_opts); + rbd_dev->rbd_client = rbdc; - if (IS_ERR(rbdc)) - kfree(rbd_opts); - - return rbdc; + return 0; } /* @@ -454,7 +485,6 @@ static void rbd_client_release(struct kref *kref) spin_unlock(&rbd_client_list_lock); ceph_destroy_client(rbdc->client); - kfree(rbdc->rbd_opts); kfree(rbdc); } @@ -480,10 +510,38 @@ static void rbd_coll_release(struct kref *kref) kfree(coll); } +static bool rbd_image_format_valid(u32 image_format) +{ + return image_format == 1 || image_format == 2; +} + static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) { - return !memcmp(&ondisk->text, - RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)); + size_t size; + u32 snap_count; + + /* The header has to start with the magic rbd header text */ + if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT))) + return false; + + /* + * The size of a snapshot header has to fit in a size_t, and + * that limits the number of snapshots. + */ + snap_count = le32_to_cpu(ondisk->snap_count); + size = SIZE_MAX - sizeof (struct ceph_snap_context); + if (snap_count > size / sizeof (__le64)) + return false; + + /* + * Not only that, but the size of the entire the snapshot + * header must also be representable in a size_t. + */ + size -= snap_count * sizeof (__le64); + if ((u64) size < le64_to_cpu(ondisk->snap_names_len)) + return false; + + return true; } /* @@ -491,179 +549,203 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) * header. 
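 * On failure, every allocation made along the way is undone, so the
 * caller never sees a partially-populated header.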
*/ static int rbd_header_from_disk(struct rbd_image_header *header, - struct rbd_image_header_ondisk *ondisk, - u32 allocated_snaps) + struct rbd_image_header_ondisk *ondisk) { u32 snap_count; + size_t len; + size_t size; + u32 i; - if (!rbd_dev_ondisk_valid(ondisk)) - return -ENXIO; + memset(header, 0, sizeof (*header)); snap_count = le32_to_cpu(ondisk->snap_count); - if (snap_count > (SIZE_MAX - sizeof(struct ceph_snap_context)) - / sizeof (u64)) - return -EINVAL; - header->snapc = kmalloc(sizeof(struct ceph_snap_context) + - snap_count * sizeof(u64), - GFP_KERNEL); - if (!header->snapc) + + len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix)); + header->object_prefix = kmalloc(len + 1, GFP_KERNEL); + if (!header->object_prefix) return -ENOMEM; + memcpy(header->object_prefix, ondisk->object_prefix, len); + header->object_prefix[len] = '\0'; if (snap_count) { - header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); - header->snap_names = kmalloc(header->snap_names_len, - GFP_KERNEL); + u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); + + /* Save a copy of the snapshot names */ + + if (snap_names_len > (u64) SIZE_MAX) + return -EIO; + header->snap_names = kmalloc(snap_names_len, GFP_KERNEL); if (!header->snap_names) - goto err_snapc; - header->snap_sizes = kmalloc(snap_count * sizeof(u64), - GFP_KERNEL); + goto out_err; + /* + * Note that rbd_dev_v1_header_read() guarantees + * the ondisk buffer we're working with has + * snap_names_len bytes beyond the end of the + * snapshot id array, this memcpy() is safe. + */ + memcpy(header->snap_names, &ondisk->snaps[snap_count], + snap_names_len); + + /* Record each snapshot's size */ + + size = snap_count * sizeof (*header->snap_sizes); + header->snap_sizes = kmalloc(size, GFP_KERNEL); if (!header->snap_sizes) - goto err_names; + goto out_err; + for (i = 0; i < snap_count; i++) + header->snap_sizes[i] = + le64_to_cpu(ondisk->snaps[i].image_size); } else { WARN_ON(ondisk->snap_names_len); - header->snap_names_len = 0; header->snap_names = NULL; header->snap_sizes = NULL; } - header->object_prefix = kmalloc(sizeof (ondisk->block_name) + 1, - GFP_KERNEL); - if (!header->object_prefix) - goto err_sizes; - - memcpy(header->object_prefix, ondisk->block_name, - sizeof(ondisk->block_name)); - header->object_prefix[sizeof (ondisk->block_name)] = '\0'; - - header->image_size = le64_to_cpu(ondisk->image_size); + header->features = 0; /* No features support in v1 images */ header->obj_order = ondisk->options.order; header->crypt_type = ondisk->options.crypt_type; header->comp_type = ondisk->options.comp_type; + /* Allocate and fill in the snapshot context */ + + header->image_size = le64_to_cpu(ondisk->image_size); + size = sizeof (struct ceph_snap_context); + size += snap_count * sizeof (header->snapc->snaps[0]); + header->snapc = kzalloc(size, GFP_KERNEL); + if (!header->snapc) + goto out_err; + atomic_set(&header->snapc->nref, 1); header->snapc->seq = le64_to_cpu(ondisk->snap_seq); header->snapc->num_snaps = snap_count; - header->total_snaps = snap_count; - - if (snap_count && allocated_snaps == snap_count) { - int i; - - for (i = 0; i < snap_count; i++) { - header->snapc->snaps[i] = - le64_to_cpu(ondisk->snaps[i].id); - header->snap_sizes[i] = - le64_to_cpu(ondisk->snaps[i].image_size); - } - - /* copy snapshot names */ - memcpy(header->snap_names, &ondisk->snaps[snap_count], - header->snap_names_len); - } + for (i = 0; i < snap_count; i++) + header->snapc->snaps[i] = + le64_to_cpu(ondisk->snaps[i].id); return 0; 
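For orientation: a v1 header stores its snapshot names as one contiguous
block of NUL-terminated strings placed immediately after the snapshot id
array, which is why the memcpy above can lift them all out in one go.
Indexing into such a block is just a chain of strlen() hops; a minimal
standalone sketch (hypothetical helper, not part of this patch):

	#include <string.h>

	/* Return the which'th NUL-terminated string in a packed block. */
	static const char *nth_name(const char *names, unsigned int which)
	{
		while (which--)
			names += strlen(names) + 1;	/* hop past one name */
		return names;
	}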
-err_sizes: +out_err: kfree(header->snap_sizes); header->snap_sizes = NULL; -err_names: kfree(header->snap_names); header->snap_names = NULL; -err_snapc: - kfree(header->snapc); - header->snapc = NULL; + kfree(header->object_prefix); + header->object_prefix = NULL; return -ENOMEM; } -static int snap_by_name(struct rbd_image_header *header, const char *snap_name, - u64 *seq, u64 *size) +static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name) { - int i; - char *p = header->snap_names; - for (i = 0; i < header->total_snaps; i++) { - if (!strcmp(snap_name, p)) { + struct rbd_snap *snap; - /* Found it. Pass back its id and/or size */ + list_for_each_entry(snap, &rbd_dev->snaps, node) { + if (!strcmp(snap_name, snap->name)) { + rbd_dev->mapping.snap_id = snap->id; + rbd_dev->mapping.size = snap->size; + rbd_dev->mapping.features = snap->features; - if (seq) - *seq = header->snapc->snaps[i]; - if (size) - *size = header->snap_sizes[i]; - return i; + return 0; } - p += strlen(p) + 1; /* Skip ahead to the next name */ } + return -ENOENT; } -static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size) +static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name) { int ret; - down_write(&rbd_dev->header_rwsem); - - if (!memcmp(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME, + if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME, sizeof (RBD_SNAP_HEAD_NAME))) { - rbd_dev->snap_id = CEPH_NOSNAP; - rbd_dev->snap_exists = false; - rbd_dev->read_only = 0; - if (size) - *size = rbd_dev->header.image_size; + rbd_dev->mapping.snap_id = CEPH_NOSNAP; + rbd_dev->mapping.size = rbd_dev->header.image_size; + rbd_dev->mapping.features = rbd_dev->header.features; + rbd_dev->mapping.snap_exists = false; + rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only; + ret = 0; } else { - u64 snap_id = 0; - - ret = snap_by_name(&rbd_dev->header, rbd_dev->snap_name, - &snap_id, size); + ret = snap_by_name(rbd_dev, snap_name); if (ret < 0) goto done; - rbd_dev->snap_id = snap_id; - rbd_dev->snap_exists = true; - rbd_dev->read_only = 1; + rbd_dev->mapping.snap_exists = true; + rbd_dev->mapping.read_only = true; } - - ret = 0; + rbd_dev->mapping.snap_name = snap_name; done: - up_write(&rbd_dev->header_rwsem); return ret; } static void rbd_header_free(struct rbd_image_header *header) { kfree(header->object_prefix); + header->object_prefix = NULL; kfree(header->snap_sizes); + header->snap_sizes = NULL; kfree(header->snap_names); + header->snap_names = NULL; ceph_put_snap_context(header->snapc); + header->snapc = NULL; } -/* - * get the actual striped segment name, offset and length - */ -static u64 rbd_get_segment(struct rbd_image_header *header, - const char *object_prefix, - u64 ofs, u64 len, - char *seg_name, u64 *segofs) +static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) { - u64 seg = ofs >> header->obj_order; + char *name; + u64 segment; + int ret; - if (seg_name) - snprintf(seg_name, RBD_MAX_SEG_NAME_LEN, - "%s.%012llx", object_prefix, seg); + name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); + if (!name) + return NULL; + segment = offset >> rbd_dev->header.obj_order; + ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx", + rbd_dev->header.object_prefix, segment); + if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) { + pr_err("error formatting segment name for #%llu (%d)\n", + segment, ret); + kfree(name); + name = NULL; + } - ofs = ofs & ((1 << header->obj_order) - 1); - len = min_t(u64, len, (1 << header->obj_order) - ofs); + return name; +} - if (segofs) - *segofs = ofs; 
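The removed rbd_get_segment() above is replaced by single-purpose
helpers: rbd_segment_name() earlier in this hunk, plus
rbd_segment_offset() and rbd_segment_length() just below. The
arithmetic they share is plain power-of-two masking on the object
order; a minimal standalone sketch of the length clamp, assuming order
is the per-object size shift (hypothetical names, not part of this
patch):

	#include <stdint.h>

	/*
	 * Clamp an I/O of 'len' bytes at image offset 'ofs' so that it
	 * does not cross the (1 << order)-byte object boundary.
	 */
	static uint64_t segment_length(uint64_t ofs, uint64_t len, int order)
	{
		uint64_t segment_size = (uint64_t)1 << order;
		uint64_t segment_ofs = ofs & (segment_size - 1);

		if (segment_ofs + len > segment_size)
			len = segment_size - segment_ofs;
		return len;
	}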
+static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) +{ + u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; + + return offset & (segment_size - 1); +} + +static u64 rbd_segment_length(struct rbd_device *rbd_dev, + u64 offset, u64 length) +{ + u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; - return len; + offset &= segment_size - 1; + + rbd_assert(length <= U64_MAX - offset); + if (offset + length > segment_size) + length = segment_size - offset; + + return length; } static int rbd_get_num_segments(struct rbd_image_header *header, u64 ofs, u64 len) { - u64 start_seg = ofs >> header->obj_order; - u64 end_seg = (ofs + len - 1) >> header->obj_order; + u64 start_seg; + u64 end_seg; + + if (!len) + return 0; + if (len - 1 > U64_MAX - ofs) + return -ERANGE; + + start_seg = ofs >> header->obj_order; + end_seg = (ofs + len - 1) >> header->obj_order; + return end_seg - start_seg + 1; } @@ -725,7 +807,9 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next, struct bio_pair **bp, int len, gfp_t gfpmask) { - struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL; + struct bio *old_chain = *old; + struct bio *new_chain = NULL; + struct bio *tail; int total = 0; if (*bp) { @@ -734,9 +818,12 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next, } while (old_chain && (total < len)) { + struct bio *tmp; + tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs); if (!tmp) goto err_out; + gfpmask &= ~__GFP_WAIT; /* can't wait after the first */ if (total + old_chain->bi_size > len) { struct bio_pair *bp; @@ -764,24 +851,18 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next, } tmp->bi_bdev = NULL; - gfpmask &= ~__GFP_WAIT; tmp->bi_next = NULL; - - if (!new_chain) { - new_chain = tail = tmp; - } else { + if (new_chain) tail->bi_next = tmp; - tail = tmp; - } + else + new_chain = tmp; + tail = tmp; old_chain = old_chain->bi_next; total += tmp->bi_size; } - BUG_ON(total < len); - - if (tail) - tail->bi_next = NULL; + rbd_assert(total == len); *old = old_chain; @@ -939,8 +1020,9 @@ static int rbd_do_request(struct request *rq, layout->fl_stripe_count = cpu_to_le32(1); layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id); - ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, - req, ops); + ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, + req, ops); + rbd_assert(ret == 0); ceph_osdc_build_request(req, ofs, &len, ops, @@ -1031,8 +1113,8 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev, int flags, struct ceph_osd_req_op *ops, const char *object_name, - u64 ofs, u64 len, - char *buf, + u64 ofs, u64 inbound_size, + char *inbound, struct ceph_osd_request **linger_req, u64 *ver) { @@ -1040,15 +1122,15 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev, struct page **pages; int num_pages; - BUG_ON(ops == NULL); + rbd_assert(ops != NULL); - num_pages = calc_pages_for(ofs , len); + num_pages = calc_pages_for(ofs, inbound_size); pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) return PTR_ERR(pages); ret = rbd_do_request(NULL, rbd_dev, snapc, snapid, - object_name, ofs, len, NULL, + object_name, ofs, inbound_size, NULL, pages, num_pages, flags, ops, @@ -1058,8 +1140,8 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev, if (ret < 0) goto done; - if ((flags & CEPH_OSD_FLAG_READ) && buf) - ret = ceph_copy_from_page_vector(pages, buf, ofs, ret); + if ((flags & CEPH_OSD_FLAG_READ) && inbound) + ret 
= ceph_copy_from_page_vector(pages, inbound, ofs, ret); done: ceph_release_page_vector(pages, num_pages); @@ -1086,14 +1168,11 @@ static int rbd_do_op(struct request *rq, struct ceph_osd_req_op *ops; u32 payload_len; - seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); + seg_name = rbd_segment_name(rbd_dev, ofs); if (!seg_name) return -ENOMEM; - - seg_len = rbd_get_segment(&rbd_dev->header, - rbd_dev->header.object_prefix, - ofs, len, - seg_name, &seg_ofs); + seg_len = rbd_segment_length(rbd_dev, ofs, len); + seg_ofs = rbd_segment_offset(rbd_dev, ofs); payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0); @@ -1105,7 +1184,7 @@ static int rbd_do_op(struct request *rq, /* we've taken care of segment sizes earlier when we cloned the bios. We should never have a segment truncated at this point */ - BUG_ON(seg_len < len); + rbd_assert(seg_len == len); ret = rbd_do_request(rq, rbd_dev, snapc, snapid, seg_name, seg_ofs, seg_len, @@ -1307,89 +1386,36 @@ static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev) return ret; } -struct rbd_notify_info { - struct rbd_device *rbd_dev; -}; - -static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data) -{ - struct rbd_device *rbd_dev = (struct rbd_device *)data; - if (!rbd_dev) - return; - - dout("rbd_notify_cb %s notify_id=%llu opcode=%u\n", - rbd_dev->header_name, (unsigned long long) notify_id, - (unsigned int) opcode); -} - -/* - * Request sync osd notify - */ -static int rbd_req_sync_notify(struct rbd_device *rbd_dev) -{ - struct ceph_osd_req_op *ops; - struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - struct ceph_osd_event *event; - struct rbd_notify_info info; - int payload_len = sizeof(u32) + sizeof(u32); - int ret; - - ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY, payload_len); - if (!ops) - return -ENOMEM; - - info.rbd_dev = rbd_dev; - - ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1, - (void *)&info, &event); - if (ret < 0) - goto fail; - - ops[0].watch.ver = 1; - ops[0].watch.flag = 1; - ops[0].watch.cookie = event->cookie; - ops[0].watch.prot_ver = RADOS_NOTIFY_VER; - ops[0].watch.timeout = 12; - - ret = rbd_req_sync_op(rbd_dev, NULL, - CEPH_NOSNAP, - CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - ops, - rbd_dev->header_name, - 0, 0, NULL, NULL, NULL); - if (ret < 0) - goto fail_event; - - ret = ceph_osdc_wait_event(event, CEPH_OSD_TIMEOUT_DEFAULT); - dout("ceph_osdc_wait_event returned %d\n", ret); - rbd_destroy_ops(ops); - return 0; - -fail_event: - ceph_osdc_cancel_event(event); -fail: - rbd_destroy_ops(ops); - return ret; -} - /* - * Request sync osd read + * Synchronous osd object method call */ static int rbd_req_sync_exec(struct rbd_device *rbd_dev, const char *object_name, const char *class_name, const char *method_name, - const char *data, - int len, + const char *outbound, + size_t outbound_size, + char *inbound, + size_t inbound_size, + int flags, u64 *ver) { struct ceph_osd_req_op *ops; int class_name_len = strlen(class_name); int method_name_len = strlen(method_name); + int payload_size; int ret; - ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, - class_name_len + method_name_len + len); + /* + * Any input parameters required by the method we're calling + * will be sent along with the class and method names as + * part of the message payload. That data and its size are + * supplied via the indata and indata_len fields (named from + * the perspective of the server side) in the OSD request + * operation. 
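+ * Conversely, when the call is made with CEPH_OSD_FLAG_READ, any
+ * data the method returns is copied back into the caller's inbound
+ * buffer, capped at inbound_size bytes.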
+ */ + payload_size = class_name_len + method_name_len + outbound_size; + ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size); if (!ops) return -ENOMEM; @@ -1398,14 +1424,14 @@ static int rbd_req_sync_exec(struct rbd_device *rbd_dev, ops[0].cls.method_name = method_name; ops[0].cls.method_len = (__u8) method_name_len; ops[0].cls.argc = 0; - ops[0].cls.indata = data; - ops[0].cls.indata_len = len; + ops[0].cls.indata = outbound; + ops[0].cls.indata_len = outbound_size; ret = rbd_req_sync_op(rbd_dev, NULL, CEPH_NOSNAP, - CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - ops, - object_name, 0, 0, NULL, NULL, ver); + flags, ops, + object_name, 0, inbound_size, inbound, + NULL, ver); rbd_destroy_ops(ops); @@ -1447,10 +1473,6 @@ static void rbd_rq_fn(struct request_queue *q) struct rbd_req_coll *coll; struct ceph_snap_context *snapc; - /* peek at request from block layer */ - if (!rq) - break; - dout("fetched request\n"); /* filter out block requests we don't understand */ @@ -1465,7 +1487,7 @@ static void rbd_rq_fn(struct request_queue *q) size = blk_rq_bytes(rq); ofs = blk_rq_pos(rq) * SECTOR_SIZE; rq_bio = rq->bio; - if (do_write && rbd_dev->read_only) { + if (do_write && rbd_dev->mapping.read_only) { __blk_end_request_all(rq, -EROFS); continue; } @@ -1474,7 +1496,8 @@ static void rbd_rq_fn(struct request_queue *q) down_read(&rbd_dev->header_rwsem); - if (rbd_dev->snap_id != CEPH_NOSNAP && !rbd_dev->snap_exists) { + if (rbd_dev->mapping.snap_id != CEPH_NOSNAP && + !rbd_dev->mapping.snap_exists) { up_read(&rbd_dev->header_rwsem); dout("request for non-existent snapshot"); spin_lock_irq(q->queue_lock); @@ -1491,6 +1514,12 @@ static void rbd_rq_fn(struct request_queue *q) size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE); num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); + if (num_segs <= 0) { + spin_lock_irq(q->queue_lock); + __blk_end_request_all(rq, num_segs); + ceph_put_snap_context(snapc); + continue; + } coll = rbd_alloc_coll(num_segs); if (!coll) { spin_lock_irq(q->queue_lock); @@ -1502,10 +1531,7 @@ static void rbd_rq_fn(struct request_queue *q) do { /* a bio clone to be passed down to OSD req */ dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt); - op_size = rbd_get_segment(&rbd_dev->header, - rbd_dev->header.object_prefix, - ofs, size, - NULL, NULL); + op_size = rbd_segment_length(rbd_dev, ofs, size); kref_get(&coll->kref); bio = bio_chain_clone(&rq_bio, &next_bio, &bp, op_size, GFP_ATOMIC); @@ -1525,7 +1551,7 @@ static void rbd_rq_fn(struct request_queue *q) coll, cur_seg); else rbd_req_read(rq, rbd_dev, - rbd_dev->snap_id, + rbd_dev->mapping.snap_id, ofs, op_size, bio, coll, cur_seg); @@ -1581,8 +1607,6 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) if (!disk) return; - rbd_header_free(&rbd_dev->header); - if (disk->flags & GENHD_FL_UP) del_gendisk(disk); if (disk->queue) @@ -1591,105 +1615,96 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) } /* - * reload the ondisk the header + * Read the complete header for the given rbd device. + * + * Returns a pointer to a dynamically-allocated buffer containing + * the complete and validated header. Caller can pass the address + * of a variable that will be filled in with the version of the + * header object at the time it was read. + * + * Returns a pointer-coded errno if a failure occurs. 
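+ * On success the caller owns the returned buffer and is expected
+ * to kfree() it when done.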
*/ -static int rbd_read_header(struct rbd_device *rbd_dev, - struct rbd_image_header *header) +static struct rbd_image_header_ondisk * +rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version) { - ssize_t rc; - struct rbd_image_header_ondisk *dh; + struct rbd_image_header_ondisk *ondisk = NULL; u32 snap_count = 0; - u64 ver; - size_t len; + u64 names_size = 0; + u32 want_count; + int ret; /* - * First reads the fixed-size header to determine the number - * of snapshots, then re-reads it, along with all snapshot - * records as well as their stored names. + * The complete header will include an array of its 64-bit + * snapshot ids, followed by the names of those snapshots as + * a contiguous block of NUL-terminated strings. Note that + * the number of snapshots could change by the time we read + * it in, in which case we re-read it. */ - len = sizeof (*dh); - while (1) { - dh = kmalloc(len, GFP_KERNEL); - if (!dh) - return -ENOMEM; - - rc = rbd_req_sync_read(rbd_dev, - CEPH_NOSNAP, + do { + size_t size; + + kfree(ondisk); + + size = sizeof (*ondisk); + size += snap_count * sizeof (struct rbd_image_snap_ondisk); + size += names_size; + ondisk = kmalloc(size, GFP_KERNEL); + if (!ondisk) + return ERR_PTR(-ENOMEM); + + ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP, rbd_dev->header_name, - 0, len, - (char *)dh, &ver); - if (rc < 0) - goto out_dh; - - rc = rbd_header_from_disk(header, dh, snap_count); - if (rc < 0) { - if (rc == -ENXIO) - pr_warning("unrecognized header format" - " for image %s\n", - rbd_dev->image_name); - goto out_dh; + 0, size, + (char *) ondisk, version); + + if (ret < 0) + goto out_err; + if (WARN_ON((size_t) ret < size)) { + ret = -ENXIO; + pr_warning("short header read for image %s" + " (want %zd got %d)\n", + rbd_dev->image_name, size, ret); + goto out_err; + } + if (!rbd_dev_ondisk_valid(ondisk)) { + ret = -ENXIO; + pr_warning("invalid header for image %s\n", + rbd_dev->image_name); + goto out_err; } - if (snap_count == header->total_snaps) - break; + names_size = le64_to_cpu(ondisk->snap_names_len); + want_count = snap_count; + snap_count = le32_to_cpu(ondisk->snap_count); + } while (snap_count != want_count); - snap_count = header->total_snaps; - len = sizeof (*dh) + - snap_count * sizeof(struct rbd_image_snap_ondisk) + - header->snap_names_len; + return ondisk; - rbd_header_free(header); - kfree(dh); - } - header->obj_version = ver; +out_err: + kfree(ondisk); -out_dh: - kfree(dh); - return rc; + return ERR_PTR(ret); } /* - * create a snapshot + * reload the ondisk the header */ -static int rbd_header_add_snap(struct rbd_device *rbd_dev, - const char *snap_name, - gfp_t gfp_flags) +static int rbd_read_header(struct rbd_device *rbd_dev, + struct rbd_image_header *header) { - int name_len = strlen(snap_name); - u64 new_snapid; + struct rbd_image_header_ondisk *ondisk; + u64 ver = 0; int ret; - void *data, *p, *e; - struct ceph_mon_client *monc; - /* we should create a snapshot only if we're pointing at the head */ - if (rbd_dev->snap_id != CEPH_NOSNAP) - return -EINVAL; + ondisk = rbd_dev_v1_header_read(rbd_dev, &ver); + if (IS_ERR(ondisk)) + return PTR_ERR(ondisk); + ret = rbd_header_from_disk(header, ondisk); + if (ret >= 0) + header->obj_version = ver; + kfree(ondisk); - monc = &rbd_dev->rbd_client->client->monc; - ret = ceph_monc_create_snapid(monc, rbd_dev->pool_id, &new_snapid); - dout("created snapid=%llu\n", (unsigned long long) new_snapid); - if (ret < 0) - return ret; - - data = kmalloc(name_len + 16, gfp_flags); - if (!data) - return -ENOMEM; - - p = 
data; - e = data + name_len + 16; - - ceph_encode_string_safe(&p, e, snap_name, name_len, bad); - ceph_encode_64_safe(&p, e, new_snapid, bad); - - ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, - "rbd", "snap_add", - data, p - data, NULL); - - kfree(data); - - return ret < 0 ? ret : 0; -bad: - return -ERANGE; + return ret; } static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev) @@ -1716,11 +1731,15 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver) down_write(&rbd_dev->header_rwsem); /* resized? */ - if (rbd_dev->snap_id == CEPH_NOSNAP) { + if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) { sector_t size = (sector_t) h.image_size / SECTOR_SIZE; - dout("setting size to %llu sectors", (unsigned long long) size); - set_capacity(rbd_dev->disk, size); + if (size != (sector_t) rbd_dev->mapping.size) { + dout("setting size to %llu sectors", + (unsigned long long) size); + rbd_dev->mapping.size = (u64) size; + set_capacity(rbd_dev->disk, size); + } } /* rbd_dev->header.object_prefix shouldn't change */ @@ -1733,16 +1752,16 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver) *hver = h.obj_version; rbd_dev->header.obj_version = h.obj_version; rbd_dev->header.image_size = h.image_size; - rbd_dev->header.total_snaps = h.total_snaps; rbd_dev->header.snapc = h.snapc; rbd_dev->header.snap_names = h.snap_names; - rbd_dev->header.snap_names_len = h.snap_names_len; rbd_dev->header.snap_sizes = h.snap_sizes; /* Free the extra copy of the object prefix */ WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix)); kfree(h.object_prefix); - ret = __rbd_init_snaps_header(rbd_dev); + ret = rbd_dev_snaps_update(rbd_dev); + if (!ret) + ret = rbd_dev_snaps_register(rbd_dev); up_write(&rbd_dev->header_rwsem); @@ -1764,29 +1783,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) { struct gendisk *disk; struct request_queue *q; - int rc; u64 segment_size; - u64 total_size = 0; - - /* contact OSD, request size info about the object being mapped */ - rc = rbd_read_header(rbd_dev, &rbd_dev->header); - if (rc) - return rc; - - /* no need to lock here, as rbd_dev is not registered yet */ - rc = __rbd_init_snaps_header(rbd_dev); - if (rc) - return rc; - - rc = rbd_header_set_snap(rbd_dev, &total_size); - if (rc) - return rc; /* create gendisk info */ - rc = -ENOMEM; disk = alloc_disk(RBD_MINORS_PER_MAJOR); if (!disk) - goto out; + return -ENOMEM; snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", rbd_dev->dev_id); @@ -1796,7 +1798,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) disk->private_data = rbd_dev; /* init rq */ - rc = -ENOMEM; q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock); if (!q) goto out_disk; @@ -1817,20 +1818,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) q->queuedata = rbd_dev; rbd_dev->disk = disk; - rbd_dev->q = q; - /* finally, announce the disk to the world */ - set_capacity(disk, total_size / SECTOR_SIZE); - add_disk(disk); + set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); - pr_info("%s: added with size 0x%llx\n", - disk->disk_name, (unsigned long long)total_size); return 0; - out_disk: put_disk(disk); -out: - return rc; + + return -ENOMEM; } /* @@ -1855,6 +1850,19 @@ static ssize_t rbd_size_show(struct device *dev, return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE); } +/* + * Note this shows the features for whatever's mapped, which is not + * necessarily the base image. 
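+ * A snapshot mapping reports the snapshot's own feature bits, as
+ * recorded when the mapping was set up.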
+ */ +static ssize_t rbd_features_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + + return sprintf(buf, "0x%016llx\n", + (unsigned long long) rbd_dev->mapping.features); +} + static ssize_t rbd_major_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1896,13 +1904,25 @@ static ssize_t rbd_name_show(struct device *dev, return sprintf(buf, "%s\n", rbd_dev->image_name); } +static ssize_t rbd_image_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + + return sprintf(buf, "%s\n", rbd_dev->image_id); +} + +/* + * Shows the name of the currently-mapped snapshot (or + * RBD_SNAP_HEAD_NAME for the base image). + */ static ssize_t rbd_snap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - return sprintf(buf, "%s\n", rbd_dev->snap_name); + return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name); } static ssize_t rbd_image_refresh(struct device *dev, @@ -1919,25 +1939,27 @@ static ssize_t rbd_image_refresh(struct device *dev, } static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); +static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL); static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL); static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL); static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL); static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); +static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL); static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); -static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add); static struct attribute *rbd_attrs[] = { &dev_attr_size.attr, + &dev_attr_features.attr, &dev_attr_major.attr, &dev_attr_client_id.attr, &dev_attr_pool.attr, &dev_attr_pool_id.attr, &dev_attr_name.attr, + &dev_attr_image_id.attr, &dev_attr_current_snap.attr, &dev_attr_refresh.attr, - &dev_attr_create_snap.attr, NULL }; @@ -1983,12 +2005,24 @@ static ssize_t rbd_snap_id_show(struct device *dev, return sprintf(buf, "%llu\n", (unsigned long long)snap->id); } +static ssize_t rbd_snap_features_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); + + return sprintf(buf, "0x%016llx\n", + (unsigned long long) snap->features); +} + static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL); static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL); +static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL); static struct attribute *rbd_snap_attrs[] = { &dev_attr_snap_size.attr, &dev_attr_snap_id.attr, + &dev_attr_snap_features.attr, NULL, }; @@ -2013,10 +2047,21 @@ static struct device_type rbd_snap_device_type = { .release = rbd_snap_dev_release, }; +static bool rbd_snap_registered(struct rbd_snap *snap) +{ + bool ret = snap->dev.type == &rbd_snap_device_type; + bool reg = device_is_registered(&snap->dev); + + rbd_assert(!ret ^ reg); + + return ret; +} + static void __rbd_remove_snap_dev(struct rbd_snap *snap) { list_del(&snap->node); - device_unregister(&snap->dev); + if (device_is_registered(&snap->dev)) + device_unregister(&snap->dev); } static int rbd_register_snap_dev(struct rbd_snap *snap, @@ -2029,13 +2074,17 @@ static int rbd_register_snap_dev(struct rbd_snap 
*snap, dev->parent = parent; dev->release = rbd_snap_dev_release; dev_set_name(dev, "snap_%s", snap->name); + dout("%s: registering device for snapshot %s\n", __func__, snap->name); + ret = device_register(dev); return ret; } static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev, - int i, const char *name) + const char *snap_name, + u64 snap_id, u64 snap_size, + u64 snap_features) { struct rbd_snap *snap; int ret; @@ -2045,17 +2094,13 @@ static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev, return ERR_PTR(-ENOMEM); ret = -ENOMEM; - snap->name = kstrdup(name, GFP_KERNEL); + snap->name = kstrdup(snap_name, GFP_KERNEL); if (!snap->name) goto err; - snap->size = rbd_dev->header.snap_sizes[i]; - snap->id = rbd_dev->header.snapc->snaps[i]; - if (device_is_registered(&rbd_dev->dev)) { - ret = rbd_register_snap_dev(snap, &rbd_dev->dev); - if (ret < 0) - goto err; - } + snap->id = snap_id; + snap->size = snap_size; + snap->features = snap_features; return snap; @@ -2066,128 +2111,439 @@ err: return ERR_PTR(ret); } +static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which, + u64 *snap_size, u64 *snap_features) +{ + char *snap_name; + + rbd_assert(which < rbd_dev->header.snapc->num_snaps); + + *snap_size = rbd_dev->header.snap_sizes[which]; + *snap_features = 0; /* No features for v1 */ + + /* Skip over names until we find the one we are looking for */ + + snap_name = rbd_dev->header.snap_names; + while (which--) + snap_name += strlen(snap_name) + 1; + + return snap_name; +} + /* - * search for the previous snap in a null delimited string list + * Get the size and object order for an image snapshot, or if + * snap_id is CEPH_NOSNAP, gets this information for the base + * image. */ -const char *rbd_prev_snap_name(const char *name, const char *start) +static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, + u8 *order, u64 *snap_size) { - if (name < start + 2) - return NULL; + __le64 snapid = cpu_to_le64(snap_id); + int ret; + struct { + u8 order; + __le64 size; + } __attribute__ ((packed)) size_buf = { 0 }; + + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_size", + (char *) &snapid, sizeof (snapid), + (char *) &size_buf, sizeof (size_buf), + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + return ret; + + *order = size_buf.order; + *snap_size = le64_to_cpu(size_buf.size); - name -= 2; - while (*name) { - if (name == start) - return start; - name--; + dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n", + (unsigned long long) snap_id, (unsigned int) *order, + (unsigned long long) *snap_size); + + return 0; +} + +static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev) +{ + return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, + &rbd_dev->header.obj_order, + &rbd_dev->header.image_size); +} + +static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) +{ + void *reply_buf; + int ret; + void *p; + + reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL); + if (!reply_buf) + return -ENOMEM; + + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_object_prefix", + NULL, 0, + reply_buf, RBD_OBJ_PREFIX_LEN_MAX, + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + goto out; + + p = reply_buf; + rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, + p + RBD_OBJ_PREFIX_LEN_MAX, + NULL, GFP_NOIO); + + if (IS_ERR(rbd_dev->header.object_prefix)) { + ret = 
PTR_ERR(rbd_dev->header.object_prefix); + rbd_dev->header.object_prefix = NULL; + } else { + dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); } - return name + 1; + +out: + kfree(reply_buf); + + return ret; } -/* - * compare the old list of snapshots that we have to what's in the header - * and update it accordingly. Note that the header holds the snapshots - * in a reverse order (from newest to oldest) and we need to go from - * older to new so that we don't get a duplicate snap name when - * doing the process (e.g., removed snapshot and recreated a new - * one with the same name. - */ -static int __rbd_init_snaps_header(struct rbd_device *rbd_dev) +static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, + u64 *snap_features) { - const char *name, *first_name; - int i = rbd_dev->header.total_snaps; - struct rbd_snap *snap, *old_snap = NULL; - struct list_head *p, *n; + __le64 snapid = cpu_to_le64(snap_id); + struct { + __le64 features; + __le64 incompat; + } features_buf = { 0 }; + int ret; - first_name = rbd_dev->header.snap_names; - name = first_name + rbd_dev->header.snap_names_len; + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_features", + (char *) &snapid, sizeof (snapid), + (char *) &features_buf, sizeof (features_buf), + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + return ret; + *snap_features = le64_to_cpu(features_buf.features); - list_for_each_prev_safe(p, n, &rbd_dev->snaps) { - u64 cur_id; + dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n", + (unsigned long long) snap_id, + (unsigned long long) *snap_features, + (unsigned long long) le64_to_cpu(features_buf.incompat)); - old_snap = list_entry(p, struct rbd_snap, node); + return 0; +} - if (i) - cur_id = rbd_dev->header.snapc->snaps[i - 1]; +static int rbd_dev_v2_features(struct rbd_device *rbd_dev) +{ + return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, + &rbd_dev->header.features); +} - if (!i || old_snap->id < cur_id) { - /* - * old_snap->id was skipped, thus was - * removed. If this rbd_dev is mapped to - * the removed snapshot, record that it no - * longer exists, to prevent further I/O. - */ - if (rbd_dev->snap_id == old_snap->id) - rbd_dev->snap_exists = false; - __rbd_remove_snap_dev(old_snap); - continue; - } - if (old_snap->id == cur_id) { - /* we have this snapshot already */ - i--; - name = rbd_prev_snap_name(name, first_name); +static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) +{ + size_t size; + int ret; + void *reply_buf; + void *p; + void *end; + u64 seq; + u32 snap_count; + struct ceph_snap_context *snapc; + u32 i; + + /* + * We'll need room for the seq value (maximum snapshot id), + * snapshot count, and array of that many snapshot ids. + * For now we have a fixed upper limit on the number we're + * prepared to receive. 
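To make the sizing comment above concrete, here is a standalone model of the reply layout the driver is about to decode: a 64-bit seq, a 32-bit count, then that many 64-bit snapshot ids. It mirrors the bounds checks the code performs with ceph_decode_*_safe() and ceph_has_room(); a little-endian host is assumed, so byte swapping is omitted.

/* Stand-alone sketch of the get_snapcontext reply layout (assumes a
 * little-endian host; the kernel code byte-swaps explicitly). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int decode_snap_context(const uint8_t *buf, size_t len)
{
        uint64_t seq;
        uint32_t count;
        size_t hdr = sizeof(seq) + sizeof(count);
        uint32_t i;

        if (len < hdr)
                return -1;              /* like the -ERANGE paths */
        memcpy(&seq, buf, sizeof(seq));
        memcpy(&count, buf + sizeof(seq), sizeof(count));
        if ((len - hdr) / sizeof(uint64_t) < count)
                return -1;              /* ids would overrun the buffer */

        printf("seq %llu, %u snaps\n", (unsigned long long)seq, count);
        for (i = 0; i < count; i++) {
                uint64_t id;

                memcpy(&id, buf + hdr + (size_t)i * sizeof(id), sizeof(id));
                printf("  snap id %llu\n", (unsigned long long)id);
        }
        return 0;
}

int main(void)
{
        /* One snapshot with id 7, seq 7: 8 + 4 + 8 bytes. */
        uint8_t buf[20] = { 7, 0, 0, 0, 0, 0, 0, 0,  1, 0, 0, 0,
                            7, 0, 0, 0, 0, 0, 0, 0 };

        return decode_snap_context(buf, sizeof(buf));
}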
+ */
+        size = sizeof (__le64) + sizeof (__le32) +
+                                        RBD_MAX_SNAP_COUNT * sizeof (__le64);
+        reply_buf = kzalloc(size, GFP_KERNEL);
+        if (!reply_buf)
+                return -ENOMEM;
+
+        ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+                                "rbd", "get_snapcontext",
+                                NULL, 0,
+                                reply_buf, size,
+                                CEPH_OSD_FLAG_READ, ver);
+        dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+        if (ret < 0)
+                goto out;
+
+        ret = -ERANGE;
+        p = reply_buf;
+        end = (char *) reply_buf + size;
+        ceph_decode_64_safe(&p, end, seq, out);
+        ceph_decode_32_safe(&p, end, snap_count, out);
+
+        /*
+         * Make sure the reported number of snapshot ids wouldn't go
+         * beyond the end of our buffer.  But before checking that,
+         * make sure the computed size of the snapshot context we
+         * allocate is representable in a size_t.
+         */
+        if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
+                                 / sizeof (u64)) {
+                ret = -EINVAL;
+                goto out;
+        }
+        if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
+                goto out;
+
+        size = sizeof (struct ceph_snap_context) +
+                                snap_count * sizeof (snapc->snaps[0]);
+        snapc = kmalloc(size, GFP_KERNEL);
+        if (!snapc) {
+                ret = -ENOMEM;
+                goto out;
+        }
+
+        atomic_set(&snapc->nref, 1);
+        snapc->seq = seq;
+        snapc->num_snaps = snap_count;
+        for (i = 0; i < snap_count; i++)
+                snapc->snaps[i] = ceph_decode_64(&p);
+
+        rbd_dev->header.snapc = snapc;
+
+        dout(" snap context seq = %llu, snap_count = %u\n",
+                (unsigned long long) seq, (unsigned int) snap_count);
+
+        ret = 0;
+out:
+        kfree(reply_buf);
+
+        return ret;
+}
+
+static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
+{
+        size_t size;
+        void *reply_buf;
+        __le64 snap_id;
+        int ret;
+        void *p;
+        void *end;
+        size_t snap_name_len;
+        char *snap_name;
+
+        size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
+        reply_buf = kmalloc(size, GFP_KERNEL);
+        if (!reply_buf)
+                return ERR_PTR(-ENOMEM);
+
+        snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
+        ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
+                                "rbd", "get_snapshot_name",
+                                (char *) &snap_id, sizeof (snap_id),
+                                reply_buf, size,
+                                CEPH_OSD_FLAG_READ, NULL);
+        dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
+        if (ret < 0)
+                goto out;
+
+        p = reply_buf;
+        end = (char *) reply_buf + size;
+        snap_name_len = 0;
+        snap_name = ceph_extract_encoded_string(&p, end, &snap_name_len,
+                                GFP_KERNEL);
+        if (IS_ERR(snap_name)) {
+                ret = PTR_ERR(snap_name);
+                goto out;
+        } else {
+                dout(" snap_id 0x%016llx snap_name = %s\n",
+                        (unsigned long long) le64_to_cpu(snap_id), snap_name);
+        }
+        kfree(reply_buf);
+
+        return snap_name;
+out:
+        kfree(reply_buf);
+
+        return ERR_PTR(ret);
+}
+
+static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
+                u64 *snap_size, u64 *snap_features)
+{
+        __le64 snap_id;
+        u8 order;
+        int ret;
+
+        snap_id = rbd_dev->header.snapc->snaps[which];
+        ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
+        if (ret)
+                return ERR_PTR(ret);
+        ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
+        if (ret)
+                return ERR_PTR(ret);
+
+        return rbd_dev_v2_snap_name(rbd_dev, which);
+}
+
+static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
+                u64 *snap_size, u64 *snap_features)
+{
+        if (rbd_dev->image_format == 1)
+                return rbd_dev_v1_snap_info(rbd_dev, which,
+                                        snap_size, snap_features);
+        if (rbd_dev->image_format == 2)
+                return rbd_dev_v2_snap_info(rbd_dev, which,
+                                        snap_size, snap_features);
+        return ERR_PTR(-EINVAL);
+}
+
+/*
+ * Scan the rbd device's current snapshot list and compare it to the
+ * newly-received snapshot context.  Remove any existing snapshots
+ * not present in the new snapshot context.  Add a new snapshot for
+ * any snapshots in the snapshot context not in the current list.
+ * And verify there are no changes to snapshots we already know
+ * about.
+ *
+ * Assumes the snapshots in the snapshot context are sorted by
+ * snapshot id, highest id first.  (Snapshots in the rbd_dev's list
+ * are also maintained in that order.)
+ */
+static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
+{
+        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
+        const u32 snap_count = snapc->num_snaps;
+        struct list_head *head = &rbd_dev->snaps;
+        struct list_head *links = head->next;
+        u32 index = 0;
+
+        dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
+        while (index < snap_count || links != head) {
+                u64 snap_id;
+                struct rbd_snap *snap;
+                char *snap_name;
+                u64 snap_size = 0;
+                u64 snap_features = 0;
+
+                snap_id = index < snap_count ? snapc->snaps[index]
+                                             : CEPH_NOSNAP;
+                snap = links != head ? list_entry(links, struct rbd_snap, node)
+                                     : NULL;
+                rbd_assert(!snap || snap->id != CEPH_NOSNAP);
+
+                if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
+                        struct list_head *next = links->next;
+
+                        /* Existing snapshot not in the new snap context */
+
+                        if (rbd_dev->mapping.snap_id == snap->id)
+                                rbd_dev->mapping.snap_exists = false;
+                        __rbd_remove_snap_dev(snap);
+                        dout("%ssnap id %llu has been removed\n",
+                                rbd_dev->mapping.snap_id == snap->id ?
+                                                        "mapped " : "",
+                                (unsigned long long) snap->id);
+
+                        /* Done with this list entry; advance */
+
+                        links = next;
                         continue;
                 }
-                for (; i > 0;
-                    i--, name = rbd_prev_snap_name(name, first_name)) {
-                        if (!name) {
-                                WARN_ON(1);
-                                return -EINVAL;
+
+                snap_name = rbd_dev_snap_info(rbd_dev, index,
+                                        &snap_size, &snap_features);
+                if (IS_ERR(snap_name))
+                        return PTR_ERR(snap_name);
+
+                dout("entry %u: snap_id = %llu\n", (unsigned int) index,
+                        (unsigned long long) snap_id);
+                if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
+                        struct rbd_snap *new_snap;
+
+                        /* We haven't seen this snapshot before */
+
+                        new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
+                                        snap_id, snap_size, snap_features);
+                        if (IS_ERR(new_snap)) {
+                                int err = PTR_ERR(new_snap);
+
+                                dout(" failed to add dev, error %d\n", err);
+
+                                return err;
                         }
-                        cur_id = rbd_dev->header.snapc->snaps[i];
-                        /* snapshot removal? handle it above */
-                        if (cur_id >= old_snap->id)
-                                break;
-                        /* a new snapshot */
-                        snap = __rbd_add_snap_dev(rbd_dev, i - 1, name);
-                        if (IS_ERR(snap))
-                                return PTR_ERR(snap);
-
-                        /* note that we add it backward so using n and not p */
-                        list_add(&snap->node, n);
-                        p = &snap->node;
+
+                        /* New goes before existing, or at end of list */
+
+                        dout(" added dev%s\n", snap ? "" : " at end");
+                        if (snap)
+                                list_add_tail(&new_snap->node, &snap->node);
+                        else
+                                list_add_tail(&new_snap->node, head);
+                } else {
+                        /* Already have this one */
+
+                        dout(" already present\n");
+
+                        rbd_assert(snap->size == snap_size);
+                        rbd_assert(!strcmp(snap->name, snap_name));
+                        rbd_assert(snap->features == snap_features);
+
+                        /* Done with this list entry; advance */
+
+                        links = links->next;
                 }
+
+                /* Advance to the next entry in the snapshot context */
+
+                index++;
         }
-        /* we're done going over the old snap list, just add what's left */
-        for (; i > 0; i--) {
-                name = rbd_prev_snap_name(name, first_name);
-                if (!name) {
-                        WARN_ON(1);
-                        return -EINVAL;
+        dout("%s: done\n", __func__);
+
+        return 0;
+}
+
+/*
+ * Scan the list of snapshots and register the devices for any that
+ * have not already been registered.
+ */
+static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
+{
+        struct rbd_snap *snap;
+        int ret = 0;
+
+        dout("%s called\n", __func__);
+        if (WARN_ON(!device_is_registered(&rbd_dev->dev)))
+                return -EIO;
+
+        list_for_each_entry(snap, &rbd_dev->snaps, node) {
+                if (!rbd_snap_registered(snap)) {
+                        ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
+                        if (ret < 0)
+                                break;
                 }
-                snap = __rbd_add_snap_dev(rbd_dev, i - 1, name);
-                if (IS_ERR(snap))
-                        return PTR_ERR(snap);
-                list_add(&snap->node, &rbd_dev->snaps);
         }
+        dout("%s: returning %d\n", __func__, ret);
 
-        return 0;
+        return ret;
 }
 
 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
 {
-        int ret;
         struct device *dev;
-        struct rbd_snap *snap;
+        int ret;
 
         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-        dev = &rbd_dev->dev;
 
+        dev = &rbd_dev->dev;
         dev->bus = &rbd_bus_type;
         dev->type = &rbd_device_type;
         dev->parent = &rbd_root_dev;
         dev->release = rbd_dev_release;
         dev_set_name(dev, "%d", rbd_dev->dev_id);
         ret = device_register(dev);
-        if (ret < 0)
-                goto out;
 
-        list_for_each_entry(snap, &rbd_dev->snaps, node) {
-                ret = rbd_register_snap_dev(snap, &rbd_dev->dev);
-                if (ret < 0)
-                        break;
-        }
-out:
         mutex_unlock(&ctl_mutex);
+
         return ret;
 }
 
@@ -2212,33 +2568,37 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
         return ret;
 }
 
-static atomic64_t rbd_id_max = ATOMIC64_INIT(0);
+static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
 
 /*
  * Get a unique rbd identifier for the given new rbd_dev, and add
  * the rbd_dev to the global list.  The minimum rbd id is 1.
  */
-static void rbd_id_get(struct rbd_device *rbd_dev)
+static void rbd_dev_id_get(struct rbd_device *rbd_dev)
 {
-        rbd_dev->dev_id = atomic64_inc_return(&rbd_id_max);
+        rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
 
         spin_lock(&rbd_dev_list_lock);
         list_add_tail(&rbd_dev->node, &rbd_dev_list);
         spin_unlock(&rbd_dev_list_lock);
+        dout("rbd_dev %p given dev id %llu\n", rbd_dev,
+                (unsigned long long) rbd_dev->dev_id);
 }
 
 /*
  * Remove an rbd_dev from the global list, and record that its
  * identifier is no longer in use.
  */
-static void rbd_id_put(struct rbd_device *rbd_dev)
+static void rbd_dev_id_put(struct rbd_device *rbd_dev)
 {
         struct list_head *tmp;
         int rbd_id = rbd_dev->dev_id;
         int max_id;
 
-        BUG_ON(rbd_id < 1);
+        rbd_assert(rbd_id > 0);
+        dout("rbd_dev %p released dev id %llu\n", rbd_dev,
+                (unsigned long long) rbd_dev->dev_id);
         spin_lock(&rbd_dev_list_lock);
         list_del_init(&rbd_dev->node);
 
@@ -2246,7 +2606,7 @@ static void rbd_id_put(struct rbd_device *rbd_dev)
          * If the id being "put" is not the current maximum, there
          * is nothing special we need to do.
*/ - if (rbd_id != atomic64_read(&rbd_id_max)) { + if (rbd_id != atomic64_read(&rbd_dev_id_max)) { spin_unlock(&rbd_dev_list_lock); return; } @@ -2267,12 +2627,13 @@ static void rbd_id_put(struct rbd_device *rbd_dev) spin_unlock(&rbd_dev_list_lock); /* - * The max id could have been updated by rbd_id_get(), in + * The max id could have been updated by rbd_dev_id_get(), in * which case it now accurately reflects the new maximum. * Be careful not to overwrite the maximum value in that * case. */ - atomic64_cmpxchg(&rbd_id_max, rbd_id, max_id); + atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id); + dout(" max dev id has been reset\n"); } /* @@ -2361,28 +2722,31 @@ static inline char *dup_token(const char **buf, size_t *lenp) } /* - * This fills in the pool_name, image_name, image_name_len, snap_name, - * rbd_dev, rbd_md_name, and name fields of the given rbd_dev, based - * on the list of monitor addresses and other options provided via - * /sys/bus/rbd/add. + * This fills in the pool_name, image_name, image_name_len, rbd_dev, + * rbd_md_name, and name fields of the given rbd_dev, based on the + * list of monitor addresses and other options provided via + * /sys/bus/rbd/add. Returns a pointer to a dynamically-allocated + * copy of the snapshot name to map if successful, or a + * pointer-coded error otherwise. * * Note: rbd_dev is assumed to have been initially zero-filled. */ -static int rbd_add_parse_args(struct rbd_device *rbd_dev, - const char *buf, - const char **mon_addrs, - size_t *mon_addrs_size, - char *options, - size_t options_size) +static char *rbd_add_parse_args(struct rbd_device *rbd_dev, + const char *buf, + const char **mon_addrs, + size_t *mon_addrs_size, + char *options, + size_t options_size) { size_t len; - int ret; + char *err_ptr = ERR_PTR(-EINVAL); + char *snap_name; /* The first four tokens are required */ len = next_token(&buf); if (!len) - return -EINVAL; + return err_ptr; *mon_addrs_size = len + 1; *mon_addrs = buf; @@ -2390,9 +2754,9 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev, len = copy_token(&buf, options, options_size); if (!len || len >= options_size) - return -EINVAL; + return err_ptr; - ret = -ENOMEM; + err_ptr = ERR_PTR(-ENOMEM); rbd_dev->pool_name = dup_token(&buf, NULL); if (!rbd_dev->pool_name) goto out_err; @@ -2401,41 +2765,227 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev, if (!rbd_dev->image_name) goto out_err; - /* Create the name of the header object */ + /* Snapshot name is optional */ + len = next_token(&buf); + if (!len) { + buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */ + len = sizeof (RBD_SNAP_HEAD_NAME) - 1; + } + snap_name = kmalloc(len + 1, GFP_KERNEL); + if (!snap_name) + goto out_err; + memcpy(snap_name, buf, len); + *(snap_name + len) = '\0'; - rbd_dev->header_name = kmalloc(rbd_dev->image_name_len - + sizeof (RBD_SUFFIX), - GFP_KERNEL); - if (!rbd_dev->header_name) +dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len); + + return snap_name; + +out_err: + kfree(rbd_dev->image_name); + rbd_dev->image_name = NULL; + rbd_dev->image_name_len = 0; + kfree(rbd_dev->pool_name); + rbd_dev->pool_name = NULL; + + return err_ptr; +} + +/* + * An rbd format 2 image has a unique identifier, distinct from the + * name given to it by the user. Internally, that identifier is + * what's used to specify the names of objects related to the image. + * + * A special "rbd id" object is used to map an rbd image name to its + * id. If that object doesn't exist, then there is no v2 rbd image + * with the supplied name. 
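Concretely, the lookup object's name is just the image name behind a fixed prefix, and its get_id reply is an encoded string holding the image id. A minimal sketch of the name construction; the prefix mirrors RBD_ID_PREFIX from rbd_types.h, and the image name is an invented example.

/* Sketch: derive the "rbd id" object name the way rbd_dev_image_id()
 * does below with kmalloc() and sprintf().  "foo" is an example. */
#include <stdio.h>

#define ID_PREFIX "rbd_id."             /* mirrors RBD_ID_PREFIX */

int main(void)
{
        char object_name[64];

        snprintf(object_name, sizeof(object_name), "%s%s", ID_PREFIX, "foo");
        printf("%s\n", object_name);    /* prints "rbd_id.foo" */
        return 0;
}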
+ * + * This function will record the given rbd_dev's image_id field if + * it can be determined, and in that case will return 0. If any + * errors occur a negative errno will be returned and the rbd_dev's + * image_id field will be unchanged (and should be NULL). + */ +static int rbd_dev_image_id(struct rbd_device *rbd_dev) +{ + int ret; + size_t size; + char *object_name; + void *response; + void *p; + + /* + * First, see if the format 2 image id file exists, and if + * so, get the image's persistent id from it. + */ + size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len; + object_name = kmalloc(size, GFP_NOIO); + if (!object_name) + return -ENOMEM; + sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name); + dout("rbd id object name is %s\n", object_name); + + /* Response will be an encoded string, which includes a length */ + + size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX; + response = kzalloc(size, GFP_NOIO); + if (!response) { + ret = -ENOMEM; + goto out; + } + + ret = rbd_req_sync_exec(rbd_dev, object_name, + "rbd", "get_id", + NULL, 0, + response, RBD_IMAGE_ID_LEN_MAX, + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + goto out; + + p = response; + rbd_dev->image_id = ceph_extract_encoded_string(&p, + p + RBD_IMAGE_ID_LEN_MAX, + &rbd_dev->image_id_len, + GFP_NOIO); + if (IS_ERR(rbd_dev->image_id)) { + ret = PTR_ERR(rbd_dev->image_id); + rbd_dev->image_id = NULL; + } else { + dout("image_id is %s\n", rbd_dev->image_id); + } +out: + kfree(response); + kfree(object_name); + + return ret; +} + +static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) +{ + int ret; + size_t size; + + /* Version 1 images have no id; empty string is used */ + + rbd_dev->image_id = kstrdup("", GFP_KERNEL); + if (!rbd_dev->image_id) + return -ENOMEM; + rbd_dev->image_id_len = 0; + + /* Record the header object name for this rbd image. */ + + size = rbd_dev->image_name_len + sizeof (RBD_SUFFIX); + rbd_dev->header_name = kmalloc(size, GFP_KERNEL); + if (!rbd_dev->header_name) { + ret = -ENOMEM; goto out_err; + } sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX); + /* Populate rbd image metadata */ + + ret = rbd_read_header(rbd_dev, &rbd_dev->header); + if (ret < 0) + goto out_err; + rbd_dev->image_format = 1; + + dout("discovered version 1 image, header name is %s\n", + rbd_dev->header_name); + + return 0; + +out_err: + kfree(rbd_dev->header_name); + rbd_dev->header_name = NULL; + kfree(rbd_dev->image_id); + rbd_dev->image_id = NULL; + + return ret; +} + +static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) +{ + size_t size; + int ret; + u64 ver = 0; + /* - * The snapshot name is optional. If none is is supplied, - * we use the default value. + * Image id was filled in by the caller. Record the header + * object name for this rbd image. 
*/ - rbd_dev->snap_name = dup_token(&buf, &len); - if (!rbd_dev->snap_name) + size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->image_id_len; + rbd_dev->header_name = kmalloc(size, GFP_KERNEL); + if (!rbd_dev->header_name) + return -ENOMEM; + sprintf(rbd_dev->header_name, "%s%s", + RBD_HEADER_PREFIX, rbd_dev->image_id); + + /* Get the size and object order for the image */ + + ret = rbd_dev_v2_image_size(rbd_dev); + if (ret < 0) goto out_err; - if (!len) { - /* Replace the empty name with the default */ - kfree(rbd_dev->snap_name); - rbd_dev->snap_name - = kmalloc(sizeof (RBD_SNAP_HEAD_NAME), GFP_KERNEL); - if (!rbd_dev->snap_name) - goto out_err; - memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME, - sizeof (RBD_SNAP_HEAD_NAME)); - } + /* Get the object prefix (a.k.a. block_name) for the image */ - return 0; + ret = rbd_dev_v2_object_prefix(rbd_dev); + if (ret < 0) + goto out_err; + + /* Get the features for the image */ + ret = rbd_dev_v2_features(rbd_dev); + if (ret < 0) + goto out_err; + + /* crypto and compression type aren't (yet) supported for v2 images */ + + rbd_dev->header.crypt_type = 0; + rbd_dev->header.comp_type = 0; + + /* Get the snapshot context, plus the header version */ + + ret = rbd_dev_v2_snap_context(rbd_dev, &ver); + if (ret) + goto out_err; + rbd_dev->header.obj_version = ver; + + rbd_dev->image_format = 2; + + dout("discovered version 2 image, header name is %s\n", + rbd_dev->header_name); + + return -ENOTSUPP; out_err: kfree(rbd_dev->header_name); - kfree(rbd_dev->image_name); - kfree(rbd_dev->pool_name); - rbd_dev->pool_name = NULL; + rbd_dev->header_name = NULL; + kfree(rbd_dev->header.object_prefix); + rbd_dev->header.object_prefix = NULL; + + return ret; +} + +/* + * Probe for the existence of the header object for the given rbd + * device. For format 2 images this includes determining the image + * id. + */ +static int rbd_dev_probe(struct rbd_device *rbd_dev) +{ + int ret; + + /* + * Get the id from the image id object. If it's not a + * format 2 image, we'll get ENOENT back, and we'll assume + * it's a format 1 image. + */ + ret = rbd_dev_image_id(rbd_dev); + if (ret) + ret = rbd_dev_v1_probe(rbd_dev); + else + ret = rbd_dev_v2_probe(rbd_dev); + if (ret) + dout("probe failed, returning %d\n", ret); return ret; } @@ -2450,16 +3000,17 @@ static ssize_t rbd_add(struct bus_type *bus, size_t mon_addrs_size = 0; struct ceph_osd_client *osdc; int rc = -ENOMEM; + char *snap_name; if (!try_module_get(THIS_MODULE)) return -ENODEV; options = kmalloc(count, GFP_KERNEL); if (!options) - goto err_nomem; + goto err_out_mem; rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); if (!rbd_dev) - goto err_nomem; + goto err_out_mem; /* static rbd_device initialization */ spin_lock_init(&rbd_dev->lock); @@ -2467,27 +3018,18 @@ static ssize_t rbd_add(struct bus_type *bus, INIT_LIST_HEAD(&rbd_dev->snaps); init_rwsem(&rbd_dev->header_rwsem); - /* generate unique id: find highest unique id, add one */ - rbd_id_get(rbd_dev); - - /* Fill in the device name, now that we have its id. 
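The BUILD_BUG_ON() removed just below (and re-added later in rbd_add) is a compile-time guarantee that the name buffer can hold the driver prefix plus a formatted device id. Here is a userland analogue using C11 _Static_assert; the constant values are plausible assumptions, not taken from this patch.

/* Compile-time size check in the spirit of the BUILD_BUG_ON() nearby.
 * The DEV_NAME_LEN and MAX_INT_FORMAT_WIDTH values are assumptions. */
#include <stdio.h>

#define DRV_NAME "rbd"
#define MAX_INT_FORMAT_WIDTH 11         /* sign plus ten decimal digits */
#define DEV_NAME_LEN 32

_Static_assert(DEV_NAME_LEN >= sizeof(DRV_NAME) + MAX_INT_FORMAT_WIDTH,
               "device name buffer too small");

int main(void)
{
        char name[DEV_NAME_LEN];

        snprintf(name, sizeof(name), "%s%d", DRV_NAME, 0);
        printf("%s\n", name);           /* prints "rbd0" */
        return 0;
}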
*/ - BUILD_BUG_ON(DEV_NAME_LEN - < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); - sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id); - /* parse add command */ - rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size, - options, count); - if (rc) - goto err_put_id; - - rbd_dev->rbd_client = rbd_get_client(mon_addrs, mon_addrs_size - 1, - options); - if (IS_ERR(rbd_dev->rbd_client)) { - rc = PTR_ERR(rbd_dev->rbd_client); - goto err_put_id; + snap_name = rbd_add_parse_args(rbd_dev, buf, + &mon_addrs, &mon_addrs_size, options, count); + if (IS_ERR(snap_name)) { + rc = PTR_ERR(snap_name); + goto err_out_mem; } + rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options); + if (rc < 0) + goto err_out_args; + /* pick the pool */ osdc = &rbd_dev->rbd_client->client->osdc; rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name); @@ -2495,23 +3037,53 @@ static ssize_t rbd_add(struct bus_type *bus, goto err_out_client; rbd_dev->pool_id = rc; - /* register our block device */ - rc = register_blkdev(0, rbd_dev->name); + rc = rbd_dev_probe(rbd_dev); if (rc < 0) goto err_out_client; + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + + /* no need to lock here, as rbd_dev is not registered yet */ + rc = rbd_dev_snaps_update(rbd_dev); + if (rc) + goto err_out_header; + + rc = rbd_dev_set_mapping(rbd_dev, snap_name); + if (rc) + goto err_out_header; + + /* generate unique id: find highest unique id, add one */ + rbd_dev_id_get(rbd_dev); + + /* Fill in the device name, now that we have its id. */ + BUILD_BUG_ON(DEV_NAME_LEN + < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); + sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id); + + /* Get our block major device number. */ + + rc = register_blkdev(0, rbd_dev->name); + if (rc < 0) + goto err_out_id; rbd_dev->major = rc; - rc = rbd_bus_add_dev(rbd_dev); + /* Set up the blkdev mapping. */ + + rc = rbd_init_disk(rbd_dev); if (rc) goto err_out_blkdev; + rc = rbd_bus_add_dev(rbd_dev); + if (rc) + goto err_out_disk; + /* * At this point cleanup in the event of an error is the job * of the sysfs code (initiated by rbd_bus_del_dev()). - * - * Set up and announce blkdev mapping. */ - rc = rbd_init_disk(rbd_dev); + + down_write(&rbd_dev->header_rwsem); + rc = rbd_dev_snaps_register(rbd_dev); + up_write(&rbd_dev->header_rwsem); if (rc) goto err_out_bus; @@ -2519,6 +3091,13 @@ static ssize_t rbd_add(struct bus_type *bus, if (rc) goto err_out_bus; + /* Everything's ready. Announce the disk to the world. 
*/ + + add_disk(rbd_dev->disk); + + pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, + (unsigned long long) rbd_dev->mapping.size); + return count; err_out_bus: @@ -2528,19 +3107,23 @@ err_out_bus: kfree(options); return rc; +err_out_disk: + rbd_free_disk(rbd_dev); err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); +err_out_id: + rbd_dev_id_put(rbd_dev); +err_out_header: + rbd_header_free(&rbd_dev->header); err_out_client: + kfree(rbd_dev->header_name); rbd_put_client(rbd_dev); -err_put_id: - if (rbd_dev->pool_name) { - kfree(rbd_dev->snap_name); - kfree(rbd_dev->header_name); - kfree(rbd_dev->image_name); - kfree(rbd_dev->pool_name); - } - rbd_id_put(rbd_dev); -err_nomem: + kfree(rbd_dev->image_id); +err_out_args: + kfree(rbd_dev->mapping.snap_name); + kfree(rbd_dev->image_name); + kfree(rbd_dev->pool_name); +err_out_mem: kfree(rbd_dev); kfree(options); @@ -2586,12 +3169,16 @@ static void rbd_dev_release(struct device *dev) rbd_free_disk(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); + /* release allocated disk header fields */ + rbd_header_free(&rbd_dev->header); + /* done with the id, and with the rbd_dev */ - kfree(rbd_dev->snap_name); + kfree(rbd_dev->mapping.snap_name); + kfree(rbd_dev->image_id); kfree(rbd_dev->header_name); kfree(rbd_dev->pool_name); kfree(rbd_dev->image_name); - rbd_id_put(rbd_dev); + rbd_dev_id_put(rbd_dev); kfree(rbd_dev); /* release module ref */ @@ -2629,47 +3216,7 @@ static ssize_t rbd_remove(struct bus_type *bus, done: mutex_unlock(&ctl_mutex); - return ret; -} -static ssize_t rbd_snap_add(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - int ret; - char *name = kmalloc(count + 1, GFP_KERNEL); - if (!name) - return -ENOMEM; - - snprintf(name, count, "%s", buf); - - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - ret = rbd_header_add_snap(rbd_dev, - name, GFP_KERNEL); - if (ret < 0) - goto err_unlock; - - ret = __rbd_refresh_header(rbd_dev, NULL); - if (ret < 0) - goto err_unlock; - - /* shouldn't hold ctl_mutex when notifying.. notify might - trigger a watch callback that would need to get that mutex */ - mutex_unlock(&ctl_mutex); - - /* make a best effort, don't error if failed */ - rbd_req_sync_notify(rbd_dev); - - ret = count; - kfree(name); - return ret; - -err_unlock: - mutex_unlock(&ctl_mutex); - kfree(name); return ret; } diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h index 0924e9e41a60..cbe77fa105ba 100644 --- a/drivers/block/rbd_types.h +++ b/drivers/block/rbd_types.h @@ -15,15 +15,30 @@ #include <linux/types.h> +/* For format version 2, rbd image 'foo' consists of objects + * rbd_id.foo - id of image + * rbd_header.<id> - image metadata + * rbd_data.<id>.0000000000000000 + * rbd_data.<id>.0000000000000001 + * ... - data + * Clients do not access header data directly in rbd format 2. + */ + +#define RBD_HEADER_PREFIX "rbd_header." +#define RBD_DATA_PREFIX "rbd_data." +#define RBD_ID_PREFIX "rbd_id." + /* - * rbd image 'foo' consists of objects - * foo.rbd - image metadata - * foo.00000000 - * foo.00000001 - * ... - data + * For format version 1, rbd image 'foo' consists of objects + * foo.rbd - image metadata + * rb.<idhi>.<idlo>.00000000 + * rb.<idhi>.<idlo>.00000001 + * ... - data + * There is no notion of a persistent image id in rbd format 1. 
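Putting the two layouts side by side, the data objects backing an image are named by a zero-padded object number under the per-format prefix. A small sketch printing the first names of each scheme; the image id and the rb.<idhi>.<idlo> pair are invented values, and the suffix widths follow the comment above.

/* Example data-object names for both formats (illustration only). */
#include <stdio.h>

int main(void)
{
        unsigned long long obj;

        for (obj = 0; obj < 2; obj++)   /* format 2: rbd_data.<id>.<obj> */
                printf("rbd_data.%s.%016llx\n", "10052ae8944a", obj);

        for (obj = 0; obj < 2; obj++)   /* format 1: rb.<idhi>.<idlo>.<obj> */
                printf("rb.%x.%x.%08llx\n", 0x1005, 0x2ae8944a, obj);
        return 0;
}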
*/ #define RBD_SUFFIX ".rbd" + #define RBD_DIRECTORY "rbd_directory" #define RBD_INFO "rbd_info" @@ -47,7 +62,7 @@ struct rbd_image_snap_ondisk { struct rbd_image_header_ondisk { char text[40]; - char block_name[24]; + char object_prefix[24]; char signature[4]; char version[8]; struct { diff --git a/drivers/block/ub.c b/drivers/block/ub.c deleted file mode 100644 index fcec0225ac76..000000000000 --- a/drivers/block/ub.c +++ /dev/null @@ -1,2474 +0,0 @@ -/* - * The low performance USB storage driver (ub). - * - * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net) - * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com) - * - * This work is a part of Linux kernel, is derived from it, - * and is not licensed separately. See file COPYING for details. - * - * TODO (sorted by decreasing priority) - * -- Return sense now that rq allows it (we always auto-sense anyway). - * -- set readonly flag for CDs, set removable flag for CF readers - * -- do inquiry and verify we got a disk and not a tape (for LUN mismatch) - * -- verify the 13 conditions and do bulk resets - * -- highmem - * -- move top_sense and work_bcs into separate allocations (if they survive) - * for cache purists and esoteric architectures. - * -- Allocate structure for LUN 0 before the first ub_sync_tur, avoid NULL. ? - * -- prune comments, they are too volumnous - * -- Resove XXX's - * -- CLEAR, CLR2STS, CLRRS seem to be ripe for refactoring. - */ -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/usb.h> -#include <linux/usb_usual.h> -#include <linux/blkdev.h> -#include <linux/timer.h> -#include <linux/scatterlist.h> -#include <linux/slab.h> -#include <linux/mutex.h> -#include <scsi/scsi.h> - -#define DRV_NAME "ub" - -#define UB_MAJOR 180 - -/* - * The command state machine is the key model for understanding of this driver. - * - * The general rule is that all transitions are done towards the bottom - * of the diagram, thus preventing any loops. - * - * An exception to that is how the STAT state is handled. A counter allows it - * to be re-entered along the path marked with [C]. - * - * +--------+ - * ! INIT ! - * +--------+ - * ! - * ub_scsi_cmd_start fails ->--------------------------------------\ - * ! ! - * V ! - * +--------+ ! - * ! CMD ! ! - * +--------+ ! - * ! +--------+ ! - * was -EPIPE -->-------------------------------->! CLEAR ! ! - * ! +--------+ ! - * ! ! ! - * was error -->------------------------------------- ! --------->\ - * ! ! ! - * /--<-- cmd->dir == NONE ? ! ! - * ! ! ! ! - * ! V ! ! - * ! +--------+ ! ! - * ! ! DATA ! ! ! - * ! +--------+ ! ! - * ! ! +---------+ ! ! - * ! was -EPIPE -->--------------->! CLR2STS ! ! ! - * ! ! +---------+ ! ! - * ! ! ! ! ! - * ! ! was error -->---- ! --------->\ - * ! was error -->--------------------- ! ------------- ! --------->\ - * ! ! ! ! ! - * ! V ! ! ! - * \--->+--------+ ! ! ! - * ! STAT !<--------------------------/ ! ! - * /--->+--------+ ! ! - * ! ! ! ! - * [C] was -EPIPE -->-----------\ ! ! - * ! ! ! ! ! - * +<---- len == 0 ! ! ! - * ! ! ! ! ! - * ! was error -->--------------------------------------!---------->\ - * ! ! ! ! ! - * +<---- bad CSW ! ! ! - * +<---- bad tag ! ! ! - * ! ! V ! ! - * ! ! +--------+ ! ! - * ! ! ! CLRRS ! ! ! - * ! ! +--------+ ! ! - * ! ! ! ! ! - * \------- ! --------------------[C]--------\ ! ! - * ! ! ! ! - * cmd->error---\ +--------+ ! ! - * ! +--------------->! SENSE !<----------/ ! - * STAT_FAIL----/ +--------+ ! - * ! ! V - * ! 
V +--------+ - * \--------------------------------\--------------------->! DONE ! - * +--------+ - */ - -/* - * This many LUNs per USB device. - * Every one of them takes a host, see UB_MAX_HOSTS. - */ -#define UB_MAX_LUNS 9 - -/* - */ - -#define UB_PARTS_PER_LUN 8 - -#define UB_MAX_CDB_SIZE 16 /* Corresponds to Bulk */ - -#define UB_SENSE_SIZE 18 - -/* - */ -struct ub_dev; - -#define UB_MAX_REQ_SG 9 /* cdrecord requires 32KB and maybe a header */ -#define UB_MAX_SECTORS 64 - -/* - * A second is more than enough for a 32K transfer (UB_MAX_SECTORS) - * even if a webcam hogs the bus, but some devices need time to spin up. - */ -#define UB_URB_TIMEOUT (HZ*2) -#define UB_DATA_TIMEOUT (HZ*5) /* ZIP does spin-ups in the data phase */ -#define UB_STAT_TIMEOUT (HZ*5) /* Same spinups and eject for a dataless cmd. */ -#define UB_CTRL_TIMEOUT (HZ/2) /* 500ms ought to be enough to clear a stall */ - -/* - * An instance of a SCSI command in transit. - */ -#define UB_DIR_NONE 0 -#define UB_DIR_READ 1 -#define UB_DIR_ILLEGAL2 2 -#define UB_DIR_WRITE 3 - -#define UB_DIR_CHAR(c) (((c)==UB_DIR_WRITE)? 'w': \ - (((c)==UB_DIR_READ)? 'r': 'n')) - -enum ub_scsi_cmd_state { - UB_CMDST_INIT, /* Initial state */ - UB_CMDST_CMD, /* Command submitted */ - UB_CMDST_DATA, /* Data phase */ - UB_CMDST_CLR2STS, /* Clearing before requesting status */ - UB_CMDST_STAT, /* Status phase */ - UB_CMDST_CLEAR, /* Clearing a stall (halt, actually) */ - UB_CMDST_CLRRS, /* Clearing before retrying status */ - UB_CMDST_SENSE, /* Sending Request Sense */ - UB_CMDST_DONE /* Final state */ -}; - -struct ub_scsi_cmd { - unsigned char cdb[UB_MAX_CDB_SIZE]; - unsigned char cdb_len; - - unsigned char dir; /* 0 - none, 1 - read, 3 - write. */ - enum ub_scsi_cmd_state state; - unsigned int tag; - struct ub_scsi_cmd *next; - - int error; /* Return code - valid upon done */ - unsigned int act_len; /* Return size */ - unsigned char key, asc, ascq; /* May be valid if error==-EIO */ - - int stat_count; /* Retries getting status. */ - unsigned int timeo; /* jiffies until rq->timeout changes */ - - unsigned int len; /* Requested length */ - unsigned int current_sg; - unsigned int nsg; /* sgv[nsg] */ - struct scatterlist sgv[UB_MAX_REQ_SG]; - - struct ub_lun *lun; - void (*done)(struct ub_dev *, struct ub_scsi_cmd *); - void *back; -}; - -struct ub_request { - struct request *rq; - unsigned int current_try; - unsigned int nsg; /* sgv[nsg] */ - struct scatterlist sgv[UB_MAX_REQ_SG]; -}; - -/* - */ -struct ub_capacity { - unsigned long nsec; /* Linux size - 512 byte sectors */ - unsigned int bsize; /* Linux hardsect_size */ - unsigned int bshift; /* Shift between 512 and hard sects */ -}; - -/* - * This is a direct take-off from linux/include/completion.h - * The difference is that I do not wait on this thing, just poll. - * When I want to wait (ub_probe), I just use the stock completion. - * - * Note that INIT_COMPLETION takes no lock. It is correct. But why - * in the bloody hell that thing takes struct instead of pointer to struct - * is quite beyond me. I just copied it from the stock completion. 
- */ -struct ub_completion { - unsigned int done; - spinlock_t lock; -}; - -static DEFINE_MUTEX(ub_mutex); -static inline void ub_init_completion(struct ub_completion *x) -{ - x->done = 0; - spin_lock_init(&x->lock); -} - -#define UB_INIT_COMPLETION(x) ((x).done = 0) - -static void ub_complete(struct ub_completion *x) -{ - unsigned long flags; - - spin_lock_irqsave(&x->lock, flags); - x->done++; - spin_unlock_irqrestore(&x->lock, flags); -} - -static int ub_is_completed(struct ub_completion *x) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&x->lock, flags); - ret = x->done; - spin_unlock_irqrestore(&x->lock, flags); - return ret; -} - -/* - */ -struct ub_scsi_cmd_queue { - int qlen, qmax; - struct ub_scsi_cmd *head, *tail; -}; - -/* - * The block device instance (one per LUN). - */ -struct ub_lun { - struct ub_dev *udev; - struct list_head link; - struct gendisk *disk; - int id; /* Host index */ - int num; /* LUN number */ - char name[16]; - - int changed; /* Media was changed */ - int removable; - int readonly; - - struct ub_request urq; - - /* Use Ingo's mempool if or when we have more than one command. */ - /* - * Currently we never need more than one command for the whole device. - * However, giving every LUN a command is a cheap and automatic way - * to enforce fairness between them. - */ - int cmda[1]; - struct ub_scsi_cmd cmdv[1]; - - struct ub_capacity capacity; -}; - -/* - * The USB device instance. - */ -struct ub_dev { - spinlock_t *lock; - atomic_t poison; /* The USB device is disconnected */ - int openc; /* protected by ub_lock! */ - /* kref is too implicit for our taste */ - int reset; /* Reset is running */ - int bad_resid; - unsigned int tagcnt; - char name[12]; - struct usb_device *dev; - struct usb_interface *intf; - - struct list_head luns; - - unsigned int send_bulk_pipe; /* cached pipe values */ - unsigned int recv_bulk_pipe; - unsigned int send_ctrl_pipe; - unsigned int recv_ctrl_pipe; - - struct tasklet_struct tasklet; - - struct ub_scsi_cmd_queue cmd_queue; - struct ub_scsi_cmd top_rqs_cmd; /* REQUEST SENSE */ - unsigned char top_sense[UB_SENSE_SIZE]; - - struct ub_completion work_done; - struct urb work_urb; - struct timer_list work_timer; - int last_pipe; /* What might need clearing */ - __le32 signature; /* Learned signature */ - struct bulk_cb_wrap work_bcb; - struct bulk_cs_wrap work_bcs; - struct usb_ctrlrequest work_cr; - - struct work_struct reset_work; - wait_queue_head_t reset_wait; -}; - -/* - */ -static void ub_cleanup(struct ub_dev *sc); -static int ub_request_fn_1(struct ub_lun *lun, struct request *rq); -static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, - struct ub_scsi_cmd *cmd, struct ub_request *urq); -static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, - struct ub_scsi_cmd *cmd, struct ub_request *urq); -static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_end_rq(struct request *rq, unsigned int status); -static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, - struct ub_request *urq, struct ub_scsi_cmd *cmd); -static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_urb_complete(struct urb *urb); -static void ub_scsi_action(unsigned long _dev); -static void ub_scsi_dispatch(struct ub_dev *sc); -static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc); 
-static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd); -static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd, - int stalled_pipe); -static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd); -static void ub_reset_enter(struct ub_dev *sc, int try); -static void ub_reset_task(struct work_struct *work); -static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun); -static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun, - struct ub_capacity *ret); -static int ub_sync_reset(struct ub_dev *sc); -static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe); -static int ub_probe_lun(struct ub_dev *sc, int lnum); - -/* - */ -#ifdef CONFIG_USB_LIBUSUAL - -#define ub_usb_ids usb_storage_usb_ids -#else - -static const struct usb_device_id ub_usb_ids[] = { - { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) }, - { } -}; - -MODULE_DEVICE_TABLE(usb, ub_usb_ids); -#endif /* CONFIG_USB_LIBUSUAL */ - -/* - * Find me a way to identify "next free minor" for add_disk(), - * and the array disappears the next day. However, the number of - * hosts has something to do with the naming and /proc/partitions. - * This has to be thought out in detail before changing. - * If UB_MAX_HOST was 1000, we'd use a bitmap. Or a better data structure. - */ -#define UB_MAX_HOSTS 26 -static char ub_hostv[UB_MAX_HOSTS]; - -#define UB_QLOCK_NUM 5 -static spinlock_t ub_qlockv[UB_QLOCK_NUM]; -static int ub_qlock_next = 0; - -static DEFINE_SPINLOCK(ub_lock); /* Locks globals and ->openc */ - -/* - * The id allocator. - * - * This also stores the host for indexing by minor, which is somewhat dirty. - */ -static int ub_id_get(void) -{ - unsigned long flags; - int i; - - spin_lock_irqsave(&ub_lock, flags); - for (i = 0; i < UB_MAX_HOSTS; i++) { - if (ub_hostv[i] == 0) { - ub_hostv[i] = 1; - spin_unlock_irqrestore(&ub_lock, flags); - return i; - } - } - spin_unlock_irqrestore(&ub_lock, flags); - return -1; -} - -static void ub_id_put(int id) -{ - unsigned long flags; - - if (id < 0 || id >= UB_MAX_HOSTS) { - printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id); - return; - } - - spin_lock_irqsave(&ub_lock, flags); - if (ub_hostv[id] == 0) { - spin_unlock_irqrestore(&ub_lock, flags); - printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id); - return; - } - ub_hostv[id] = 0; - spin_unlock_irqrestore(&ub_lock, flags); -} - -/* - * This is necessitated by the fact that blk_cleanup_queue does not - * necesserily destroy the queue. Instead, it may merely decrease q->refcnt. - * Since our blk_init_queue() passes a spinlock common with ub_dev, - * we have life time issues when ub_cleanup frees ub_dev. - */ -static spinlock_t *ub_next_lock(void) -{ - unsigned long flags; - spinlock_t *ret; - - spin_lock_irqsave(&ub_lock, flags); - ret = &ub_qlockv[ub_qlock_next]; - ub_qlock_next = (ub_qlock_next + 1) % UB_QLOCK_NUM; - spin_unlock_irqrestore(&ub_lock, flags); - return ret; -} - -/* - * Downcount for deallocation. This rides on two assumptions: - * - once something is poisoned, its refcount cannot grow - * - opens cannot happen at this time (del_gendisk was done) - * If the above is true, we can drop the lock, which we need for - * blk_cleanup_queue(): the silly thing may attempt to sleep. 
- * [Actually, it never needs to sleep for us, but it calls might_sleep()] - */ -static void ub_put(struct ub_dev *sc) -{ - unsigned long flags; - - spin_lock_irqsave(&ub_lock, flags); - --sc->openc; - if (sc->openc == 0 && atomic_read(&sc->poison)) { - spin_unlock_irqrestore(&ub_lock, flags); - ub_cleanup(sc); - } else { - spin_unlock_irqrestore(&ub_lock, flags); - } -} - -/* - * Final cleanup and deallocation. - */ -static void ub_cleanup(struct ub_dev *sc) -{ - struct list_head *p; - struct ub_lun *lun; - struct request_queue *q; - - while (!list_empty(&sc->luns)) { - p = sc->luns.next; - lun = list_entry(p, struct ub_lun, link); - list_del(p); - - /* I don't think queue can be NULL. But... Stolen from sx8.c */ - if ((q = lun->disk->queue) != NULL) - blk_cleanup_queue(q); - /* - * If we zero disk->private_data BEFORE put_disk, we have - * to check for NULL all over the place in open, release, - * check_media and revalidate, because the block level - * semaphore is well inside the put_disk. - * But we cannot zero after the call, because *disk is gone. - * The sd.c is blatantly racy in this area. - */ - /* disk->private_data = NULL; */ - put_disk(lun->disk); - lun->disk = NULL; - - ub_id_put(lun->id); - kfree(lun); - } - - usb_set_intfdata(sc->intf, NULL); - usb_put_intf(sc->intf); - usb_put_dev(sc->dev); - kfree(sc); -} - -/* - * The "command allocator". - */ -static struct ub_scsi_cmd *ub_get_cmd(struct ub_lun *lun) -{ - struct ub_scsi_cmd *ret; - - if (lun->cmda[0]) - return NULL; - ret = &lun->cmdv[0]; - lun->cmda[0] = 1; - return ret; -} - -static void ub_put_cmd(struct ub_lun *lun, struct ub_scsi_cmd *cmd) -{ - if (cmd != &lun->cmdv[0]) { - printk(KERN_WARNING "%s: releasing a foreign cmd %p\n", - lun->name, cmd); - return; - } - if (!lun->cmda[0]) { - printk(KERN_WARNING "%s: releasing a free cmd\n", lun->name); - return; - } - lun->cmda[0] = 0; -} - -/* - * The command queue. 
- */ -static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd) -{ - struct ub_scsi_cmd_queue *t = &sc->cmd_queue; - - if (t->qlen++ == 0) { - t->head = cmd; - t->tail = cmd; - } else { - t->tail->next = cmd; - t->tail = cmd; - } - - if (t->qlen > t->qmax) - t->qmax = t->qlen; -} - -static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd) -{ - struct ub_scsi_cmd_queue *t = &sc->cmd_queue; - - if (t->qlen++ == 0) { - t->head = cmd; - t->tail = cmd; - } else { - cmd->next = t->head; - t->head = cmd; - } - - if (t->qlen > t->qmax) - t->qmax = t->qlen; -} - -static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc) -{ - struct ub_scsi_cmd_queue *t = &sc->cmd_queue; - struct ub_scsi_cmd *cmd; - - if (t->qlen == 0) - return NULL; - if (--t->qlen == 0) - t->tail = NULL; - cmd = t->head; - t->head = cmd->next; - cmd->next = NULL; - return cmd; -} - -#define ub_cmdq_peek(sc) ((sc)->cmd_queue.head) - -/* - * The request function is our main entry point - */ - -static void ub_request_fn(struct request_queue *q) -{ - struct ub_lun *lun = q->queuedata; - struct request *rq; - - while ((rq = blk_peek_request(q)) != NULL) { - if (ub_request_fn_1(lun, rq) != 0) { - blk_stop_queue(q); - break; - } - } -} - -static int ub_request_fn_1(struct ub_lun *lun, struct request *rq) -{ - struct ub_dev *sc = lun->udev; - struct ub_scsi_cmd *cmd; - struct ub_request *urq; - int n_elem; - - if (atomic_read(&sc->poison)) { - blk_start_request(rq); - ub_end_rq(rq, DID_NO_CONNECT << 16); - return 0; - } - - if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) { - blk_start_request(rq); - ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); - return 0; - } - - if (lun->urq.rq != NULL) - return -1; - if ((cmd = ub_get_cmd(lun)) == NULL) - return -1; - memset(cmd, 0, sizeof(struct ub_scsi_cmd)); - - blk_start_request(rq); - - urq = &lun->urq; - memset(urq, 0, sizeof(struct ub_request)); - urq->rq = rq; - - /* - * get scatterlist from block layer - */ - sg_init_table(&urq->sgv[0], UB_MAX_REQ_SG); - n_elem = blk_rq_map_sg(lun->disk->queue, rq, &urq->sgv[0]); - if (n_elem < 0) { - /* Impossible, because blk_rq_map_sg should not hit ENOMEM. */ - printk(KERN_INFO "%s: failed request map (%d)\n", - lun->name, n_elem); - goto drop; - } - if (n_elem > UB_MAX_REQ_SG) { /* Paranoia */ - printk(KERN_WARNING "%s: request with %d segments\n", - lun->name, n_elem); - goto drop; - } - urq->nsg = n_elem; - - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - ub_cmd_build_packet(sc, lun, cmd, urq); - } else { - ub_cmd_build_block(sc, lun, cmd, urq); - } - cmd->state = UB_CMDST_INIT; - cmd->lun = lun; - cmd->done = ub_rw_cmd_done; - cmd->back = urq; - - cmd->tag = sc->tagcnt++; - if (ub_submit_scsi(sc, cmd) != 0) - goto drop; - - return 0; - -drop: - ub_put_cmd(lun, cmd); - ub_end_rq(rq, DID_ERROR << 16); - return 0; -} - -static void ub_cmd_build_block(struct ub_dev *sc, struct ub_lun *lun, - struct ub_scsi_cmd *cmd, struct ub_request *urq) -{ - struct request *rq = urq->rq; - unsigned int block, nblks; - - if (rq_data_dir(rq) == WRITE) - cmd->dir = UB_DIR_WRITE; - else - cmd->dir = UB_DIR_READ; - - cmd->nsg = urq->nsg; - memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); - - /* - * build the command - * - * The call to blk_queue_logical_block_size() guarantees that request - * is aligned, but it is given in terms of 512 byte units, always. - */ - block = blk_rq_pos(rq) >> lun->capacity.bshift; - nblks = blk_rq_sectors(rq) >> lun->capacity.bshift; - - cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? 
READ_10: WRITE_10; - /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ - cmd->cdb[2] = block >> 24; - cmd->cdb[3] = block >> 16; - cmd->cdb[4] = block >> 8; - cmd->cdb[5] = block; - cmd->cdb[7] = nblks >> 8; - cmd->cdb[8] = nblks; - cmd->cdb_len = 10; - - cmd->len = blk_rq_bytes(rq); -} - -static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, - struct ub_scsi_cmd *cmd, struct ub_request *urq) -{ - struct request *rq = urq->rq; - - if (blk_rq_bytes(rq) == 0) { - cmd->dir = UB_DIR_NONE; - } else { - if (rq_data_dir(rq) == WRITE) - cmd->dir = UB_DIR_WRITE; - else - cmd->dir = UB_DIR_READ; - } - - cmd->nsg = urq->nsg; - memcpy(cmd->sgv, urq->sgv, sizeof(struct scatterlist) * cmd->nsg); - - memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); - cmd->cdb_len = rq->cmd_len; - - cmd->len = blk_rq_bytes(rq); - - /* - * To reapply this to every URB is not as incorrect as it looks. - * In return, we avoid any complicated tracking calculations. - */ - cmd->timeo = rq->timeout; -} - -static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd) -{ - struct ub_lun *lun = cmd->lun; - struct ub_request *urq = cmd->back; - struct request *rq; - unsigned int scsi_status; - - rq = urq->rq; - - if (cmd->error == 0) { - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - if (cmd->act_len >= rq->resid_len) - rq->resid_len = 0; - else - rq->resid_len -= cmd->act_len; - scsi_status = 0; - } else { - if (cmd->act_len != cmd->len) { - scsi_status = SAM_STAT_CHECK_CONDITION; - } else { - scsi_status = 0; - } - } - } else { - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { - /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ - memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); - rq->sense_len = UB_SENSE_SIZE; - if (sc->top_sense[0] != 0) - scsi_status = SAM_STAT_CHECK_CONDITION; - else - scsi_status = DID_ERROR << 16; - } else { - if (cmd->error == -EIO && - (cmd->key == 0 || - cmd->key == MEDIUM_ERROR || - cmd->key == UNIT_ATTENTION)) { - if (ub_rw_cmd_retry(sc, lun, urq, cmd) == 0) - return; - } - scsi_status = SAM_STAT_CHECK_CONDITION; - } - } - - urq->rq = NULL; - - ub_put_cmd(lun, cmd); - ub_end_rq(rq, scsi_status); - blk_start_queue(lun->disk->queue); -} - -static void ub_end_rq(struct request *rq, unsigned int scsi_status) -{ - int error; - - if (scsi_status == 0) { - error = 0; - } else { - error = -EIO; - rq->errors = scsi_status; - } - __blk_end_request_all(rq, error); -} - -static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, - struct ub_request *urq, struct ub_scsi_cmd *cmd) -{ - - if (atomic_read(&sc->poison)) - return -ENXIO; - - ub_reset_enter(sc, urq->current_try); - - if (urq->current_try >= 3) - return -EIO; - urq->current_try++; - - /* Remove this if anyone complains of flooding. */ - printk(KERN_DEBUG "%s: dir %c len/act %d/%d " - "[sense %x %02x %02x] retry %d\n", - sc->name, UB_DIR_CHAR(cmd->dir), cmd->len, cmd->act_len, - cmd->key, cmd->asc, cmd->ascq, urq->current_try); - - memset(cmd, 0, sizeof(struct ub_scsi_cmd)); - ub_cmd_build_block(sc, lun, cmd, urq); - - cmd->state = UB_CMDST_INIT; - cmd->lun = lun; - cmd->done = ub_rw_cmd_done; - cmd->back = urq; - - cmd->tag = sc->tagcnt++; - -#if 0 /* Wasteful */ - return ub_submit_scsi(sc, cmd); -#else - ub_cmdq_add(sc, cmd); - return 0; -#endif -} - -/* - * Submit a regular SCSI operation (not an auto-sense). - * - * The Iron Law of Good Submit Routine is: - * Zero return - callback is done, Nonzero return - callback is not done. - * No exceptions. - * - * Host is assumed locked. 
-
-/*
- * Submit a regular SCSI operation (not an auto-sense).
- *
- * The Iron Law of Good Submit Routine is:
- * Zero return - callback is done, Nonzero return - callback is not done.
- * No exceptions.
- *
- * Host is assumed locked.
- */
-static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-
-	if (cmd->state != UB_CMDST_INIT ||
-	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
-		return -EINVAL;
-	}
-
-	ub_cmdq_add(sc, cmd);
-	/*
-	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
-	 * safer to jump to a tasklet, in case upper layers do something silly.
-	 */
-	tasklet_schedule(&sc->tasklet);
-	return 0;
-}
-
-/*
- * Submit the first URB for the queued command.
- * This function does not deal with queueing in any way.
- */
-static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-	struct bulk_cb_wrap *bcb;
-	int rc;
-
-	bcb = &sc->work_bcb;
-
-	/*
-	 * ``If the allocation length is eighteen or greater, and a device
-	 * server returns less than eighteen bytes of data, the application
-	 * client should assume that the bytes not transferred would have been
-	 * zeroes had the device server returned those bytes.''
-	 *
-	 * We zero sense for all commands so that when a packet request
-	 * fails it does not return a stale sense.
-	 */
-	memset(&sc->top_sense, 0, UB_SENSE_SIZE);
-
-	/* set up the command wrapper */
-	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
-	bcb->Tag = cmd->tag;		/* Endianness is not important */
-	bcb->DataTransferLength = cpu_to_le32(cmd->len);
-	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
-	bcb->Lun = (cmd->lun != NULL) ? cmd->lun->num : 0;
-	bcb->Length = cmd->cdb_len;
-
-	/* copy the command payload */
-	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
-
-	UB_INIT_COMPLETION(sc->work_done);
-
-	sc->last_pipe = sc->send_bulk_pipe;
-	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
-	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
-
-	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
-		/* XXX Clear stalls */
-		ub_complete(&sc->work_done);
-		return rc;
-	}
-
-	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
-	add_timer(&sc->work_timer);
-
-	cmd->state = UB_CMDST_CMD;
-	return 0;
-}
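
The wrapper filled in above is the 31-byte Bulk-Only Command Block Wrapper of the USB mass-storage spec. A sketch of its wire layout for orientation (struct name hypothetical; the driver's real type is struct bulk_cb_wrap from the usb-storage headers):

	struct demo_cbw {
		__le32	signature;	/* offset  0: 0x43425355, "USBC" */
		__u32	tag;		/* offset  4: echoed back in the CSW */
		__le32	data_len;	/* offset  8: expected transfer length */
		__u8	flags;		/* offset 12: bit 7 set = device-to-host */
		__u8	lun;		/* offset 13: target LUN, 0..15 */
		__u8	cdb_len;	/* offset 14: valid CDB bytes, 1..16 */
		__u8	cdb[16];	/* offset 15: the SCSI command itself */
	} __packed;			/* 31 bytes total on the wire */
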
-
-/*
- * Timeout handler.
- */
-static void ub_urb_timeout(unsigned long arg)
-{
-	struct ub_dev *sc = (struct ub_dev *) arg;
-	unsigned long flags;
-
-	spin_lock_irqsave(sc->lock, flags);
-	if (!ub_is_completed(&sc->work_done))
-		usb_unlink_urb(&sc->work_urb);
-	spin_unlock_irqrestore(sc->lock, flags);
-}
-
-/*
- * Completion routine for the work URB.
- *
- * This can be called directly from usb_submit_urb (while we have
- * the sc->lock taken) and from an interrupt (while we do NOT have
- * the sc->lock taken). Therefore, bounce this off to a tasklet.
- */
-static void ub_urb_complete(struct urb *urb)
-{
-	struct ub_dev *sc = urb->context;
-
-	ub_complete(&sc->work_done);
-	tasklet_schedule(&sc->tasklet);
-}
-
-static void ub_scsi_action(unsigned long _dev)
-{
-	struct ub_dev *sc = (struct ub_dev *) _dev;
-	unsigned long flags;
-
-	spin_lock_irqsave(sc->lock, flags);
-	ub_scsi_dispatch(sc);
-	spin_unlock_irqrestore(sc->lock, flags);
-}
-
-static void ub_scsi_dispatch(struct ub_dev *sc)
-{
-	struct ub_scsi_cmd *cmd;
-	int rc;
-
-	while (!sc->reset && (cmd = ub_cmdq_peek(sc)) != NULL) {
-		if (cmd->state == UB_CMDST_DONE) {
-			ub_cmdq_pop(sc);
-			(*cmd->done)(sc, cmd);
-		} else if (cmd->state == UB_CMDST_INIT) {
-			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
-				break;
-			cmd->error = rc;
-			cmd->state = UB_CMDST_DONE;
-		} else {
-			if (!ub_is_completed(&sc->work_done))
-				break;
-			del_timer(&sc->work_timer);
-			ub_scsi_urb_compl(sc, cmd);
-		}
-	}
-}
-
-static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-	struct urb *urb = &sc->work_urb;
-	struct bulk_cs_wrap *bcs;
-	int endp;
-	int len;
-	int rc;
-
-	if (atomic_read(&sc->poison)) {
-		ub_state_done(sc, cmd, -ENODEV);
-		return;
-	}
-
-	endp = usb_pipeendpoint(sc->last_pipe);
-	if (usb_pipein(sc->last_pipe))
-		endp |= USB_DIR_IN;
-
-	if (cmd->state == UB_CMDST_CLEAR) {
-		if (urb->status == -EPIPE) {
-			/*
-			 * STALL while clearing STALL.
-			 * The control pipe clears itself - nothing to do.
-			 */
-			printk(KERN_NOTICE "%s: stall on control pipe\n",
-			    sc->name);
-			goto Bad_End;
-		}
-
-		/*
-		 * We ignore the result for the halt clear.
-		 */
-
-		usb_reset_endpoint(sc->dev, endp);
-
-		ub_state_sense(sc, cmd);
-
-	} else if (cmd->state == UB_CMDST_CLR2STS) {
-		if (urb->status == -EPIPE) {
-			printk(KERN_NOTICE "%s: stall on control pipe\n",
-			    sc->name);
-			goto Bad_End;
-		}
-
-		/*
-		 * We ignore the result for the halt clear.
-		 */
-
-		usb_reset_endpoint(sc->dev, endp);
-
-		ub_state_stat(sc, cmd);
-
-	} else if (cmd->state == UB_CMDST_CLRRS) {
-		if (urb->status == -EPIPE) {
-			printk(KERN_NOTICE "%s: stall on control pipe\n",
-			    sc->name);
-			goto Bad_End;
-		}
-
-		/*
-		 * We ignore the result for the halt clear.
-		 */
-
-		usb_reset_endpoint(sc->dev, endp);
-
-		ub_state_stat_counted(sc, cmd);
-
-	} else if (cmd->state == UB_CMDST_CMD) {
-		switch (urb->status) {
-		case 0:
-			break;
-		case -EOVERFLOW:
-			goto Bad_End;
-		case -EPIPE:
-			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
-			if (rc != 0) {
-				printk(KERN_NOTICE "%s: "
-				    "unable to submit clear (%d)\n",
-				    sc->name, rc);
-				/*
-				 * This is typically ENOMEM or some other such shit.
-				 * Retrying is pointless. Just do Bad End on it...
-				 */
-				ub_state_done(sc, cmd, rc);
-				return;
-			}
-			cmd->state = UB_CMDST_CLEAR;
-			return;
-		case -ESHUTDOWN:	/* unplug */
-		case -EILSEQ:		/* unplug timeout on uhci */
-			ub_state_done(sc, cmd, -ENODEV);
-			return;
-		default:
-			goto Bad_End;
-		}
-		if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
-			goto Bad_End;
-		}
-
-		if (cmd->dir == UB_DIR_NONE || cmd->nsg < 1) {
-			ub_state_stat(sc, cmd);
-			return;
-		}
-
-		// udelay(125);		// usb-storage has this
-		ub_data_start(sc, cmd);
-
-	} else if (cmd->state == UB_CMDST_DATA) {
-		if (urb->status == -EPIPE) {
-			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
-			if (rc != 0) {
-				printk(KERN_NOTICE "%s: "
-				    "unable to submit clear (%d)\n",
-				    sc->name, rc);
-				ub_state_done(sc, cmd, rc);
-				return;
-			}
-			cmd->state = UB_CMDST_CLR2STS;
-			return;
-		}
-		if (urb->status == -EOVERFLOW) {
-			/*
-			 * A babble? Failure, but we must transfer CSW now.
-			 */
-			cmd->error = -EOVERFLOW;	/* A cheap trick... */
-			ub_state_stat(sc, cmd);
-			return;
-		}
-
-		if (cmd->dir == UB_DIR_WRITE) {
-			/*
-			 * Do not continue writes in case of a failure.
-			 * Doing so would cause sectors to be mixed up,
-			 * which is worse than sectors lost.
-			 *
-			 * We must try to read the CSW, or many devices
-			 * get confused.
-			 */
-			len = urb->actual_length;
-			if (urb->status != 0 ||
-			    len != cmd->sgv[cmd->current_sg].length) {
-				cmd->act_len += len;
-
-				cmd->error = -EIO;
-				ub_state_stat(sc, cmd);
-				return;
-			}
-
-		} else {
-			/*
-			 * If an error occurs on read, we record it, and
-			 * continue to fetch data in order to avoid a bubble.
-			 *
-			 * As a small shortcut, we stop if we detect that
-			 * a CSW mixed into the data.
-			 */
-			if (urb->status != 0)
-				cmd->error = -EIO;
-
-			len = urb->actual_length;
-			if (urb->status != 0 ||
-			    len != cmd->sgv[cmd->current_sg].length) {
-				if ((len & 0x1FF) == US_BULK_CS_WRAP_LEN)
-					goto Bad_End;
-			}
-		}
-
-		cmd->act_len += urb->actual_length;
-
-		if (++cmd->current_sg < cmd->nsg) {
-			ub_data_start(sc, cmd);
-			return;
-		}
-		ub_state_stat(sc, cmd);
-
-	} else if (cmd->state == UB_CMDST_STAT) {
-		if (urb->status == -EPIPE) {
-			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
-			if (rc != 0) {
-				printk(KERN_NOTICE "%s: "
-				    "unable to submit clear (%d)\n",
-				    sc->name, rc);
-				ub_state_done(sc, cmd, rc);
-				return;
-			}
-
-			/*
-			 * Having a stall when getting CSW is an error, so
-			 * make sure upper levels are not oblivious to it.
-			 */
-			cmd->error = -EIO;		/* A cheap trick... */
-
-			cmd->state = UB_CMDST_CLRRS;
-			return;
-		}
-
-		/* Catch everything, including -EOVERFLOW and other nasties. */
-		if (urb->status != 0)
-			goto Bad_End;
-
-		if (urb->actual_length == 0) {
-			ub_state_stat_counted(sc, cmd);
-			return;
-		}
-
-		/*
-		 * Check the returned Bulk protocol status.
-		 * The status block has to be validated first.
-		 */
-
-		bcs = &sc->work_bcs;
-
-		if (sc->signature == cpu_to_le32(0)) {
-			/*
-			 * This is the first reply, so do not perform the check.
-			 * Instead, remember the signature the device uses
-			 * for future checks. But do not allow a nul.
-			 */
-			sc->signature = bcs->Signature;
-			if (sc->signature == cpu_to_le32(0)) {
-				ub_state_stat_counted(sc, cmd);
-				return;
-			}
-		} else {
-			if (bcs->Signature != sc->signature) {
-				ub_state_stat_counted(sc, cmd);
-				return;
-			}
-		}
-
-		if (bcs->Tag != cmd->tag) {
-			/*
-			 * This usually happens when we disagree with the
-			 * device's microcode about something. For instance,
-			 * a few of them throw this after timeouts. They buffer
-			 * commands and reply to commands we timed out before.
-			 * Without flushing these replies we loop forever.
-			 */
-			ub_state_stat_counted(sc, cmd);
-			return;
-		}
-
-		if (!sc->bad_resid) {
-			len = le32_to_cpu(bcs->Residue);
-			if (len != cmd->len - cmd->act_len) {
-				/*
-				 * Only start ignoring if this cmd ended well.
-				 */
-				if (cmd->len == cmd->act_len) {
-					printk(KERN_NOTICE "%s: "
-					    "bad residual %d of %d, ignoring\n",
-					    sc->name, len, cmd->len);
-					sc->bad_resid = 1;
-				}
-			}
-		}
-
-		switch (bcs->Status) {
-		case US_BULK_STAT_OK:
-			break;
-		case US_BULK_STAT_FAIL:
-			ub_state_sense(sc, cmd);
-			return;
-		case US_BULK_STAT_PHASE:
-			goto Bad_End;
-		default:
-			printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
-			    sc->name, bcs->Status);
-			ub_state_done(sc, cmd, -EINVAL);
-			return;
-		}
-
-		/* Not zeroing error to preserve a babble indicator */
-		if (cmd->error != 0) {
-			ub_state_sense(sc, cmd);
-			return;
-		}
-		cmd->state = UB_CMDST_DONE;
-		ub_cmdq_pop(sc);
-		(*cmd->done)(sc, cmd);
-
-	} else if (cmd->state == UB_CMDST_SENSE) {
-		ub_state_done(sc, cmd, -EIO);
-
-	} else {
-		printk(KERN_WARNING "%s: wrong command state %d\n",
-		    sc->name, cmd->state);
-		ub_state_done(sc, cmd, -EINVAL);
-		return;
-	}
-	return;
-
-Bad_End: /* Little Excel is dead */
-	ub_state_done(sc, cmd, -EIO);
-}
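
The STAT-state code above accepts a CSW only after checking signature, tag, and residue in that order. For reference, the 13-byte Command Status Wrapper it parses looks like this (sketch; the driver's real type is struct bulk_cs_wrap):

	struct demo_csw {
		__le32	signature;	/* 0x53425355, "USBS"; some devices get
					   this wrong, hence the learned-signature
					   logic above */
		__u32	tag;		/* must match the CBW tag, else the reply
					   is stale and the read is repeated */
		__le32	residue;	/* bytes not transferred; distrusted once
					   sc->bad_resid is set */
		__u8	status;		/* 0 good, 1 failed (go to sense), 2 phase */
	} __packed;			/* 13 bytes total */
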
-
-/*
- * Factorization helper for the command state machine:
- * Initiate a data segment transfer.
- */
-static void ub_data_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-	struct scatterlist *sg = &cmd->sgv[cmd->current_sg];
-	int pipe;
-	int rc;
-
-	UB_INIT_COMPLETION(sc->work_done);
-
-	if (cmd->dir == UB_DIR_READ)
-		pipe = sc->recv_bulk_pipe;
-	else
-		pipe = sc->send_bulk_pipe;
-	sc->last_pipe = pipe;
-	usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe, sg_virt(sg),
-	    sg->length, ub_urb_complete, sc);
-
-	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
-		/* XXX Clear stalls */
-		ub_complete(&sc->work_done);
-		ub_state_done(sc, cmd, rc);
-		return;
-	}
-
-	if (cmd->timeo)
-		sc->work_timer.expires = jiffies + cmd->timeo;
-	else
-		sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
-	add_timer(&sc->work_timer);
-
-	cmd->state = UB_CMDST_DATA;
-}
-
-/*
- * Factorization helper for the command state machine:
- * Finish the command.
- */
-static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
-{
-
-	cmd->error = rc;
-	cmd->state = UB_CMDST_DONE;
-	ub_cmdq_pop(sc);
-	(*cmd->done)(sc, cmd);
-}
-
-/*
- * Factorization helper for the command state machine:
- * Submit a CSW read.
- */
-static int __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-	int rc;
-
-	UB_INIT_COMPLETION(sc->work_done);
-
-	sc->last_pipe = sc->recv_bulk_pipe;
-	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
-	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
-
-	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
-		/* XXX Clear stalls */
-		ub_complete(&sc->work_done);
-		ub_state_done(sc, cmd, rc);
-		return -1;
-	}
-
-	if (cmd->timeo)
-		sc->work_timer.expires = jiffies + cmd->timeo;
-	else
-		sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
-	add_timer(&sc->work_timer);
-	return 0;
-}
-
-/*
- * Factorization helper for the command state machine:
- * Submit a CSW read and go to STAT state.
- */
-static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-
-	if (__ub_state_stat(sc, cmd) != 0)
-		return;
-
-	cmd->stat_count = 0;
-	cmd->state = UB_CMDST_STAT;
-}
-
-/*
- * Factorization helper for the command state machine:
- * Submit a CSW read and go to STAT state with counter (along [C] path).
- */
-static void ub_state_stat_counted(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-
-	if (++cmd->stat_count >= 4) {
-		ub_state_sense(sc, cmd);
-		return;
-	}
-
-	if (__ub_state_stat(sc, cmd) != 0)
-		return;
-
-	cmd->state = UB_CMDST_STAT;
-}
-
-/*
- * Factorization helper for the command state machine:
- * Submit a REQUEST SENSE and go to SENSE state.
- */
-static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-	struct ub_scsi_cmd *scmd;
-	struct scatterlist *sg;
-	int rc;
-
-	if (cmd->cdb[0] == REQUEST_SENSE) {
-		rc = -EPIPE;
-		goto error;
-	}
-
-	scmd = &sc->top_rqs_cmd;
-	memset(scmd, 0, sizeof(struct ub_scsi_cmd));
-	scmd->cdb[0] = REQUEST_SENSE;
-	scmd->cdb[4] = UB_SENSE_SIZE;
-	scmd->cdb_len = 6;
-	scmd->dir = UB_DIR_READ;
-	scmd->state = UB_CMDST_INIT;
-	scmd->nsg = 1;
-	sg = &scmd->sgv[0];
-	sg_init_table(sg, UB_MAX_REQ_SG);
-	sg_set_page(sg, virt_to_page(sc->top_sense), UB_SENSE_SIZE,
-	    (unsigned long)sc->top_sense & (PAGE_SIZE-1));
-	scmd->len = UB_SENSE_SIZE;
-	scmd->lun = cmd->lun;
-	scmd->done = ub_top_sense_done;
-	scmd->back = cmd;
-
-	scmd->tag = sc->tagcnt++;
-
-	cmd->state = UB_CMDST_SENSE;
-
-	ub_cmdq_insert(sc, scmd);
-	return;
-
-error:
-	ub_state_done(sc, cmd, rc);
-}
-
-/*
- * A helper for the command's state machine:
- * Submit a stall clear.
- */
-static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
-    int stalled_pipe)
-{
-	int endp;
-	struct usb_ctrlrequest *cr;
-	int rc;
-
-	endp = usb_pipeendpoint(stalled_pipe);
-	if (usb_pipein(stalled_pipe))
-		endp |= USB_DIR_IN;
-
-	cr = &sc->work_cr;
-	cr->bRequestType = USB_RECIP_ENDPOINT;
-	cr->bRequest = USB_REQ_CLEAR_FEATURE;
-	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
-	cr->wIndex = cpu_to_le16(endp);
-	cr->wLength = cpu_to_le16(0);
-
-	UB_INIT_COMPLETION(sc->work_done);
-
-	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
-	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
-
-	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
-		ub_complete(&sc->work_done);
-		return rc;
-	}
-
-	sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
-	add_timer(&sc->work_timer);
-	return 0;
-}
-
-/*
- */
-static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
-{
-	unsigned char *sense = sc->top_sense;
-	struct ub_scsi_cmd *cmd;
-
-	/*
-	 * Find the command which triggered the unit attention or a check,
-	 * save the sense into it, and advance its state machine.
-	 */
-	if ((cmd = ub_cmdq_peek(sc)) == NULL) {
-		printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
-		return;
-	}
-	if (cmd != scmd->back) {
-		printk(KERN_WARNING "%s: "
-		    "sense done for wrong command 0x%x\n",
-		    sc->name, cmd->tag);
-		return;
-	}
-	if (cmd->state != UB_CMDST_SENSE) {
-		printk(KERN_WARNING "%s: sense done with bad cmd state %d\n",
-		    sc->name, cmd->state);
-		return;
-	}
-
-	/*
-	 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
-	 */
-	cmd->key = sense[2] & 0x0F;
-	cmd->asc = sense[12];
-	cmd->ascq = sense[13];
-
-	ub_scsi_urb_compl(sc, cmd);
-}
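
ub_top_sense_done() picks the key/ASC/ASCQ out of fixed-format sense data. The relevant byte positions, shown as a minimal sketch (this assumes the fixed-format layout of SPC; descriptor-format sense would use different offsets):

	/* Fixed-format sense data, as consumed by ub_top_sense_done() above. */
	static void demo_parse_sense(const unsigned char *sense,
	    unsigned char *key, unsigned char *asc, unsigned char *ascq)
	{
		*key  = sense[2] & 0x0F;	/* sense key, e.g. 0x6 UNIT ATTENTION */
		*asc  = sense[12];		/* additional sense code */
		*ascq = sense[13];		/* additional sense code qualifier */
	}
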
-
-/*
- * Reset management
- */
-
-static void ub_reset_enter(struct ub_dev *sc, int try)
-{
-
-	if (sc->reset) {
-		/* This happens often on multi-LUN devices. */
-		return;
-	}
-	sc->reset = try + 1;
-
-#if 0 /* Not needed because the disconnect waits for us. */
-	unsigned long flags;
-	spin_lock_irqsave(&ub_lock, flags);
-	sc->openc++;
-	spin_unlock_irqrestore(&ub_lock, flags);
-#endif
-
-#if 0 /* We let them stop themselves. */
-	struct ub_lun *lun;
-	list_for_each_entry(lun, &sc->luns, link) {
-		blk_stop_queue(lun->disk->queue);
-	}
-#endif
-
-	schedule_work(&sc->reset_work);
-}
-
-static void ub_reset_task(struct work_struct *work)
-{
-	struct ub_dev *sc = container_of(work, struct ub_dev, reset_work);
-	unsigned long flags;
-	struct ub_lun *lun;
-	int rc;
-
-	if (!sc->reset) {
-		printk(KERN_WARNING "%s: Running reset unrequested\n",
-		    sc->name);
-		return;
-	}
-
-	if (atomic_read(&sc->poison)) {
-		;
-	} else if ((sc->reset & 1) == 0) {
-		ub_sync_reset(sc);
-		msleep(700);	/* usb-storage sleeps 6s (!) */
-		ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
-		ub_probe_clear_stall(sc, sc->send_bulk_pipe);
-	} else if (sc->dev->actconfig->desc.bNumInterfaces != 1) {
-		;
-	} else {
-		rc = usb_lock_device_for_reset(sc->dev, sc->intf);
-		if (rc < 0) {
-			printk(KERN_NOTICE
-			    "%s: usb_lock_device_for_reset failed (%d)\n",
-			    sc->name, rc);
-		} else {
-			rc = usb_reset_device(sc->dev);
-			if (rc < 0) {
-				printk(KERN_NOTICE "%s: "
-				    "usb_reset_device failed (%d)\n",
-				    sc->name, rc);
-			}
-			usb_unlock_device(sc->dev);
-		}
-	}
-
-	/*
-	 * In theory, no commands can be running while reset is active,
-	 * so nobody can ask for another reset, and so we do not need any
-	 * queues of resets or anything. We do need a spinlock though,
-	 * to interact with the block layer.
-	 */
-	spin_lock_irqsave(sc->lock, flags);
-	sc->reset = 0;
-	tasklet_schedule(&sc->tasklet);
-	list_for_each_entry(lun, &sc->luns, link) {
-		blk_start_queue(lun->disk->queue);
-	}
-	wake_up(&sc->reset_wait);
-	spin_unlock_irqrestore(sc->lock, flags);
-}
-
-/*
- * XXX Reset brackets are too much hassle to implement, so just stub them
- * in order to prevent forced unbinding (which deadlocks solid when our
- * ->disconnect method waits for the reset to complete and this kills keventd).
- *
- * XXX Tell Alan to move usb_unlock_device inside of usb_reset_device,
- * or else the post_reset is invoked, and restarts I/O on a locked device.
- */
-static int ub_pre_reset(struct usb_interface *iface) {
-	return 0;
-}
-
-static int ub_post_reset(struct usb_interface *iface) {
-	return 0;
-}
-
-/*
- * This is called from a process context.
- */
-static void ub_revalidate(struct ub_dev *sc, struct ub_lun *lun)
-{
-
-	lun->readonly = 0;	/* XXX Query this from the device */
-
-	lun->capacity.nsec = 0;
-	lun->capacity.bsize = 512;
-	lun->capacity.bshift = 0;
-
-	if (ub_sync_tur(sc, lun) != 0)
-		return;			/* Not ready */
-	lun->changed = 0;
-
-	if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
-		/*
-		 * The retry here means something is wrong, either with the
-		 * device, with the transport, or with our code.
-		 * We keep this because sd.c has retries for capacity.
-		 */
-		if (ub_sync_read_cap(sc, lun, &lun->capacity) != 0) {
-			lun->capacity.nsec = 0;
-			lun->capacity.bsize = 512;
-			lun->capacity.bshift = 0;
-		}
-	}
-}
-
-/*
- * The open function.
- * This is mostly needed to keep refcounting, but also to support
- * media checks on removable media drives.
- */
-static int ub_bd_open(struct block_device *bdev, fmode_t mode)
-{
-	struct ub_lun *lun = bdev->bd_disk->private_data;
-	struct ub_dev *sc = lun->udev;
-	unsigned long flags;
-	int rc;
-
-	spin_lock_irqsave(&ub_lock, flags);
-	if (atomic_read(&sc->poison)) {
-		spin_unlock_irqrestore(&ub_lock, flags);
-		return -ENXIO;
-	}
-	sc->openc++;
-	spin_unlock_irqrestore(&ub_lock, flags);
-
-	if (lun->removable || lun->readonly)
-		check_disk_change(bdev);
-
-	/*
-	 * sd.c considers ->media_present and ->changed not equivalent,
-	 * under some pretty murky conditions (a failure of READ CAPACITY).
-	 * We may need it one day.
-	 */
-	if (lun->removable && lun->changed && !(mode & FMODE_NDELAY)) {
-		rc = -ENOMEDIUM;
-		goto err_open;
-	}
-
-	if (lun->readonly && (mode & FMODE_WRITE)) {
-		rc = -EROFS;
-		goto err_open;
-	}
-
-	return 0;
-
-err_open:
-	ub_put(sc);
-	return rc;
-}
-
-static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
-	int ret;
-
-	mutex_lock(&ub_mutex);
-	ret = ub_bd_open(bdev, mode);
-	mutex_unlock(&ub_mutex);
-
-	return ret;
-}
-
-
-/*
- */
-static int ub_bd_release(struct gendisk *disk, fmode_t mode)
-{
-	struct ub_lun *lun = disk->private_data;
-	struct ub_dev *sc = lun->udev;
-
-	mutex_lock(&ub_mutex);
-	ub_put(sc);
-	mutex_unlock(&ub_mutex);
-
-	return 0;
-}
-
-/*
- * The ioctl interface.
- */
-static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
-    unsigned int cmd, unsigned long arg)
-{
-	void __user *usermem = (void __user *) arg;
-	int ret;
-
-	mutex_lock(&ub_mutex);
-	ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
-	mutex_unlock(&ub_mutex);
-
-	return ret;
-}
-
-/*
- * This is called by check_disk_change if we reported a media change.
- * The main objective here is to discover the features of the media such as
- * the capacity, read-only status, etc. USB storage generally does not
- * need to be spun up, but if we needed it, this would be the place.
- *
- * This call can sleep.
- *
- * The return code is not used.
- */
-static int ub_bd_revalidate(struct gendisk *disk)
-{
-	struct ub_lun *lun = disk->private_data;
-
-	ub_revalidate(lun->udev, lun);
-
-	/* XXX Support sector size switching like in sr.c */
-	blk_queue_logical_block_size(disk->queue, lun->capacity.bsize);
-	set_capacity(disk, lun->capacity.nsec);
-	// set_disk_ro(sdkp->disk, lun->readonly);
-
-	return 0;
-}
-
-/*
- * The check is called by the block layer to verify if the media
- * is still available. It is supposed to be harmless, lightweight and
- * non-intrusive in case the media was not changed.
- *
- * This call can sleep.
- *
- * The return code is bool!
- */
-static unsigned int ub_bd_check_events(struct gendisk *disk,
-    unsigned int clearing)
-{
-	struct ub_lun *lun = disk->private_data;
-
-	if (!lun->removable)
-		return 0;
-
-	/*
-	 * We clean checks always after every command, so this is not
-	 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
-	 * the device is actually not ready with operator or software
-	 * intervention required. One dangerous item might be a drive which
-	 * spins itself down, and come the time to write dirty pages, this
-	 * will fail, then the block layer discards the data. Since we never
-	 * spin drives up, such devices simply cannot be used with ub anyway.
-	 */
-	if (ub_sync_tur(lun->udev, lun) != 0) {
-		lun->changed = 1;
-		return DISK_EVENT_MEDIA_CHANGE;
-	}
-
-	return lun->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
-}
-
-static const struct block_device_operations ub_bd_fops = {
-	.owner		= THIS_MODULE,
-	.open		= ub_bd_unlocked_open,
-	.release	= ub_bd_release,
-	.ioctl		= ub_bd_ioctl,
-	.check_events	= ub_bd_check_events,
-	.revalidate_disk = ub_bd_revalidate,
-};
-
-/*
- * Common ->done routine for commands executed synchronously.
- */
-static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
-{
-	struct completion *cop = cmd->back;
-	complete(cop);
-}
-
-/*
- * Test if the device has a check condition on it, synchronously.
- */
-static int ub_sync_tur(struct ub_dev *sc, struct ub_lun *lun)
-{
-	struct ub_scsi_cmd *cmd;
-	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
-	unsigned long flags;
-	struct completion compl;
-	int rc;
-
-	init_completion(&compl);
-
-	rc = -ENOMEM;
-	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
-		goto err_alloc;
-
-	cmd->cdb[0] = TEST_UNIT_READY;
-	cmd->cdb_len = 6;
-	cmd->dir = UB_DIR_NONE;
-	cmd->state = UB_CMDST_INIT;
-	cmd->lun = lun;			/* This may be NULL, but that's ok */
-	cmd->done = ub_probe_done;
-	cmd->back = &compl;
-
-	spin_lock_irqsave(sc->lock, flags);
-	cmd->tag = sc->tagcnt++;
-
-	rc = ub_submit_scsi(sc, cmd);
-	spin_unlock_irqrestore(sc->lock, flags);
-
-	if (rc != 0)
-		goto err_submit;
-
-	wait_for_completion(&compl);
-
-	rc = cmd->error;
-
-	if (rc == -EIO && cmd->key != 0)	/* Retries for benh's key */
-		rc = cmd->key;
-
-err_submit:
-	kfree(cmd);
-err_alloc:
-	return rc;
-}
-
-/*
- * Read the SCSI capacity synchronously (for probing).
- */
-static int ub_sync_read_cap(struct ub_dev *sc, struct ub_lun *lun,
-    struct ub_capacity *ret)
-{
-	struct ub_scsi_cmd *cmd;
-	struct scatterlist *sg;
-	char *p;
-	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
-	unsigned long flags;
-	unsigned int bsize, shift;
-	unsigned long nsec;
-	struct completion compl;
-	int rc;
-
-	init_completion(&compl);
-
-	rc = -ENOMEM;
-	if ((cmd = kzalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
-		goto err_alloc;
-	p = (char *)cmd + sizeof(struct ub_scsi_cmd);
-
-	cmd->cdb[0] = 0x25;
-	cmd->cdb_len = 10;
-	cmd->dir = UB_DIR_READ;
-	cmd->state = UB_CMDST_INIT;
-	cmd->nsg = 1;
-	sg = &cmd->sgv[0];
-	sg_init_table(sg, UB_MAX_REQ_SG);
-	sg_set_page(sg, virt_to_page(p), 8, (unsigned long)p & (PAGE_SIZE-1));
-	cmd->len = 8;
-	cmd->lun = lun;
-	cmd->done = ub_probe_done;
-	cmd->back = &compl;
-
-	spin_lock_irqsave(sc->lock, flags);
-	cmd->tag = sc->tagcnt++;
-
-	rc = ub_submit_scsi(sc, cmd);
-	spin_unlock_irqrestore(sc->lock, flags);
-
-	if (rc != 0)
-		goto err_submit;
-
-	wait_for_completion(&compl);
-
-	if (cmd->error != 0) {
-		rc = -EIO;
-		goto err_read;
-	}
-	if (cmd->act_len != 8) {
-		rc = -EIO;
-		goto err_read;
-	}
-
-	/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
-	nsec = be32_to_cpu(*(__be32 *)p) + 1;
-	bsize = be32_to_cpu(*(__be32 *)(p + 4));
-	switch (bsize) {
-	case 512:	shift = 0;	break;
-	case 1024:	shift = 1;	break;
-	case 2048:	shift = 2;	break;
-	case 4096:	shift = 3;	break;
-	default:
-		rc = -EDOM;
-		goto err_inv_bsize;
-	}
-
-	ret->bsize = bsize;
-	ret->bshift = shift;
-	ret->nsec = nsec << shift;
-	rc = 0;
-
-err_inv_bsize:
-err_read:
-err_submit:
-	kfree(cmd);
-err_alloc:
-	return rc;
-}
-
-/*
- */
-static void ub_probe_urb_complete(struct urb *urb)
-{
-	struct completion *cop = urb->context;
-	complete(cop);
-}
-
-static void ub_probe_timeout(unsigned long arg)
-{
-	struct completion *cop = (struct completion *) arg;
-	complete(cop);
-}
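
The two tiny callbacks above are the building blocks of the synchronous helpers that follow: a plain kernel timer whose handler completes the same completion the URB callback would, so the wait is bounded either way. A minimal sketch of the pattern, using the init_timer() interface of this era (names hypothetical):

	static void demo_timeout(unsigned long arg)
	{
		complete((struct completion *) arg);	/* fires if the URB never completes */
	}

	/* Wait for an URB, but never longer than 'timeout' jiffies. */
	static int demo_wait_urb(struct urb *urb, unsigned long timeout)
	{
		struct completion done;
		struct timer_list timer;

		init_completion(&done);
		urb->context = &done;		/* URB callback does complete(&done) */

		init_timer(&timer);
		timer.function = demo_timeout;
		timer.data = (unsigned long) &done;
		timer.expires = jiffies + timeout;
		add_timer(&timer);

		wait_for_completion(&done);	/* woken by URB callback or by timer */

		del_timer_sync(&timer);
		usb_kill_urb(urb);		/* harmless if the URB already finished */
		return urb->status;
	}
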
-
-/*
- * Reset with a Bulk reset.
- */
-static int ub_sync_reset(struct ub_dev *sc)
-{
-	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
-	struct usb_ctrlrequest *cr;
-	struct completion compl;
-	struct timer_list timer;
-	int rc;
-
-	init_completion(&compl);
-
-	cr = &sc->work_cr;
-	cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE;
-	cr->bRequest = US_BULK_RESET_REQUEST;
-	cr->wValue = cpu_to_le16(0);
-	cr->wIndex = cpu_to_le16(ifnum);
-	cr->wLength = cpu_to_le16(0);
-
-	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
-	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
-
-	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
-		printk(KERN_WARNING
-		    "%s: Unable to submit a bulk reset (%d)\n", sc->name, rc);
-		return rc;
-	}
-
-	init_timer(&timer);
-	timer.function = ub_probe_timeout;
-	timer.data = (unsigned long) &compl;
-	timer.expires = jiffies + UB_CTRL_TIMEOUT;
-	add_timer(&timer);
-
-	wait_for_completion(&compl);
-
-	del_timer_sync(&timer);
-	usb_kill_urb(&sc->work_urb);
-
-	return sc->work_urb.status;
-}
-
-/*
- * Get the number of LUNs by way of the Bulk GetMaxLUN command.
- */
-static int ub_sync_getmaxlun(struct ub_dev *sc)
-{
-	int ifnum = sc->intf->cur_altsetting->desc.bInterfaceNumber;
-	unsigned char *p;
-	enum { ALLOC_SIZE = 1 };
-	struct usb_ctrlrequest *cr;
-	struct completion compl;
-	struct timer_list timer;
-	int nluns;
-	int rc;
-
-	init_completion(&compl);
-
-	rc = -ENOMEM;
-	if ((p = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
-		goto err_alloc;
-	*p = 55;
-
-	cr = &sc->work_cr;
-	cr->bRequestType = USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE;
-	cr->bRequest = US_BULK_GET_MAX_LUN;
-	cr->wValue = cpu_to_le16(0);
-	cr->wIndex = cpu_to_le16(ifnum);
-	cr->wLength = cpu_to_le16(1);
-
-	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->recv_ctrl_pipe,
-	    (unsigned char*) cr, p, 1, ub_probe_urb_complete, &compl);
-
-	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0)
-		goto err_submit;
-
-	init_timer(&timer);
-	timer.function = ub_probe_timeout;
-	timer.data = (unsigned long) &compl;
-	timer.expires = jiffies + UB_CTRL_TIMEOUT;
-	add_timer(&timer);
-
-	wait_for_completion(&compl);
-
-	del_timer_sync(&timer);
-	usb_kill_urb(&sc->work_urb);
-
-	if ((rc = sc->work_urb.status) < 0)
-		goto err_io;
-
-	if (sc->work_urb.actual_length != 1) {
-		nluns = 0;
-	} else {
-		if ((nluns = *p) == 55) {
-			nluns = 0;
-		} else {
-			/* GetMaxLUN returns the maximum LUN number */
-			nluns += 1;
-			if (nluns > UB_MAX_LUNS)
-				nluns = UB_MAX_LUNS;
-		}
-	}
-
-	kfree(p);
-	return nluns;
-
-err_io:
-err_submit:
-	kfree(p);
-err_alloc:
-	return rc;
-}
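
Get Max LUN is a one-byte class-specific control read on the default pipe; the reply is the highest LUN number, so the LUN count is the reply plus one. For comparison with the URB-based version above, here is a simplified, hedged sketch using the synchronous control API instead (function name hypothetical; error handling reduced to the essentials):

	/* A simplified Get Max LUN via usb_control_msg(), illustrative only. */
	static int demo_get_max_lun(struct usb_device *udev, int ifnum)
	{
		unsigned char *buf;
		int rc;

		buf = kmalloc(1, GFP_KERNEL);	/* DMA-able buffer, not the stack */
		if (buf == NULL)
			return -ENOMEM;

		rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
		    US_BULK_GET_MAX_LUN,
		    USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		    0, ifnum, buf, 1, 5000);

		/* Reply is the highest LUN number; stalled or short: assume one LUN. */
		rc = (rc == 1) ? *buf + 1 : 1;
		kfree(buf);
		return rc;
	}
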
-
-/*
- * Clear initial stalls.
- */
-static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
-{
-	int endp;
-	struct usb_ctrlrequest *cr;
-	struct completion compl;
-	struct timer_list timer;
-	int rc;
-
-	init_completion(&compl);
-
-	endp = usb_pipeendpoint(stalled_pipe);
-	if (usb_pipein(stalled_pipe))
-		endp |= USB_DIR_IN;
-
-	cr = &sc->work_cr;
-	cr->bRequestType = USB_RECIP_ENDPOINT;
-	cr->bRequest = USB_REQ_CLEAR_FEATURE;
-	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
-	cr->wIndex = cpu_to_le16(endp);
-	cr->wLength = cpu_to_le16(0);
-
-	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
-	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
-
-	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
-		printk(KERN_WARNING
-		    "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
-		return rc;
-	}
-
-	init_timer(&timer);
-	timer.function = ub_probe_timeout;
-	timer.data = (unsigned long) &compl;
-	timer.expires = jiffies + UB_CTRL_TIMEOUT;
-	add_timer(&timer);
-
-	wait_for_completion(&compl);
-
-	del_timer_sync(&timer);
-	usb_kill_urb(&sc->work_urb);
-
-	usb_reset_endpoint(sc->dev, endp);
-
-	return 0;
-}
-
-/*
- * Get the pipe settings.
- */
-static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
-    struct usb_interface *intf)
-{
-	struct usb_host_interface *altsetting = intf->cur_altsetting;
-	struct usb_endpoint_descriptor *ep_in = NULL;
-	struct usb_endpoint_descriptor *ep_out = NULL;
-	struct usb_endpoint_descriptor *ep;
-	int i;
-
-	/*
-	 * Find the endpoints we need.
-	 * We are expecting a minimum of 2 endpoints - in and out (bulk).
-	 * We will ignore any others.
-	 */
-	for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
-		ep = &altsetting->endpoint[i].desc;
-
-		/* Is it a BULK endpoint? */
-		if (usb_endpoint_xfer_bulk(ep)) {
-			/* BULK in or out? */
-			if (usb_endpoint_dir_in(ep)) {
-				if (ep_in == NULL)
-					ep_in = ep;
-			} else {
-				if (ep_out == NULL)
-					ep_out = ep;
-			}
-		}
-	}
-
-	if (ep_in == NULL || ep_out == NULL) {
-		printk(KERN_NOTICE "%s: failed endpoint check\n", sc->name);
-		return -ENODEV;
-	}
-
-	/* Calculate and store the pipe values */
-	sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
-	sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
-	sc->send_bulk_pipe = usb_sndbulkpipe(dev,
-	    usb_endpoint_num(ep_out));
-	sc->recv_bulk_pipe = usb_rcvbulkpipe(dev,
-	    usb_endpoint_num(ep_in));
-
-	return 0;
-}
-
-/*
- * Probing is done in the process context, which allows us to cheat
- * and not to build a state machine for the discovery.
- */
-static int ub_probe(struct usb_interface *intf,
-    const struct usb_device_id *dev_id)
-{
-	struct ub_dev *sc;
-	int nluns;
-	int rc;
-	int i;
-
-	if (usb_usual_check_type(dev_id, USB_US_TYPE_UB))
-		return -ENXIO;
-
-	rc = -ENOMEM;
-	if ((sc = kzalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
-		goto err_core;
-	sc->lock = ub_next_lock();
-	INIT_LIST_HEAD(&sc->luns);
-	usb_init_urb(&sc->work_urb);
-	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
-	atomic_set(&sc->poison, 0);
-	INIT_WORK(&sc->reset_work, ub_reset_task);
-	init_waitqueue_head(&sc->reset_wait);
-
-	init_timer(&sc->work_timer);
-	sc->work_timer.data = (unsigned long) sc;
-	sc->work_timer.function = ub_urb_timeout;
-
-	ub_init_completion(&sc->work_done);
-	sc->work_done.done = 1;		/* A little yuk, but oh well... */
-
-	sc->dev = interface_to_usbdev(intf);
-	sc->intf = intf;
-	// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-	usb_set_intfdata(intf, sc);
-	usb_get_dev(sc->dev);
-	/*
-	 * Since we give the interface struct to the block level through
-	 * disk->driverfs_dev, we have to pin it. Otherwise, block_uevent
-	 * oopses on close after a disconnect (kernels 2.6.16 and up).
-	 */
-	usb_get_intf(sc->intf);
-
-	snprintf(sc->name, 12, DRV_NAME "(%d.%d)",
-	    sc->dev->bus->busnum, sc->dev->devnum);
-
-	/* XXX Verify that we can handle the device (from descriptors) */
-
-	if (ub_get_pipes(sc, sc->dev, intf) != 0)
-		goto err_dev_desc;
-
-	/*
-	 * At this point, all USB initialization is done, do upper layer.
-	 * We really hate halfway initialized structures, so from the
-	 * invariants perspective, this ub_dev is fully constructed at
-	 * this point.
-	 */
-
-	/*
-	 * This is needed to clear toggles. It is a problem only if we do
-	 * `rmmod ub && modprobe ub` without disconnects, but we like that.
-	 */
-#if 0 /* iPod Mini fails if we do this (big white iPod works) */
-	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
-	ub_probe_clear_stall(sc, sc->send_bulk_pipe);
-#endif
-
-	/*
-	 * The way this is used by the startup code is a little specific.
-	 * A SCSI check causes a USB stall. Our common case code sees it
-	 * and clears the check, after which the device is ready for use.
-	 * But if a check was not present, any command other than
-	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
-	 *
-	 * If we neglect to clear the SCSI check, the first real command fails
-	 * (which is the capacity readout). We clear that and retry, but why
-	 * cause spurious retries for no reason?
-	 *
-	 * Revalidation may start with its own TEST_UNIT_READY, but that one
-	 * has to succeed, so we clear checks with an additional one here.
-	 * In any case it's not our business how revalidation is implemented.
-	 */
-	for (i = 0; i < 3; i++) {  /* Retries for the schwag key from KS'04 */
-		if ((rc = ub_sync_tur(sc, NULL)) <= 0) break;
-		if (rc != 0x6) break;
-		msleep(10);
-	}
-
-	nluns = 1;
-	for (i = 0; i < 3; i++) {
-		if ((rc = ub_sync_getmaxlun(sc)) < 0)
-			break;
-		if (rc != 0) {
-			nluns = rc;
-			break;
-		}
-		msleep(100);
-	}
-
-	for (i = 0; i < nluns; i++) {
-		ub_probe_lun(sc, i);
-	}
-	return 0;
-
-err_dev_desc:
-	usb_set_intfdata(intf, NULL);
-	usb_put_intf(sc->intf);
-	usb_put_dev(sc->dev);
-	kfree(sc);
-err_core:
-	return rc;
-}
-
-static int ub_probe_lun(struct ub_dev *sc, int lnum)
-{
-	struct ub_lun *lun;
-	struct request_queue *q;
-	struct gendisk *disk;
-	int rc;
-
-	rc = -ENOMEM;
-	if ((lun = kzalloc(sizeof(struct ub_lun), GFP_KERNEL)) == NULL)
-		goto err_alloc;
-	lun->num = lnum;
-
-	rc = -ENOSR;
-	if ((lun->id = ub_id_get()) == -1)
-		goto err_id;
-
-	lun->udev = sc;
-
-	snprintf(lun->name, 16, DRV_NAME "%c(%d.%d.%d)",
-	    lun->id + 'a', sc->dev->bus->busnum, sc->dev->devnum, lun->num);
-
-	lun->removable = 1;		/* XXX Query this from the device */
-	lun->changed = 1;		/* ub_revalidate clears only */
-	ub_revalidate(sc, lun);
-
-	rc = -ENOMEM;
-	if ((disk = alloc_disk(UB_PARTS_PER_LUN)) == NULL)
-		goto err_diskalloc;
-
-	sprintf(disk->disk_name, DRV_NAME "%c", lun->id + 'a');
-	disk->major = UB_MAJOR;
-	disk->first_minor = lun->id * UB_PARTS_PER_LUN;
-	disk->fops = &ub_bd_fops;
-	disk->private_data = lun;
-	disk->driverfs_dev = &sc->intf->dev;
-
-	rc = -ENOMEM;
-	if ((q = blk_init_queue(ub_request_fn, sc->lock)) == NULL)
-		goto err_blkqinit;
-
-	disk->queue = q;
-
-	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
-	blk_queue_max_segments(q, UB_MAX_REQ_SG);
-	blk_queue_segment_boundary(q, 0xffffffff);	/* Dubious. */
-	blk_queue_max_hw_sectors(q, UB_MAX_SECTORS);
-	blk_queue_logical_block_size(q, lun->capacity.bsize);
-
-	lun->disk = disk;
-	q->queuedata = lun;
-	list_add(&lun->link, &sc->luns);
-
-	set_capacity(disk, lun->capacity.nsec);
-	if (lun->removable)
-		disk->flags |= GENHD_FL_REMOVABLE;
-
-	add_disk(disk);
-
-	return 0;
-
-err_blkqinit:
-	put_disk(disk);
-err_diskalloc:
-	ub_id_put(lun->id);
-err_id:
-	kfree(lun);
-err_alloc:
-	return rc;
-}
-
-static void ub_disconnect(struct usb_interface *intf)
-{
-	struct ub_dev *sc = usb_get_intfdata(intf);
-	struct ub_lun *lun;
-	unsigned long flags;
-
-	/*
-	 * Prevent ub_bd_release from pulling the rug from under us.
-	 * XXX This is starting to look like a kref.
-	 * XXX Why not take this ref at probe time?
-	 */
-	spin_lock_irqsave(&ub_lock, flags);
-	sc->openc++;
-	spin_unlock_irqrestore(&ub_lock, flags);
-
-	/*
-	 * Fence stall clearings, operations triggered by unlinkings and so on.
-	 * We do not attempt to unlink any URBs, because we do not trust the
-	 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
-	 */
-	atomic_set(&sc->poison, 1);
-
-	/*
-	 * Wait for reset to end, if any.
-	 */
-	wait_event(sc->reset_wait, !sc->reset);
-
-	/*
-	 * Blow away queued commands.
-	 *
-	 * Actually, this never works, because before we get here
-	 * the HCD terminates outstanding URB(s). It causes our
-	 * SCSI command queue to advance, commands fail to submit,
-	 * and the whole queue drains. So, we just use this code to
-	 * print warnings.
-	 */
-	spin_lock_irqsave(sc->lock, flags);
-	{
-		struct ub_scsi_cmd *cmd;
-		int cnt = 0;
-		while ((cmd = ub_cmdq_peek(sc)) != NULL) {
-			cmd->error = -ENOTCONN;
-			cmd->state = UB_CMDST_DONE;
-			ub_cmdq_pop(sc);
-			(*cmd->done)(sc, cmd);
-			cnt++;
-		}
-		if (cnt != 0) {
-			printk(KERN_WARNING "%s: "
-			    "%d was queued after shutdown\n", sc->name, cnt);
-		}
-	}
-	spin_unlock_irqrestore(sc->lock, flags);
-
-	/*
-	 * Unregister the upper layer.
-	 */
-	list_for_each_entry(lun, &sc->luns, link) {
-		del_gendisk(lun->disk);
-		/*
-		 * I wish I could do:
-		 *    queue_flag_set(QUEUE_FLAG_DEAD, q);
-		 * As it is, we rely on our internal poisoning and let
-		 * the upper levels spin furiously failing all the I/O.
-		 */
-	}
-
-	/*
-	 * Testing for -EINPROGRESS is always a bug, so we are bending
-	 * the rules a little.
-	 */
-	spin_lock_irqsave(sc->lock, flags);
-	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
-		printk(KERN_WARNING "%s: "
-		    "URB is active after disconnect\n", sc->name);
-	}
-	spin_unlock_irqrestore(sc->lock, flags);
-
-	/*
-	 * There is virtually no chance that another CPU runs a timeout so long
-	 * after ub_urb_complete should have called del_timer, but only if HCD
-	 * didn't forget to deliver a callback on unlink.
-	 */
-	del_timer_sync(&sc->work_timer);
-
-	/*
-	 * At this point there must be no commands coming from anyone
-	 * and no URBs left in transit.
-	 */
-
-	ub_put(sc);
-}
-
-static struct usb_driver ub_driver = {
-	.name =		"ub",
-	.probe =	ub_probe,
-	.disconnect =	ub_disconnect,
-	.id_table =	ub_usb_ids,
-	.pre_reset =	ub_pre_reset,
-	.post_reset =	ub_post_reset,
-};
-
-static int __init ub_init(void)
-{
-	int rc;
-	int i;
-
-	pr_info("'Low Performance USB Block' driver is deprecated. "
-	    "Please switch to usb-storage\n");
-	for (i = 0; i < UB_QLOCK_NUM; i++)
-		spin_lock_init(&ub_qlockv[i]);
-
-	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
-		goto err_regblkdev;
-
-	if ((rc = usb_register(&ub_driver)) != 0)
-		goto err_register;
-
-	usb_usual_set_present(USB_US_TYPE_UB);
-	return 0;
-
-err_register:
-	unregister_blkdev(UB_MAJOR, DRV_NAME);
-err_regblkdev:
-	return rc;
-}
-
-static void __exit ub_exit(void)
-{
-	usb_deregister(&ub_driver);
-
-	unregister_blkdev(UB_MAJOR, DRV_NAME);
-	usb_usual_clear_present(USB_US_TYPE_UB);
-}
-
-module_init(ub_init);
-module_exit(ub_exit);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c0bbeb470754..0bdde8fba397 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -14,6 +14,9 @@
 
 #define PART_BITS 4
 
+static bool use_bio;
+module_param(use_bio, bool, S_IRUGO);
+
 static int major;
 static DEFINE_IDA(vd_index_ida);
 
@@ -23,6 +26,7 @@ struct virtio_blk
 {
 	struct virtio_device *vdev;
 	struct virtqueue *vq;
+	wait_queue_head_t queue_wait;
 
 	/* The disk structure for the kernel. */
 	struct gendisk *disk;
 
@@ -51,53 +55,244 @@ struct virtio_blk
 struct virtblk_req
 {
 	struct request *req;
+	struct bio *bio;
 	struct virtio_blk_outhdr out_hdr;
 	struct virtio_scsi_inhdr in_hdr;
+	struct work_struct work;
+	struct virtio_blk *vblk;
+	int flags;
 	u8 status;
+	struct scatterlist sg[];
+};
+
+enum {
+	VBLK_IS_FLUSH		= 1,
+	VBLK_REQ_FLUSH		= 2,
+	VBLK_REQ_DATA		= 4,
+	VBLK_REQ_FUA		= 8,
 };
 
-static void blk_done(struct virtqueue *vq)
+static inline int virtblk_result(struct virtblk_req *vbr)
+{
+	switch (vbr->status) {
+	case VIRTIO_BLK_S_OK:
+		return 0;
+	case VIRTIO_BLK_S_UNSUPP:
+		return -ENOTTY;
+	default:
+		return -EIO;
+	}
+}
+
+static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
+						    gfp_t gfp_mask)
 {
-	struct virtio_blk *vblk = vq->vdev->priv;
 	struct virtblk_req *vbr;
-	unsigned int len;
-	unsigned long flags;
 
-	spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
-	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
-		int error;
+	vbr = mempool_alloc(vblk->pool, gfp_mask);
+	if (!vbr)
+		return NULL;
 
-		switch (vbr->status) {
-		case VIRTIO_BLK_S_OK:
-			error = 0;
-			break;
-		case VIRTIO_BLK_S_UNSUPP:
-			error = -ENOTTY;
-			break;
-		default:
-			error = -EIO;
+	vbr->vblk = vblk;
+	if (use_bio)
+		sg_init_table(vbr->sg, vblk->sg_elems);
+
+	return vbr;
+}
+
+static void virtblk_add_buf_wait(struct virtio_blk *vblk,
+				 struct virtblk_req *vbr,
+				 unsigned long out,
+				 unsigned long in)
+{
+	DEFINE_WAIT(wait);
+
+	for (;;) {
+		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
+					  TASK_UNINTERRUPTIBLE);
+
+		spin_lock_irq(vblk->disk->queue->queue_lock);
+		if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
+				      GFP_ATOMIC) < 0) {
+			spin_unlock_irq(vblk->disk->queue->queue_lock);
+			io_schedule();
+		} else {
+			virtqueue_kick(vblk->vq);
+			spin_unlock_irq(vblk->disk->queue->queue_lock);
 			break;
 		}
-		switch (vbr->req->cmd_type) {
-		case REQ_TYPE_BLOCK_PC:
-			vbr->req->resid_len = vbr->in_hdr.residual;
-			vbr->req->sense_len = vbr->in_hdr.sense_len;
-			vbr->req->errors = vbr->in_hdr.errors;
-			break;
-		case REQ_TYPE_SPECIAL:
-			vbr->req->errors = (error != 0);
-			break;
-		default:
-			break;
+	}
+
+	finish_wait(&vblk->queue_wait, &wait);
+}
+
+static inline void virtblk_add_req(struct virtblk_req *vbr,
+				   unsigned int out, unsigned int in)
+{
+	struct virtio_blk *vblk = vbr->vblk;
+
+	spin_lock_irq(vblk->disk->queue->queue_lock);
+	if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
+				       GFP_ATOMIC) < 0)) {
+		spin_unlock_irq(vblk->disk->queue->queue_lock);
+		virtblk_add_buf_wait(vblk, vbr, out, in);
+		return;
+	}
+	virtqueue_kick(vblk->vq);
+	spin_unlock_irq(vblk->disk->queue->queue_lock);
+}
+
+static int virtblk_bio_send_flush(struct virtblk_req *vbr)
+{
+	unsigned int out = 0, in = 0;
+
+	vbr->flags |= VBLK_IS_FLUSH;
+	vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
+	vbr->out_hdr.sector = 0;
+	vbr->out_hdr.ioprio = 0;
+	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
+	sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));
+
+	virtblk_add_req(vbr, out, in);
+
+	return 0;
+}
+
+static int virtblk_bio_send_data(struct virtblk_req *vbr)
+{
+	struct virtio_blk *vblk = vbr->vblk;
+	unsigned int num, out = 0, in = 0;
+	struct bio *bio = vbr->bio;
+
+	vbr->flags &= ~VBLK_IS_FLUSH;
+	vbr->out_hdr.type = 0;
+	vbr->out_hdr.sector = bio->bi_sector;
+	vbr->out_hdr.ioprio = bio_prio(bio);
+
+	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
+
+	num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);
+
+	sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
+		   sizeof(vbr->status));
+
+	if (num) {
+		if (bio->bi_rw & REQ_WRITE) {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
+			out += num;
+		} else {
+			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
+			in += num;
+		}
+	}
+
+	virtblk_add_req(vbr, out, in);
+
+	return 0;
+}
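
The bio path above lays out each virtio request as one device-readable header, the data scatterlist, and a one-byte device-writable status. A sketch of the resulting layout for a two-segment write, under the legacy virtqueue_add_buf() convention of this era (the first out entries are device-readable, the last in entries device-writable); illustrative only:

	/*
	 * Descriptor layout built by virtblk_bio_send_data() for a write:
	 *
	 *   sg[0]  out_hdr  (device reads:  type | ioprio | sector)
	 *   sg[1]  data     (device reads)               \  out = 3
	 *   sg[2]  data     (device reads)               /
	 *   sg[3]  status   (device writes, 1 byte)         in = 1
	 *
	 * For a read, the data buffers move to the device-writable tail:
	 * out = 1 (header only), in = 3 (data + status).
	 */
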
+
+static void virtblk_bio_send_data_work(struct work_struct *work)
+{
+	struct virtblk_req *vbr;
+
+	vbr = container_of(work, struct virtblk_req, work);
+
+	virtblk_bio_send_data(vbr);
+}
+
+static void virtblk_bio_send_flush_work(struct work_struct *work)
+{
+	struct virtblk_req *vbr;
+
+	vbr = container_of(work, struct virtblk_req, work);
+
+	virtblk_bio_send_flush(vbr);
+}
+
+static inline void virtblk_request_done(struct virtblk_req *vbr)
+{
+	struct virtio_blk *vblk = vbr->vblk;
+	struct request *req = vbr->req;
+	int error = virtblk_result(vbr);
+
+	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
+		req->resid_len = vbr->in_hdr.residual;
+		req->sense_len = vbr->in_hdr.sense_len;
+		req->errors = vbr->in_hdr.errors;
+	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
+		req->errors = (error != 0);
+	}
+
+	__blk_end_request_all(req, error);
+	mempool_free(vbr, vblk->pool);
+}
+
+static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
+{
+	struct virtio_blk *vblk = vbr->vblk;
+
+	if (vbr->flags & VBLK_REQ_DATA) {
+		/* Send out the actual write data */
+		INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
+		queue_work(virtblk_wq, &vbr->work);
+	} else {
+		bio_endio(vbr->bio, virtblk_result(vbr));
+		mempool_free(vbr, vblk->pool);
+	}
+}
+
+static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
+{
+	struct virtio_blk *vblk = vbr->vblk;
 
-		__blk_end_request_all(vbr->req, error);
+	if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
+		/* Send out a flush before ending the bio */
+		vbr->flags &= ~VBLK_REQ_DATA;
+		INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
+		queue_work(virtblk_wq, &vbr->work);
+	} else {
+		bio_endio(vbr->bio, virtblk_result(vbr));
 		mempool_free(vbr, vblk->pool);
 	}
+}
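
The FUA handling above is worth spelling out once; this is my reading of the helpers, shown as a sequence, not a normative description:

	/*
	 * Completion sequencing for a FUA write on the bio path
	 * (VBLK_REQ_FUA is set by virtblk_make_request below):
	 *
	 *   1. virtblk_bio_send_data()   write reaches the device cache
	 *   2. virtblk_bio_data_done()   sees VBLK_REQ_FUA, queues a flush
	 *   3. virtblk_bio_send_flush()  cache forced to stable media
	 *   4. virtblk_bio_flush_done()  no VBLK_REQ_DATA left, ends the bio
	 *
	 * i.e. FUA is emulated as write-then-flush, since the virtio-blk
	 * protocol of this era has no per-request FUA bit.
	 */
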
+
+static inline void virtblk_bio_done(struct virtblk_req *vbr)
+{
+	if (unlikely(vbr->flags & VBLK_IS_FLUSH))
+		virtblk_bio_flush_done(vbr);
+	else
+		virtblk_bio_data_done(vbr);
+}
+
+static void virtblk_done(struct virtqueue *vq)
+{
+	struct virtio_blk *vblk = vq->vdev->priv;
+	bool bio_done = false, req_done = false;
+	struct virtblk_req *vbr;
+	unsigned long flags;
+	unsigned int len;
+
+	spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
+	do {
+		virtqueue_disable_cb(vq);
+		while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
+			if (vbr->bio) {
+				virtblk_bio_done(vbr);
+				bio_done = true;
+			} else {
+				virtblk_request_done(vbr);
+				req_done = true;
+			}
+		}
+	} while (!virtqueue_enable_cb(vq));
 
 	/* In case the queue is stopped waiting for more buffers. */
-	blk_start_queue(vblk->disk->queue);
+	if (req_done)
+		blk_start_queue(vblk->disk->queue);
 	spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);
+
+	if (bio_done)
+		wake_up(&vblk->queue_wait);
 }
 
 static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
@@ -106,13 +301,13 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	unsigned long num, out = 0, in = 0;
 	struct virtblk_req *vbr;
 
-	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
+	vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
 	if (!vbr)
 		/* When another request finishes we'll try again. */
 		return false;
 
 	vbr->req = req;
-
+	vbr->bio = NULL;
 	if (req->cmd_flags & REQ_FLUSH) {
 		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
 		vbr->out_hdr.sector = 0;
@@ -172,7 +367,8 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 		}
 	}
 
-	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC)<0) {
+	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
+			      GFP_ATOMIC) < 0) {
 		mempool_free(vbr, vblk->pool);
 		return false;
 	}
@@ -180,7 +376,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
 	return true;
 }
 
-static void do_virtblk_request(struct request_queue *q)
+static void virtblk_request(struct request_queue *q)
 {
 	struct virtio_blk *vblk = q->queuedata;
 	struct request *req;
@@ -203,6 +399,34 @@ static void do_virtblk_request(struct request_queue *q)
 	virtqueue_kick(vblk->vq);
 }
 
+static void virtblk_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct virtio_blk *vblk = q->queuedata;
+	struct virtblk_req *vbr;
+
+	BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);
+
+	vbr = virtblk_alloc_req(vblk, GFP_NOIO);
+	if (!vbr) {
+		bio_endio(bio, -ENOMEM);
+		return;
+	}
+
+	vbr->bio = bio;
+	vbr->flags = 0;
+	if (bio->bi_rw & REQ_FLUSH)
+		vbr->flags |= VBLK_REQ_FLUSH;
+	if (bio->bi_rw & REQ_FUA)
+		vbr->flags |= VBLK_REQ_FUA;
+	if (bio->bi_size)
+		vbr->flags |= VBLK_REQ_DATA;
+
+	if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
+		virtblk_bio_send_flush(vbr);
+	else
+		virtblk_bio_send_data(vbr);
+}
+
 /* return id (s/n) string for *disk to *id_str
  */
 static int virtblk_get_id(struct gendisk *disk, char *id_str)
@@ -360,7 +584,7 @@ static int init_vq(struct virtio_blk *vblk)
 	int err = 0;
 
 	/* We expect one virtqueue, for output. */
-	vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests");
+	vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests");
 	if (IS_ERR(vblk->vq))
 		err = PTR_ERR(vblk->vq);
 
@@ -477,6 +701,8 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	struct virtio_blk *vblk;
 	struct request_queue *q;
 	int err, index;
+	int pool_size;
+
 	u64 cap;
 	u32 v, blk_size, sg_elems, opt_io_size;
 	u16 min_io_size;
@@ -506,10 +732,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		goto out_free_index;
 	}
 
+	init_waitqueue_head(&vblk->queue_wait);
 	vblk->vdev = vdev;
 	vblk->sg_elems = sg_elems;
 	sg_init_table(vblk->sg, vblk->sg_elems);
 	mutex_init(&vblk->config_lock);
+
 	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
 	vblk->config_enable = true;
 
@@ -517,7 +745,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	if (err)
 		goto out_free_vblk;
 
-	vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
+	pool_size = sizeof(struct virtblk_req);
+	if (use_bio)
+		pool_size += sizeof(struct scatterlist) * sg_elems;
+	vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
 	if (!vblk->pool) {
 		err = -ENOMEM;
 		goto out_free_vq;
@@ -530,12 +761,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 		goto out_mempool;
 	}
 
-	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL);
+	q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
 	if (!q) {
 		err = -ENOMEM;
 		goto out_put_disk;
 	}
 
+	if (use_bio)
+		blk_queue_make_request(q, virtblk_make_request);
 	q->queuedata = vblk;
 
 	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
@@ -620,7 +853,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
 	if (!err && opt_io_size)
 		blk_queue_io_opt(q, blk_size * opt_io_size);
 
-	add_disk(vblk->disk);
 	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
 	if (err)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 73f196ca713f..280a13846e6c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -42,6 +42,7 @@
 
 #include <xen/events.h>
 #include <xen/page.h>
+#include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 #include "common.h"
@@ -337,7 +338,7 @@ static void xen_blkbk_unmap(struct pending_req *req)
 			invcount++;
 	}
 
-	ret = gnttab_unmap_refs(unmap, pages, invcount, false);
+	ret = gnttab_unmap_refs(unmap, NULL, pages, invcount);
 	BUG_ON(ret);
 }
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 2c2d2e5c1597..007db8986e84 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -670,7 +670,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	spin_unlock_irqrestore(&info->io_lock, flags);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_work_sync(&info->work);
+	flush_work(&info->work);
 
 	del_gendisk(info->gd);
 
@@ -719,7 +719,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 	spin_unlock_irq(&info->io_lock);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_work_sync(&info->work);
+	flush_work(&info->work);
 
 	/* Free resources associated with old device channel. */
 	if (info->ring_ref != GRANT_INVALID_REF) {
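
A note on the xen-blkfront change above, which the diff itself does not explain: with the workqueue rework of this kernel cycle, flush_work() waits for the work item to finish even if it was requeued, which is the guarantee flush_work_sync() used to provide, so the _sync variant could be dropped tree-wide; that is my understanding of the rationale. The pattern being preserved, sketched for illustration:

	/*
	 * Illustrative only: the teardown ordering xen-blkfront relies on.
	 * The work item must not be queued again after this point.
	 */
	static void demo_teardown(struct blkfront_info *info)
	{
		flush_work(&info->work);	/* handler no longer running anywhere */
		/* ... now safe to free the ring and grant references ... */
	}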