author    | Matias Bjørling <m@bjorling.me> | 2016-01-12 07:49:19 +0100
committer | Jens Axboe <axboe@fb.com>       | 2016-01-12 08:21:16 -0700
commit    | 069368e91879a3a640cfae4bdc1f9f8cc99c93a0 (patch)
tree      | 57a2d4df9b3c9d705afc058b952b3ab450364b6d /drivers/lightnvm
parent    | c27278bddd75a3ee755c8e83c6bcc3fdd7271ef6 (diff)
lightnvm: move ppa erase logic to core
A device may operate in single, dual or quad plane mode. The gennvm
media manager handles this with explicit helpers that convert a single
ppa into 1, 2 or 4 separate ppas in a ppa list. To ease the
implementation of recovery and system blocks, move this functionality
directly into the core.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
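For readers unfamiliar with plane expansion, here is a minimal, self-contained userspace sketch of what the new nvm_erase_ppa() helper does before handing the request to the driver's erase_block callback: a single address becomes a list of 1, 2 or 4 addresses that differ only in their plane index. The struct toy_ppa / expand_planes() names and the PLANE_* values are illustrative stand-ins, not the kernel's struct ppa_addr, struct nvm_rq or NVM_PLANE_* constants; the real helper additionally allocates the list from the device DMA pool and converts the addresses to device format, as shown in the diff below.

/*
 * Standalone sketch (not kernel code) of the plane-expansion step in
 * nvm_erase_ppa(): one address is duplicated into a list with one entry
 * per plane, the copies differing only in their plane index.  The
 * 1 << mode shift mirrors the plane_cnt calculation in the patch.
 */
#include <stdio.h>
#include <stdlib.h>

enum plane_mode { PLANE_SINGLE = 0, PLANE_DOUBLE = 1, PLANE_QUAD = 2 };

struct toy_ppa {
	unsigned int blk;	/* block within the LUN */
	unsigned int pl;	/* plane index */
};

/* Expand one address into a per-plane list; returns the entry count. */
static int expand_planes(struct toy_ppa ppa, enum plane_mode mode,
			 struct toy_ppa **list)
{
	int plane_cnt = 1 << mode;	/* 1, 2 or 4 entries */
	int pl_idx;

	*list = calloc(plane_cnt, sizeof(**list));
	if (!*list)
		return -1;

	for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
		ppa.pl = pl_idx;	/* only the plane field changes */
		(*list)[pl_idx] = ppa;
	}
	return plane_cnt;
}

int main(void)
{
	struct toy_ppa ppa = { .blk = 42, .pl = 0 };
	struct toy_ppa *list;
	int i, n = expand_planes(ppa, PLANE_QUAD, &list);

	for (i = 0; i < n; i++)
		printf("erase blk %u plane %u\n", list[i].blk, list[i].pl);
	free(list);
	return 0;
}

Keeping this loop in the core means every consumer (the gennvm media manager today, and the recovery and system-block code mentioned above later) gets identical multi-plane behaviour without reimplementing it.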
Diffstat (limited to 'drivers/lightnvm')
-rw-r--r-- | drivers/lightnvm/core.c   | 67
-rw-r--r-- | drivers/lightnvm/gennvm.c | 68
2 files changed, 71 insertions(+), 64 deletions(-)
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 8f41b245cd55..6134339aa6cf 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -192,6 +192,73 @@ int nvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk)
 }
 EXPORT_SYMBOL(nvm_erase_blk);
 
+void nvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = dev_to_generic_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_addr_to_generic_mode);
+
+void nvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
+{
+	int i;
+
+	if (rqd->nr_pages > 1) {
+		for (i = 0; i < rqd->nr_pages; i++)
+			rqd->ppa_list[i] = generic_to_dev_addr(dev,
+							rqd->ppa_list[i]);
+	} else {
+		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
+	}
+}
+EXPORT_SYMBOL(nvm_generic_to_addr_mode);
+
+int nvm_erase_ppa(struct nvm_dev *dev, struct ppa_addr ppa)
+{
+	int plane_cnt = 0, pl_idx, ret;
+	struct nvm_rq rqd;
+
+	if (!dev->ops->erase_block)
+		return 0;
+
+	if (dev->plane_mode == NVM_PLANE_SINGLE) {
+		rqd.nr_pages = 1;
+		rqd.ppa_addr = ppa;
+	} else {
+		plane_cnt = (1 << dev->plane_mode);
+		rqd.nr_pages = plane_cnt;
+
+		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
+							&rqd.dma_ppa_list);
+		if (!rqd.ppa_list) {
+			pr_err("nvm: failed to allocate dma memory\n");
+			return -ENOMEM;
+		}
+
+		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
+			ppa.g.pl = pl_idx;
+			rqd.ppa_list[pl_idx] = ppa;
+		}
+	}
+
+	nvm_generic_to_addr_mode(dev, &rqd);
+
+	ret = dev->ops->erase_block(dev, &rqd);
+
+	if (plane_cnt)
+		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
+
+	return ret;
+}
+EXPORT_SYMBOL(nvm_erase_ppa);
+
 static int nvm_core_init(struct nvm_dev *dev)
 {
 	struct nvm_id *id = &dev->identity;
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 2a96ff6923f0..373be72816bd 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -317,39 +317,13 @@ static void gennvm_put_blk(struct nvm_dev *dev, struct nvm_block *blk)
 	spin_unlock(&vlun->lock);
 }
 
-static void gennvm_addr_to_generic_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
-
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = dev_to_generic_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = dev_to_generic_addr(dev, rqd->ppa_addr);
-	}
-}
-
-static void gennvm_generic_to_addr_mode(struct nvm_dev *dev, struct nvm_rq *rqd)
-{
-	int i;
-
-	if (rqd->nr_pages > 1) {
-		for (i = 0; i < rqd->nr_pages; i++)
-			rqd->ppa_list[i] = generic_to_dev_addr(dev,
-							rqd->ppa_list[i]);
-	} else {
-		rqd->ppa_addr = generic_to_dev_addr(dev, rqd->ppa_addr);
-	}
-}
-
 static int gennvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
 {
 	if (!dev->ops->submit_io)
 		return -ENODEV;
 
 	/* Convert address space */
-	gennvm_generic_to_addr_mode(dev, rqd);
+	nvm_generic_to_addr_mode(dev, rqd);
 
 	rqd->dev = dev;
 	return dev->ops->submit_io(dev, rqd);
@@ -391,7 +365,7 @@ static void gennvm_mark_blk_bad(struct nvm_dev *dev, struct nvm_rq *rqd)
 	if (dev->ops->set_bb_tbl(dev, rqd, 1))
 		return;
 
-	gennvm_addr_to_generic_mode(dev, rqd);
+	nvm_addr_to_generic_mode(dev, rqd);
 
 	/* look up blocks and mark them as bad */
 	if (rqd->nr_pages > 1)
@@ -425,43 +399,9 @@ static int gennvm_end_io(struct nvm_rq *rqd, int error)
 static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 							unsigned long flags)
 {
-	int plane_cnt = 0, pl_idx, ret;
-	struct ppa_addr addr;
-	struct nvm_rq rqd;
-
-	if (!dev->ops->erase_block)
-		return 0;
-
-	addr = block_to_ppa(dev, blk);
-
-	if (dev->plane_mode == NVM_PLANE_SINGLE) {
-		rqd.nr_pages = 1;
-		rqd.ppa_addr = addr;
-	} else {
-		plane_cnt = (1 << dev->plane_mode);
-		rqd.nr_pages = plane_cnt;
-
-		rqd.ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL,
-							&rqd.dma_ppa_list);
-		if (!rqd.ppa_list) {
-			pr_err("gennvm: failed to allocate dma memory\n");
-			return -ENOMEM;
-		}
+	struct ppa_addr addr = block_to_ppa(dev, blk);
 
-		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
-			addr.g.pl = pl_idx;
-			rqd.ppa_list[pl_idx] = addr;
-		}
-	}
-
-	gennvm_generic_to_addr_mode(dev, &rqd);
-
-	ret = dev->ops->erase_block(dev, &rqd);
-
-	if (plane_cnt)
-		nvm_dev_dma_free(dev, rqd.ppa_list, rqd.dma_ppa_list);
-
-	return ret;
+	return nvm_erase_ppa(dev, addr);
 }
 
 static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)