author     Linus Torvalds <torvalds@linux-foundation.org>   2016-10-21 10:54:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-10-21 10:54:01 -0700
commit     ecd06f28836f9d10c92a4740b5aaf7caf4324927
tree       8ceb01018cb6701836bc127ca2a8ebe1c9b3ee57 /drivers/nvme
parent     e59f30b417499414a1f9afaaad3a42937060832f
parent     e9c9346e20c1b18a3ec30defd5ff7134bcc0da6d
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"A set of fixes that missed the merge window, mostly due to me being
away around that time.
Nothing major here, a mix of nvme cleanups and fixes, and one fix for
the badblocks handling"
* 'for-linus' of git://git.kernel.dk/linux-block:
nvmet: use symbolic constants for CNS values
nvme: use symbolic constants for CNS values
nvme.h: add an enum for cns values
nvme.h: don't use uuid_be
nvme.h: resync with nvme-cli
nvme: Add tertiary number to NVME_VS
nvme : Add sysfs entry for NVMe CMBs when appropriate
nvme: don't schedule multiple resets
nvme: Delete created IO queues on reset
nvme: Stop probing a removed device
badblocks: fix overlapping check for clearing
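Two of the entries above ('nvme.h: add an enum for cns values' and 'nvme: Add tertiary number to NVME_VS') land in include/linux/nvme.h, which is outside the drivers/nvme diffstat below. As a rough sketch of what those header changes amount to, derived only from the CNS codes and the 1.2.1 version encoding visible in the hunks that follow (the exact layout in nvme.h may differ):

enum {
	NVME_ID_CNS_NS			= 0x00,
	NVME_ID_CNS_CTRL		= 0x01,
	NVME_ID_CNS_NS_ACTIVE_LIST	= 0x02,
};

/* NVME_VS now carries a tertiary (patch) level, so NVMe 1.2.1 is NVME_VS(1, 2, 1). */
#define NVME_VS(major, minor, tertiary) \
	(((major) << 16) | ((minor) << 8) | (tertiary))

This is what lets nvmet_subsys_alloc() replace the open-coded (1 << 16) | (2 << 8) | 1 with NVME_VS(1, 2, 1), and the host and target identify paths switch from bare 0x00/0x01/0x02 CNS literals to named constants.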
Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/core.c         | 14
-rw-r--r--   drivers/nvme/host/pci.c          | 77
-rw-r--r--   drivers/nvme/host/scsi.c         |  4
-rw-r--r--   drivers/nvme/target/admin-cmd.c  |  8
-rw-r--r--   drivers/nvme/target/core.c       |  2
-rw-r--r--   drivers/nvme/target/discovery.c  |  4
6 files changed, 71 insertions, 38 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 329381a28edf..79e679d12f3b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -554,7 +554,7 @@ int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id)
 
 	/* gcc-4.4.4 (at least) has issues with initializers and anon unions */
 	c.identify.opcode = nvme_admin_identify;
-	c.identify.cns = cpu_to_le32(1);
+	c.identify.cns = cpu_to_le32(NVME_ID_CNS_CTRL);
 
 	*id = kmalloc(sizeof(struct nvme_id_ctrl), GFP_KERNEL);
 	if (!*id)
@@ -572,7 +572,7 @@ static int nvme_identify_ns_list(struct nvme_ctrl *dev, unsigned nsid, __le32 *n
 	struct nvme_command c = { };
 
 	c.identify.opcode = nvme_admin_identify;
-	c.identify.cns = cpu_to_le32(2);
+	c.identify.cns = cpu_to_le32(NVME_ID_CNS_NS_ACTIVE_LIST);
 	c.identify.nsid = cpu_to_le32(nsid);
 	return nvme_submit_sync_cmd(dev->admin_q, &c, ns_list, 0x1000);
 }
@@ -900,9 +900,9 @@ static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 		return -ENODEV;
 	}
 
-	if (ns->ctrl->vs >= NVME_VS(1, 1))
+	if (ns->ctrl->vs >= NVME_VS(1, 1, 0))
 		memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui));
-	if (ns->ctrl->vs >= NVME_VS(1, 2))
+	if (ns->ctrl->vs >= NVME_VS(1, 2, 0))
 		memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid));
 
 	return 0;
@@ -1086,6 +1086,8 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
 	int ret;
 
 	while ((ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts)) == 0) {
+		if (csts == ~0)
+			return -ENODEV;
 		if ((csts & NVME_CSTS_RDY) == bit)
 			break;
 
@@ -1240,7 +1242,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 	}
 	page_shift = NVME_CAP_MPSMIN(cap) + 12;
 
-	if (ctrl->vs >= NVME_VS(1, 1))
+	if (ctrl->vs >= NVME_VS(1, 1, 0))
 		ctrl->subsystem = NVME_CAP_NSSRC(cap);
 
 	ret = nvme_identify_ctrl(ctrl, &id);
@@ -1840,7 +1842,7 @@ static void nvme_scan_work(struct work_struct *work)
 		return;
 
 	nn = le32_to_cpu(id->nn);
-	if (ctrl->vs >= NVME_VS(1, 1) &&
+	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
 	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
 		if (!nvme_scan_ns_list(ctrl, nn))
 			goto done;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0fc99f0f2571..0248d0e21fee 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -99,6 +99,7 @@ struct nvme_dev {
 	dma_addr_t cmb_dma_addr;
 	u64 cmb_size;
 	u32 cmbsz;
+	u32 cmbloc;
 	struct nvme_ctrl ctrl;
 	struct completion ioq_wait;
 };
@@ -893,7 +894,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
 			 "I/O %d QID %d timeout, reset controller\n",
 			 req->tag, nvmeq->qid);
 		nvme_dev_disable(dev, false);
-		queue_work(nvme_workq, &dev->reset_work);
+		nvme_reset(dev);
 
 		/*
 		 * Mark the request as handled, since the inline shutdown
@@ -1214,7 +1215,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 	struct nvme_queue *nvmeq;
 
-	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1) ?
+	dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ?
 						NVME_CAP_NSSRC(cap) : 0;
 
 	if (dev->subsystem &&
@@ -1291,7 +1292,7 @@ static void nvme_watchdog_timer(unsigned long data)
 
 	/* Skip controllers under certain specific conditions. */
 	if (nvme_should_reset(dev, csts)) {
-		if (queue_work(nvme_workq, &dev->reset_work))
+		if (!nvme_reset(dev))
 			dev_warn(dev->dev,
 				"Failed status: 0x%x, reset controller.\n",
 				csts);
@@ -1331,28 +1332,37 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
 	return ret >= 0 ? 0 : ret;
 }
 
+static ssize_t nvme_cmb_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
+
+	return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+			ndev->cmbloc, ndev->cmbsz);
+}
+static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
+
 static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 {
 	u64 szu, size, offset;
-	u32 cmbloc;
 	resource_size_t bar_size;
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	void __iomem *cmb;
 	dma_addr_t dma_addr;
 
-	if (!use_cmb_sqes)
-		return NULL;
-
 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
 	if (!(NVME_CMB_SZ(dev->cmbsz)))
 		return NULL;
+	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
 
-	cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
+	if (!use_cmb_sqes)
+		return NULL;
 
 	szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
 	size = szu * NVME_CMB_SZ(dev->cmbsz);
-	offset = szu * NVME_CMB_OFST(cmbloc);
-	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(cmbloc));
+	offset = szu * NVME_CMB_OFST(dev->cmbloc);
+	bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
 
 	if (offset > bar_size)
 		return NULL;
@@ -1365,7 +1375,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
 	if (size > bar_size - offset)
 		size = bar_size - offset;
 
-	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(cmbloc)) + offset;
+	dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
 	cmb = ioremap_wc(dma_addr, size);
 	if (!cmb)
 		return NULL;
@@ -1511,9 +1521,9 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
 	return 0;
 }
 
-static void nvme_disable_io_queues(struct nvme_dev *dev)
+static void nvme_disable_io_queues(struct nvme_dev *dev, int queues)
 {
-	int pass, queues = dev->online_queues - 1;
+	int pass;
 	unsigned long timeout;
 	u8 opcode = nvme_admin_delete_sq;
 
@@ -1616,9 +1626,25 @@ static int nvme_pci_enable(struct nvme_dev *dev)
 			dev->q_depth);
 	}
 
-	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2))
+	/*
+	 * CMBs can currently only exist on >=1.2 PCIe devices. We only
+	 * populate sysfs if a CMB is implemented. Note that we add the
+	 * CMB attribute to the nvme_ctrl kobj which removes the need to remove
+	 * it on exit. Since nvme_dev_attrs_group has no name we can pass
+	 * NULL as final argument to sysfs_add_file_to_group.
+	 */
+
+	if (readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 2, 0)) {
 		dev->cmb = nvme_map_cmb(dev);
 
+		if (dev->cmbsz) {
+			if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
+						    &dev_attr_cmb.attr, NULL))
+				dev_warn(dev->dev,
+					 "failed to add sysfs attribute for CMB\n");
+		}
+	}
+
 	pci_enable_pcie_error_reporting(pdev);
 	pci_save_state(pdev);
 	return 0;
@@ -1649,7 +1675,7 @@ static void nvme_pci_disable(struct nvme_dev *dev)
 
 static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 {
-	int i;
+	int i, queues;
 	u32 csts = -1;
 
 	del_timer_sync(&dev->watchdog_timer);
@@ -1660,6 +1686,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		csts = readl(dev->bar + NVME_REG_CSTS);
 	}
 
+	queues = dev->online_queues - 1;
 	for (i = dev->queue_count - 1; i > 0; i--)
 		nvme_suspend_queue(dev->queues[i]);
 
@@ -1671,7 +1698,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
 		if (dev->queue_count)
 			nvme_suspend_queue(dev->queues[0]);
 	} else {
-		nvme_disable_io_queues(dev);
+		nvme_disable_io_queues(dev, queues);
 		nvme_disable_admin_queue(dev, shutdown);
 	}
 	nvme_pci_disable(dev);
@@ -1818,11 +1845,10 @@ static int nvme_reset(struct nvme_dev *dev)
 {
 	if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
 		return -ENODEV;
-
+	if (work_busy(&dev->reset_work))
+		return -ENODEV;
 	if (!queue_work(nvme_workq, &dev->reset_work))
 		return -EBUSY;
-
-	flush_work(&dev->reset_work);
 	return 0;
 }
 
@@ -1846,7 +1872,12 @@ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 
 static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 {
-	return nvme_reset(to_nvme_dev(ctrl));
+	struct nvme_dev *dev = to_nvme_dev(ctrl);
+	int ret = nvme_reset(dev);
+
+	if (!ret)
+		flush_work(&dev->reset_work);
+	return ret;
 }
 
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
@@ -1940,7 +1971,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 	if (prepare)
 		nvme_dev_disable(dev, false);
 	else
-		queue_work(nvme_workq, &dev->reset_work);
+		nvme_reset(dev);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -2009,7 +2040,7 @@ static int nvme_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-	queue_work(nvme_workq, &ndev->reset_work);
+	nvme_reset(ndev);
 	return 0;
 }
 #endif
@@ -2048,7 +2079,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 
 	dev_info(dev->ctrl.device, "restart after slot reset\n");
 	pci_restore_state(pdev);
-	queue_work(nvme_workq, &dev->reset_work);
+	nvme_reset(dev);
 	return PCI_ERS_RESULT_RECOVERED;
 }
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index c2a0a1c7d05d..3eaa4d27801e 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -606,7 +606,7 @@ static int nvme_fill_device_id_eui64(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	eui = id_ns->eui64;
 	len = sizeof(id_ns->eui64);
 
-	if (ns->ctrl->vs >= NVME_VS(1, 2)) {
+	if (ns->ctrl->vs >= NVME_VS(1, 2, 0)) {
 		if (bitmap_empty(eui, len * 8)) {
 			eui = id_ns->nguid;
 			len = sizeof(id_ns->nguid);
@@ -679,7 +679,7 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
 	int res;
 
-	if (ns->ctrl->vs >= NVME_VS(1, 1)) {
+	if (ns->ctrl->vs >= NVME_VS(1, 1, 0)) {
 		res = nvme_fill_device_id_eui64(ns, hdr, resp, alloc_len);
 		if (res != -EOPNOTSUPP)
 			return res;
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 7ab9c9381b98..6fe4c48a21e4 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -199,7 +199,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	 */
 
 	/* we support multiple ports and multiples hosts: */
-	id->mic = (1 << 0) | (1 << 1);
+	id->cmic = (1 << 0) | (1 << 1);
 
 	/* no limit on data transfer sizes for now */
 	id->mdts = 0;
@@ -511,13 +511,13 @@ int nvmet_parse_admin_cmd(struct nvmet_req *req)
 	case nvme_admin_identify:
 		req->data_len = 4096;
 		switch (le32_to_cpu(cmd->identify.cns)) {
-		case 0x00:
+		case NVME_ID_CNS_NS:
 			req->execute = nvmet_execute_identify_ns;
 			return 0;
-		case 0x01:
+		case NVME_ID_CNS_CTRL:
 			req->execute = nvmet_execute_identify_ctrl;
 			return 0;
-		case 0x02:
+		case NVME_ID_CNS_NS_ACTIVE_LIST:
 			req->execute = nvmet_execute_identify_nslist;
 			return 0;
 		}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 6559d5afa7bf..b4cacb6f0258 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -882,7 +882,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 	if (!subsys)
 		return NULL;
 
-	subsys->ver = (1 << 16) | (2 << 8) | 1; /* NVMe 1.2.1 */
+	subsys->ver = NVME_VS(1, 2, 1); /* NVMe 1.2.1 */
 
 	switch (type) {
 	case NVME_NQN_NVME:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 6f65646e89cf..12f39eea569f 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -54,7 +54,7 @@ static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
 	/* we support only dynamic controllers */
 	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
 	e->asqsz = cpu_to_le16(NVMF_AQ_DEPTH);
-	e->nqntype = type;
+	e->subtype = type;
 	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
 	memcpy(e->traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
 	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
@@ -187,7 +187,7 @@ int nvmet_parse_discovery_cmd(struct nvmet_req *req)
 	case nvme_admin_identify:
 		req->data_len = 4096;
 		switch (le32_to_cpu(cmd->identify.cns)) {
-		case 0x01:
+		case NVME_ID_CNS_CTRL:
 			req->execute = nvmet_execute_identify_disc_ctrl;
 			return 0;
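The new 'cmb' attribute is attached to the nvme_ctrl device rather than the PCI device, which, as the comment in the nvme_pci_enable() hunk notes, means it disappears together with the controller and never needs explicit removal. From userspace it reads like any other sysfs file; a minimal sketch, assuming the controller enumerates as nvme0 so the attribute shows up at /sys/class/nvme/nvme0/cmb (that path is an illustration, not something stated in the patch):

/* cmb_dump.c - print the cmbloc/cmbsz values exposed by the new attribute */
#include <stdio.h>

int main(void)
{
	char line[128];
	/* Assumed path: controller nvme0, with a CMB present and this patch applied. */
	FILE *f = fopen("/sys/class/nvme/nvme0/cmb", "r");

	if (!f) {
		perror("cmb attribute not available (no CMB, or older kernel)");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* two lines, formatted by nvme_cmb_show() */
	fclose(f);
	return 0;
}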