| | | |
|---|---|---|
| author | Keith Busch <keith.busch@intel.com> | 2015-10-02 10:37:29 -0600 |
| committer | Jens Axboe <axboe@fb.com> | 2015-10-09 10:40:36 -0600 |
| commit | 0a7385ad69f0f210c5cfbfd334b42423a6e05e5a (patch) | |
| tree | 676c0f49cf661bbcb65455a0a5a739acf4e745e9 /drivers/block | |
| parent | 5105aa555c1c681ae281ea0d6108efd0a5d8a5e8 (diff) | |
NVMe: Simplify device resume on io queue failure
Releasing IO queues and disks was done in a work queue outside the
controller resume context to delete namespaces if the controller failed
after a resume from suspend. This is unnecessary since we can resume
a device asynchronously.
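
As a concrete illustration, here is a condensed sketch of that asynchronous pattern, taken from the resume callback in the diff below. The probe_work handler itself is outside this patch; it is only assumed here to perform the bring-up and any cleanup, and struct nvme_dev is the driver's device structure containing that work item.

```c
#include <linux/pci.h>
#include <linux/workqueue.h>

/*
 * Sketch: the PM resume callback no longer runs the controller bring-up
 * synchronously. It only schedules probe_work; failure handling
 * (including namespace removal) then happens in that work's context.
 */
static int nvme_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct nvme_dev *ndev = pci_get_drvdata(pdev);

	schedule_work(&ndev->probe_work);
	return 0;
}
```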
This patch makes resume use probe_work so it can directly remove
namespaces if the device is manageable but not IO capable. Since deleting
the disks was the only reason we had the convoluted "reset_workfn",
this patch removes that unnecessary indirection.
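
In code terms, the removed indirection looked roughly like the sketch below (abbreviated from the diff that follows; struct nvme_dev is trimmed to the two relevant fields):

```c
#include <linux/workqueue.h>

/* Abbreviated: only the fields involved in the reset work are shown. */
struct nvme_dev {
	struct work_struct reset_work;
	void (*reset_workfn)(struct work_struct *work);	/* removed by this patch */
	/* ... */
};

/*
 * Before: the work item was initialized with a trampoline, and every
 * queue_work() call site had to set dev->reset_workfn beforehand:
 *
 *	INIT_WORK(&dev->reset_work, nvme_reset_workfn);
 */
static void nvme_reset_workfn(struct work_struct *work)
{
	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);

	dev->reset_workfn(work);
}

/*
 * After: with nvme_remove_disks gone, nvme_reset_failed_dev is the only
 * handler left, so the work item can point at it directly:
 *
 *	INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
 */
```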
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/nvme-core.c | 34 |
1 file changed, 6 insertions, 28 deletions
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 904b54fcbbcd..bf35846558c8 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1285,7 +1285,6 @@ static void nvme_abort_req(struct request *req)
 		list_del_init(&dev->node);
 		dev_warn(dev->dev, "I/O %d QID %d timeout, reset controller\n",
 							req->tag, nvmeq->qid);
-		dev->reset_workfn = nvme_reset_failed_dev;
 		queue_work(nvme_workq, &dev->reset_work);
  out:
 		spin_unlock_irqrestore(&dev_list_lock, flags);
@@ -2089,7 +2088,6 @@ static int nvme_kthread(void *data)
 				dev_warn(dev->dev,
 					"Failed status: %x, reset controller\n",
 					readl(&dev->bar->csts));
-				dev->reset_workfn = nvme_reset_failed_dev;
 				queue_work(nvme_workq, &dev->reset_work);
 				continue;
 			}
@@ -3025,14 +3023,6 @@ static int nvme_remove_dead_ctrl(void *arg)
 	return 0;
 }
 
-static void nvme_remove_disks(struct work_struct *ws)
-{
-	struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
-
-	nvme_free_queues(dev, 1);
-	nvme_dev_remove(dev);
-}
-
 static int nvme_dev_resume(struct nvme_dev *dev)
 {
 	int ret;
@@ -3041,10 +3031,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
 	if (ret)
 		return ret;
 	if (dev->online_queues < 2) {
-		spin_lock(&dev_list_lock);
-		dev->reset_workfn = nvme_remove_disks;
-		queue_work(nvme_workq, &dev->reset_work);
-		spin_unlock(&dev_list_lock);
+		dev_warn(dev->dev, "IO queues not created\n");
+		nvme_free_queues(dev, 1);
+		nvme_dev_remove(dev);
 	} else {
 		nvme_unfreeze_queues(dev);
 		nvme_dev_add(dev);
@@ -3091,12 +3080,6 @@ static void nvme_reset_failed_dev(struct work_struct *ws)
 	nvme_dev_reset(dev);
 }
 
-static void nvme_reset_workfn(struct work_struct *work)
-{
-	struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
-	dev->reset_workfn(work);
-}
-
 static int nvme_reset(struct nvme_dev *dev)
 {
 	int ret = -EBUSY;
@@ -3106,7 +3089,6 @@ static int nvme_reset(struct nvme_dev *dev)
 
 	spin_lock(&dev_list_lock);
 	if (!work_pending(&dev->reset_work)) {
-		dev->reset_workfn = nvme_reset_failed_dev;
 		queue_work(nvme_workq, &dev->reset_work);
 		ret = 0;
 	}
@@ -3159,8 +3141,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto free;
 
 	INIT_LIST_HEAD(&dev->namespaces);
-	dev->reset_workfn = nvme_reset_failed_dev;
-	INIT_WORK(&dev->reset_work, nvme_reset_workfn);
+	INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
 	dev->dev = get_device(&pdev->dev);
 	pci_set_drvdata(pdev, dev);
 	result = nvme_set_instance(dev);
@@ -3223,7 +3204,7 @@ static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 	if (prepare)
 		nvme_dev_shutdown(dev);
 	else
-		nvme_dev_resume(dev);
+		schedule_work(&dev->probe_work);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
@@ -3277,10 +3258,7 @@ static int nvme_resume(struct device *dev)
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-	if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
-		ndev->reset_workfn = nvme_reset_failed_dev;
-		queue_work(nvme_workq, &ndev->reset_work);
-	}
+	schedule_work(&ndev->probe_work);
 	return 0;
 }
 #endif