Diffstat (limited to 'drivers/infiniband/hw/qedr/main.c')
-rw-r--r--   drivers/infiniband/hw/qedr/main.c | 112
1 file changed, 67 insertions(+), 45 deletions(-)
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 3ac8aa5ef37d..b5851fd67d4f 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -35,8 +35,9 @@
 #include <rdma/ib_user_verbs.h>
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
+#include <linux/pci.h>
 #include <net/addrconf.h>
-#include <linux/qed/qede_roce.h>
+
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_if.h>
 #include "qedr.h"
@@ -170,7 +171,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 	dev->ibdev.get_port_immutable = qedr_port_immutable;
 	dev->ibdev.get_netdev = qedr_get_netdev;
 
-	dev->ibdev.dma_device = &dev->pdev->dev;
+	dev->ibdev.dev.parent = &dev->pdev->dev;
 
 	dev->ibdev.get_link_layer = qedr_link_layer;
 	dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
@@ -275,7 +276,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
 					    QED_CHAIN_CNT_TYPE_U16,
 					    n_entries,
 					    sizeof(struct regpair *),
-					    &cnq->pbl);
+					    &cnq->pbl, NULL);
 		if (rc)
 			goto err4;
 
@@ -340,43 +341,58 @@ static void qedr_remove_sysfiles(struct qedr_dev *dev)
 static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
 {
 	struct pci_dev *bridge;
-	u32 val;
-
-	dev->atomic_cap = IB_ATOMIC_NONE;
+	u32 ctl2, cap2;
+	u16 flags;
+	int rc;
 
 	bridge = pdev->bus->self;
 	if (!bridge)
-		return;
-
-	/* Check whether we are connected directly or via a switch */
-	while (bridge && bridge->bus->parent) {
-		DP_DEBUG(dev, QEDR_MSG_INIT,
-			 "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
-			 bridge->bus->number, bridge->bus->primary);
-		/* Need to check Atomic Op Routing Supported all the way to
-		 * root complex.
-		 */
-		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
-		if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
-			pcie_capability_clear_word(pdev,
-						   PCI_EXP_DEVCTL2,
-						   PCI_EXP_DEVCTL2_ATOMIC_REQ);
-			return;
-		}
+		goto disable;
+
+	/* Check atomic routing support all the way to root complex */
+	while (bridge->bus->parent) {
+		rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
+		if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
+			goto disable;
+
+		rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
+		if (rc)
+			goto disable;
+
+		rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
+		if (rc)
+			goto disable;
+
+		if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
+		    (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
+			goto disable;
 		bridge = bridge->bus->parent->self;
 	}
 
-	bridge = pdev->bus->self;
-	/* according to bridge capability */
-	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
-	if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
-		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
-					 PCI_EXP_DEVCTL2_ATOMIC_REQ);
-		dev->atomic_cap = IB_ATOMIC_GLOB;
-	} else {
-		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
-					   PCI_EXP_DEVCTL2_ATOMIC_REQ);
-	}
+	rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
+	if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
+		goto disable;
+
+	rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
+	if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
+		goto disable;
+
+	/* Set atomic operations */
+	pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
+				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
+	dev->atomic_cap = IB_ATOMIC_GLOB;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");
+
+	return;
+
+disable:
+	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
+				   PCI_EXP_DEVCTL2_ATOMIC_REQ);
+	dev->atomic_cap = IB_ATOMIC_NONE;
+
+	DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
+
 }
 
 static const struct qed_rdma_ops *qed_ops;
@@ -423,14 +439,21 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
 
 		cq->arm_flags = 0;
 
-		if (cq->ibcq.comp_handler)
+		if (!cq->destroyed && cq->ibcq.comp_handler)
 			(*cq->ibcq.comp_handler)
 				(&cq->ibcq, cq->ibcq.cq_context);
 
+		/* The CQ's CNQ notification counter is checked before
+		 * destroying the CQ in a busy-wait loop that waits for all of
+		 * the CQ's CNQ interrupts to be processed. It is increased
+		 * here, only after the completion handler, to ensure that
+		 * the handler is not running when the CQ is destroyed.
+		 */
+		cq->cnq_notif++;
+
 		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
 
 		cnq->n_comp++;
-
 	}
 
 	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
@@ -587,9 +610,8 @@ void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
 #define EVENT_TYPE_CQ		1
 #define EVENT_TYPE_QP		2
 	struct qedr_dev *dev = (struct qedr_dev *)context;
-	union event_ring_data *data = fw_handle;
-	u64 roce_handle64 = ((u64)data->roce_handle.hi << 32) +
-			    data->roce_handle.lo;
+	struct regpair *async_handle = (struct regpair *)fw_handle;
+	u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
 	u8 event_type = EVENT_TYPE_NOT_DEFINED;
 	struct ib_event event;
 	struct ib_cq *ibcq;
@@ -864,9 +886,9 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 	memcpy(&sgid->raw[8], guid, sizeof(guid));
 
 	/* Update LL2 */
-	rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
-					       dev->gsi_ll2_mac_address,
-					       dev->ndev->dev_addr);
+	rc = dev->ops->ll2_set_mac_filter(dev->cdev,
+					  dev->gsi_ll2_mac_address,
+					  dev->ndev->dev_addr);
 
 	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
@@ -880,7 +902,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
  * initialization done before RoCE driver notifies
  * event to stack.
  */
-static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
 {
 	switch (event) {
 	case QEDE_UP:
@@ -909,12 +931,12 @@ static struct qedr_driver qedr_drv = {
 
 static int __init qedr_init_module(void)
 {
-	return qede_roce_register_driver(&qedr_drv);
+	return qede_rdma_register_driver(&qedr_drv);
 }
 
 static void __exit qedr_exit_module(void)
 {
-	qede_roce_unregister_driver(&qedr_drv);
+	qede_rdma_unregister_driver(&qedr_drv);
 }
 
 module_init(qedr_init_module);
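Note on the qedr_pci_set_atomic() rework above: AtomicOp requests are enabled on the endpoint only when every switch port between the device and the root complex advertises AtomicOp routing without blocking egress, and the root port can act as a 64-bit AtomicOp completer. A minimal standalone sketch of those same checks follows; the helper name pcie_path_supports_atomic64() is invented here for illustration and is not part of the patch.

/* Sketch only: mirrors the capability walk in qedr_pci_set_atomic() above.
 * Returns true when the whole path from @pdev to the root complex routes
 * AtomicOps and the root port completes 64-bit AtomicOps.
 */
#include <linux/pci.h>

static bool pcie_path_supports_atomic64(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pdev->bus->self;
	u32 cap2, ctl2;
	u16 flags;

	if (!bridge)
		return false;

	/* Walk every switch port between the endpoint and the root port */
	while (bridge->bus->parent) {
		if (pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags) ||
		    (flags & PCI_EXP_FLAGS_VERS) < 2)
			return false;
		if (pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2) ||
		    pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2))
			return false;
		if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
		    (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
			return false;
		bridge = bridge->bus->parent->self;
	}

	/* The root port itself must be a 64-bit AtomicOp completer */
	if (pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags) ||
	    (flags & PCI_EXP_FLAGS_VERS) < 2)
		return false;
	if (pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2))
		return false;

	return cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64;
}

A caller would set PCI_EXP_DEVCTL2_ATOMIC_REQ on the endpoint only when this returns true, which is what the goto disable / enable split in the patch expresses inline.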
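The comment added in qedr_irq_handler() describes an ordering rule: cq->cnq_notif is incremented only after the completion handler returns, so a destroy path that has already observed the expected notification count knows no handler invocation is still in flight. The sketch below illustrates that pattern with made-up names (fake_cq, fake_cnq_interrupt, fake_destroy_cq) and an arbitrary wait bound; it is not the driver's destroy path, and real code would also need proper ordering annotations (e.g. READ_ONCE/WRITE_ONCE) rather than plain accesses.

/* Illustrative sketch only, not code from this patch */
#include <linux/types.h>
#include <linux/delay.h>

struct fake_cq {
	bool destroyed;			/* set by the destroy path */
	u32 cnq_notif;			/* written only by the IRQ path */
	void (*comp_handler)(struct fake_cq *cq);
};

/* Interrupt side: run the handler first, then publish the notification */
static void fake_cnq_interrupt(struct fake_cq *cq)
{
	if (!cq->destroyed && cq->comp_handler)
		cq->comp_handler(cq);

	/* Incremented only after the handler returns: once the destroy
	 * path has seen this count, the handler cannot still be running
	 * for this notification.
	 */
	cq->cnq_notif++;
}

/* Destroy side: stop new callbacks, then wait out in-flight ones */
static void fake_destroy_cq(struct fake_cq *cq, u32 expected_notifs)
{
	int iter = 200;			/* arbitrary bound for this sketch */

	cq->destroyed = true;

	while (cq->cnq_notif != expected_notifs && iter--)
		udelay(10);
}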