| author | David Woodhouse <dwmw2@infradead.org> | 2007-01-18 10:34:51 +1100 |
| committer | David Woodhouse <dwmw2@infradead.org> | 2007-01-18 10:34:51 +1100 |
| commit | 9cdf083f981b8d37b3212400a359368661385099 (patch) |
| tree | aa15a6a08ad87e650dea40fb59b3180bef0d345b /drivers/infiniband/ulp |
| parent | e499e01d234a31d59679b7b1e1cf628d917ba49a (diff) |
| parent | a8b3485287731978899ced11f24628c927890e78 (diff) |
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/infiniband/ulp')
| -rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib.h | 22 |
| -rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_ib.c | 100 |
| -rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_main.c | 32 |
| -rw-r--r-- | drivers/infiniband/ulp/ipoib/ipoib_multicast.c | 24 |
| -rw-r--r-- | drivers/infiniband/ulp/iser/iscsi_iser.c | 4 |
| -rw-r--r-- | drivers/infiniband/ulp/iser/iscsi_iser.h | 4 |
| -rw-r--r-- | drivers/infiniband/ulp/iser/iser_initiator.c | 30 |
| -rw-r--r-- | drivers/infiniband/ulp/iser/iser_memory.c | 130 |
| -rw-r--r-- | drivers/infiniband/ulp/iser/iser_verbs.c | 10 |
| -rw-r--r-- | drivers/infiniband/ulp/srp/ib_srp.c | 101 |
| -rw-r--r-- | drivers/infiniband/ulp/srp/ib_srp.h | 4 |
11 files changed, 241 insertions, 220 deletions
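Two API migrations run through the hunks below. The first is the 2.6.20 workqueue change: work handlers now take a `struct work_struct *` instead of an opaque `void *` and recover their context with `container_of()`, and items that are ever queued with a delay become `struct delayed_work` queued via `queue_delayed_work()`. The second, the `ib_dma_*` mapping wrappers, is sketched after the diff. Below is a minimal sketch of the workqueue pattern as the IPoIB hunks apply it; the `my_priv`/`my_mcast_join_task`/`my_init` names are illustrative, not from this patch.

```c
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
	struct net_device   *dev;
	struct delayed_work  mcast_task;	/* was: struct work_struct */
};

/* New-style handler: the work item itself is the only argument. */
static void my_mcast_join_task(struct work_struct *work)
{
	/* For delayed work, container_of() goes through the embedded .work. */
	struct my_priv *priv = container_of(work, struct my_priv,
					    mcast_task.work);

	pr_debug("%s: multicast join\n", priv->dev->name);
}

static void my_init(struct my_priv *priv, struct workqueue_struct *wq)
{
	/* Old API:  INIT_WORK(&priv->mcast_task, handler, priv->dev);
	 * New API:  the handler is bound with no data pointer at all. */
	INIT_DELAYED_WORK(&priv->mcast_task, my_mcast_join_task);

	/* Delayed work takes an explicit delay (in jiffies) when queued. */
	queue_delayed_work(wq, &priv->mcast_task, 0);
}
```

Work items that are only ever queued immediately (flush_task, restart_task) stay plain `struct work_struct` and keep using `INIT_WORK()`/`queue_work()`, which is the split the `ipoib_dev_priv` hunk below makes.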
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 0b8a79d53a00..07deee8f81ce 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h @@ -105,12 +105,12 @@ struct ipoib_mcast; struct ipoib_rx_buf { struct sk_buff *skb; - dma_addr_t mapping; + u64 mapping; }; struct ipoib_tx_buf { struct sk_buff *skb; - DECLARE_PCI_UNMAP_ADDR(mapping) + u64 mapping; }; /* @@ -136,11 +136,11 @@ struct ipoib_dev_priv { struct list_head multicast_list; struct rb_root multicast_tree; - struct work_struct pkey_task; - struct work_struct mcast_task; + struct delayed_work pkey_task; + struct delayed_work mcast_task; struct work_struct flush_task; struct work_struct restart_task; - struct work_struct ah_reap_task; + struct delayed_work ah_reap_task; struct ib_device *ca; u8 port; @@ -233,7 +233,7 @@ static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh) } struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neigh); -void ipoib_neigh_free(struct ipoib_neigh *neigh); +void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh); extern struct workqueue_struct *ipoib_workqueue; @@ -254,13 +254,13 @@ int ipoib_add_pkey_attr(struct net_device *dev); void ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_ah *address, u32 qpn); -void ipoib_reap_ah(void *dev_ptr); +void ipoib_reap_ah(struct work_struct *work); void ipoib_flush_paths(struct net_device *dev); struct ipoib_dev_priv *ipoib_intf_alloc(const char *format); int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port); -void ipoib_ib_dev_flush(void *dev); +void ipoib_ib_dev_flush(struct work_struct *work); void ipoib_ib_dev_cleanup(struct net_device *dev); int ipoib_ib_dev_open(struct net_device *dev); @@ -271,10 +271,10 @@ int ipoib_ib_dev_stop(struct net_device *dev); int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port); void ipoib_dev_cleanup(struct net_device *dev); -void ipoib_mcast_join_task(void *dev_ptr); +void ipoib_mcast_join_task(struct work_struct *work); void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb); -void ipoib_mcast_restart_task(void *dev_ptr); +void ipoib_mcast_restart_task(struct work_struct *work); int ipoib_mcast_start_thread(struct net_device *dev); int ipoib_mcast_stop_thread(struct net_device *dev, int flush); @@ -312,7 +312,7 @@ void ipoib_event(struct ib_event_handler *handler, int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey); int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey); -void ipoib_pkey_poll(void *dev); +void ipoib_pkey_poll(struct work_struct *work); int ipoib_pkey_dev_delay_open(struct net_device *dev); #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 8bf5e9ec7c95..59d9594ed6d9 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -109,9 +109,8 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id) ret = ib_post_recv(priv->qp, ¶m, &bad_wr); if (unlikely(ret)) { ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); - dma_unmap_single(priv->ca->dma_device, - priv->rx_ring[id].mapping, - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); + ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping, + IPOIB_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb_any(priv->rx_ring[id].skb); priv->rx_ring[id].skb = NULL; } @@ -123,7 +122,7 @@ static int ipoib_alloc_rx_skb(struct 
net_device *dev, int id) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; - dma_addr_t addr; + u64 addr; skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4); if (!skb) @@ -136,10 +135,9 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id) */ skb_reserve(skb, 4); - addr = dma_map_single(priv->ca->dma_device, - skb->data, IPOIB_BUF_SIZE, - DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(addr))) { + addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE, + DMA_FROM_DEVICE); + if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { dev_kfree_skb_any(skb); return -EIO; } @@ -174,7 +172,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) struct ipoib_dev_priv *priv = netdev_priv(dev); unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; struct sk_buff *skb; - dma_addr_t addr; + u64 addr; ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n", wr_id, wc->opcode, wc->status); @@ -193,8 +191,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ipoib_warn(priv, "failed recv event " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); - dma_unmap_single(priv->ca->dma_device, addr, - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); + ib_dma_unmap_single(priv->ca, addr, + IPOIB_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb_any(skb); priv->rx_ring[wr_id].skb = NULL; return; @@ -212,8 +210,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", wc->byte_len, wc->slid); - dma_unmap_single(priv->ca->dma_device, addr, - IPOIB_BUF_SIZE, DMA_FROM_DEVICE); + ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE); skb_put(skb, wc->byte_len); skb_pull(skb, IB_GRH_BYTES); @@ -261,10 +258,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) tx_req = &priv->tx_ring[wr_id]; - dma_unmap_single(priv->ca->dma_device, - pci_unmap_addr(tx_req, mapping), - tx_req->skb->len, - DMA_TO_DEVICE); + ib_dma_unmap_single(priv->ca, tx_req->mapping, + tx_req->skb->len, DMA_TO_DEVICE); ++priv->stats.tx_packets; priv->stats.tx_bytes += tx_req->skb->len; @@ -311,7 +306,7 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) static inline int post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 qpn, - dma_addr_t addr, int len) + u64 addr, int len) { struct ib_send_wr *bad_wr; @@ -330,7 +325,7 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_tx_buf *tx_req; - dma_addr_t addr; + u64 addr; if (unlikely(skb->len > dev->mtu + INFINIBAND_ALEN)) { ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", @@ -353,21 +348,20 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, */ tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; - addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(addr))) { + addr = ib_dma_map_single(priv->ca, skb->data, skb->len, + DMA_TO_DEVICE); + if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { ++priv->stats.tx_errors; dev_kfree_skb_any(skb); return; } - pci_unmap_addr_set(tx_req, mapping, addr); + tx_req->mapping = addr; if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn, addr, skb->len))) { ipoib_warn(priv, "post_send failed\n"); ++priv->stats.tx_errors; - dma_unmap_single(priv->ca->dma_device, addr, skb->len, - DMA_TO_DEVICE); + 
ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); } else { dev->trans_start = jiffies; @@ -400,10 +394,11 @@ static void __ipoib_reap_ah(struct net_device *dev) spin_unlock_irq(&priv->tx_lock); } -void ipoib_reap_ah(void *dev_ptr) +void ipoib_reap_ah(struct work_struct *work) { - struct net_device *dev = dev_ptr; - struct ipoib_dev_priv *priv = netdev_priv(dev); + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, ah_reap_task.work); + struct net_device *dev = priv->dev; __ipoib_reap_ah(dev); @@ -537,24 +532,27 @@ int ipoib_ib_dev_stop(struct net_device *dev) while ((int) priv->tx_tail - (int) priv->tx_head < 0) { tx_req = &priv->tx_ring[priv->tx_tail & (ipoib_sendq_size - 1)]; - dma_unmap_single(priv->ca->dma_device, - pci_unmap_addr(tx_req, mapping), - tx_req->skb->len, - DMA_TO_DEVICE); + ib_dma_unmap_single(priv->ca, + tx_req->mapping, + tx_req->skb->len, + DMA_TO_DEVICE); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; } - for (i = 0; i < ipoib_recvq_size; ++i) - if (priv->rx_ring[i].skb) { - dma_unmap_single(priv->ca->dma_device, - pci_unmap_addr(&priv->rx_ring[i], - mapping), - IPOIB_BUF_SIZE, - DMA_FROM_DEVICE); - dev_kfree_skb_any(priv->rx_ring[i].skb); - priv->rx_ring[i].skb = NULL; - } + for (i = 0; i < ipoib_recvq_size; ++i) { + struct ipoib_rx_buf *rx_req; + + rx_req = &priv->rx_ring[i]; + if (!rx_req->skb) + continue; + ib_dma_unmap_single(priv->ca, + rx_req->mapping, + IPOIB_BUF_SIZE, + DMA_FROM_DEVICE); + dev_kfree_skb_any(rx_req->skb); + rx_req->skb = NULL; + } goto timeout; } @@ -613,10 +611,11 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) return 0; } -void ipoib_ib_dev_flush(void *_dev) +void ipoib_ib_dev_flush(struct work_struct *work) { - struct net_device *dev = (struct net_device *)_dev; - struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv; + struct ipoib_dev_priv *cpriv, *priv = + container_of(work, struct ipoib_dev_priv, flush_task); + struct net_device *dev = priv->dev; if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) ) { ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); @@ -638,14 +637,14 @@ void ipoib_ib_dev_flush(void *_dev) */ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { ipoib_ib_dev_up(dev); - ipoib_mcast_restart_task(dev); + ipoib_mcast_restart_task(&priv->restart_task); } mutex_lock(&priv->vlan_mutex); /* Flush any child interfaces too */ list_for_each_entry(cpriv, &priv->child_intfs, list) - ipoib_ib_dev_flush(cpriv->dev); + ipoib_ib_dev_flush(&cpriv->flush_task); mutex_unlock(&priv->vlan_mutex); } @@ -672,10 +671,11 @@ void ipoib_ib_dev_cleanup(struct net_device *dev) * change async notification is available. 
*/ -void ipoib_pkey_poll(void *dev_ptr) +void ipoib_pkey_poll(struct work_struct *work) { - struct net_device *dev = dev_ptr; - struct ipoib_dev_priv *priv = netdev_priv(dev); + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, pkey_task.work); + struct net_device *dev = priv->dev; ipoib_pkey_dev_check_presence(dev); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 85522daeb946..705eb1d0e554 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -264,7 +264,7 @@ static void path_free(struct net_device *dev, struct ipoib_path *path) if (neigh->ah) ipoib_put_ah(neigh->ah); - ipoib_neigh_free(neigh); + ipoib_neigh_free(dev, neigh); } spin_unlock_irqrestore(&priv->lock, flags); @@ -497,8 +497,6 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) return; } - skb_queue_head_init(&neigh->queue); - /* * We can only be called from ipoib_start_xmit, so we're * inside tx_lock -- no need to save/restore flags. @@ -525,10 +523,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev) ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb->dst->neighbour->ha)); } else { neigh->ah = NULL; - __skb_queue_tail(&neigh->queue, skb); if (!path->query && path_rec_start(dev, path)) goto err_list; + + __skb_queue_tail(&neigh->queue, skb); } spin_unlock(&priv->lock); @@ -538,7 +537,7 @@ err_list: list_del(&neigh->list); err_path: - ipoib_neigh_free(neigh); + ipoib_neigh_free(dev, neigh); ++priv->stats.tx_dropped; dev_kfree_skb_any(skb); @@ -655,7 +654,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) */ ipoib_put_ah(neigh->ah); list_del(&neigh->list); - ipoib_neigh_free(neigh); + ipoib_neigh_free(dev, neigh); spin_unlock(&priv->lock); ipoib_path_lookup(skb, dev); goto out; @@ -786,7 +785,7 @@ static void ipoib_neigh_destructor(struct neighbour *n) if (neigh->ah) ah = neigh->ah; list_del(&neigh->list); - ipoib_neigh_free(neigh); + ipoib_neigh_free(n->dev, neigh); } spin_unlock_irqrestore(&priv->lock, flags); @@ -805,13 +804,20 @@ struct ipoib_neigh *ipoib_neigh_alloc(struct neighbour *neighbour) neigh->neighbour = neighbour; *to_ipoib_neigh(neighbour) = neigh; + skb_queue_head_init(&neigh->queue); return neigh; } -void ipoib_neigh_free(struct ipoib_neigh *neigh) +void ipoib_neigh_free(struct net_device *dev, struct ipoib_neigh *neigh) { + struct ipoib_dev_priv *priv = netdev_priv(dev); + struct sk_buff *skb; *to_ipoib_neigh(neigh->neighbour) = NULL; + while ((skb = __skb_dequeue(&neigh->queue))) { + ++priv->stats.tx_dropped; + dev_kfree_skb_any(skb); + } kfree(neigh); } @@ -933,11 +939,11 @@ static void ipoib_setup(struct net_device *dev) INIT_LIST_HEAD(&priv->dead_ahs); INIT_LIST_HEAD(&priv->multicast_list); - INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev); - INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev); - INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev); - INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev); - INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev); + INIT_DELAYED_WORK(&priv->pkey_task, ipoib_pkey_poll); + INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task); + INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush); + INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task); + INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah); } struct ipoib_dev_priv *ipoib_intf_alloc(const char *name) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 
b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 3faa1820f0e9..b04b72ca32ed 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -114,7 +114,7 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast) */ if (neigh->ah) ipoib_put_ah(neigh->ah); - ipoib_neigh_free(neigh); + ipoib_neigh_free(dev, neigh); } spin_unlock_irqrestore(&priv->lock, flags); @@ -399,7 +399,8 @@ static void ipoib_mcast_join_complete(int status, mcast->backoff = 1; mutex_lock(&mcast_mutex); if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_work(ipoib_workqueue, &priv->mcast_task); + queue_delayed_work(ipoib_workqueue, + &priv->mcast_task, 0); mutex_unlock(&mcast_mutex); complete(&mcast->done); return; @@ -435,7 +436,8 @@ static void ipoib_mcast_join_complete(int status, if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) { if (status == -ETIMEDOUT) - queue_work(ipoib_workqueue, &priv->mcast_task); + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, + 0); else queue_delayed_work(ipoib_workqueue, &priv->mcast_task, mcast->backoff * HZ); @@ -517,10 +519,11 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast, mcast->query_id = ret; } -void ipoib_mcast_join_task(void *dev_ptr) +void ipoib_mcast_join_task(struct work_struct *work) { - struct net_device *dev = dev_ptr; - struct ipoib_dev_priv *priv = netdev_priv(dev); + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, mcast_task.work); + struct net_device *dev = priv->dev; if (!test_bit(IPOIB_MCAST_RUN, &priv->flags)) return; @@ -610,7 +613,7 @@ int ipoib_mcast_start_thread(struct net_device *dev) mutex_lock(&mcast_mutex); if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags)) - queue_work(ipoib_workqueue, &priv->mcast_task); + queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0); mutex_unlock(&mcast_mutex); spin_lock_irq(&priv->lock); @@ -818,10 +821,11 @@ void ipoib_mcast_dev_flush(struct net_device *dev) } } -void ipoib_mcast_restart_task(void *dev_ptr) +void ipoib_mcast_restart_task(struct work_struct *work) { - struct net_device *dev = dev_ptr; - struct ipoib_dev_priv *priv = netdev_priv(dev); + struct ipoib_dev_priv *priv = + container_of(work, struct ipoib_dev_priv, restart_task); + struct net_device *dev = priv->dev; struct dev_mc_list *mclist; struct ipoib_mcast *mcast, *tmcast; LIST_HEAD(remove_list); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 9b2041e25d59..dd221eda3ea6 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -177,7 +177,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, * - if yes, the mtask is recycled at iscsi_complete_pdu * - if no, the mtask is recycled at iser_snd_completion */ - if (error && error != -EAGAIN) + if (error && error != -ENOBUFS) iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return error; @@ -241,7 +241,7 @@ iscsi_iser_ctask_xmit(struct iscsi_conn *conn, error = iscsi_iser_ctask_xmit_unsol_data(conn, ctask); iscsi_iser_ctask_xmit_exit: - if (error && error != -EAGAIN) + if (error && error != -ENOBUFS) iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED); return error; } diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9c53916f28c2..cae8c96a55f8 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -182,7 +182,7 @@ struct iser_regd_buf { struct iser_mem_reg reg; /* memory registration info */ void 
*virt_addr; struct iser_device *device; /* device->device for dma_unmap */ - dma_addr_t dma_addr; /* if non zero, addr for dma_unmap */ + u64 dma_addr; /* if non zero, addr for dma_unmap */ enum dma_data_direction direction; /* direction for dma_unmap */ unsigned int data_size; atomic_t ref_count; /* refcount, freed when dec to 0 */ @@ -283,7 +283,7 @@ struct iser_global { struct mutex connlist_mutex; struct list_head connlist; /* all iSER IB connections */ - kmem_cache_t *desc_cache; + struct kmem_cache *desc_cache; }; extern struct iser_global ig; diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 9b3d79c796c8..0a7d1ab60e6d 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -304,18 +304,14 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) static int iser_check_xmit(struct iscsi_conn *conn, void *task) { - int rc = 0; struct iscsi_iser_conn *iser_conn = conn->dd_data; - write_lock_bh(conn->recv_lock); if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == ISER_QP_MAX_REQ_DTOS) { - iser_dbg("%ld can't xmit task %p, suspending tx\n",jiffies,task); - set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); - rc = -EAGAIN; + iser_dbg("%ld can't xmit task %p\n",jiffies,task); + return -ENOBUFS; } - write_unlock_bh(conn->recv_lock); - return rc; + return 0; } @@ -340,7 +336,7 @@ int iser_send_command(struct iscsi_conn *conn, return -EPERM; } if (iser_check_xmit(conn, ctask)) - return -EAGAIN; + return -ENOBUFS; edtl = ntohl(hdr->data_length); @@ -426,7 +422,7 @@ int iser_send_data_out(struct iscsi_conn *conn, } if (iser_check_xmit(conn, ctask)) - return -EAGAIN; + return -ENOBUFS; itt = ntohl(hdr->itt); data_seg_len = ntoh24(hdr->dlength); @@ -487,10 +483,8 @@ int iser_send_control(struct iscsi_conn *conn, struct iscsi_iser_conn *iser_conn = conn->dd_data; struct iser_desc *mdesc = mtask->dd_data; struct iser_dto *send_dto = NULL; - unsigned int itt; unsigned long data_seg_len; int err = 0; - unsigned char opcode; struct iser_regd_buf *regd_buf; struct iser_device *device; @@ -500,7 +494,7 @@ int iser_send_control(struct iscsi_conn *conn, } if (iser_check_xmit(conn,mtask)) - return -EAGAIN; + return -ENOBUFS; /* build the tx desc regd header and add it to the tx desc dto */ mdesc->type = ISCSI_TX_CONTROL; @@ -512,8 +506,6 @@ int iser_send_control(struct iscsi_conn *conn, iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE); - itt = ntohl(mtask->hdr->itt); - opcode = mtask->hdr->opcode & ISCSI_OPCODE_MASK; data_seg_len = ntoh24(mtask->hdr->dlength); if (data_seg_len > 0) { @@ -609,6 +601,7 @@ void iser_snd_completion(struct iser_desc *tx_desc) struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn; struct iscsi_conn *conn = iser_conn->iscsi_conn; struct iscsi_mgmt_task *mtask; + int resume_tx = 0; iser_dbg("Initiator, Data sent dto=0x%p\n", dto); @@ -617,15 +610,16 @@ void iser_snd_completion(struct iser_desc *tx_desc) if (tx_desc->type == ISCSI_TX_DATAOUT) kmem_cache_free(ig.desc_cache, tx_desc); + if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) == + ISER_QP_MAX_REQ_DTOS) + resume_tx = 1; + atomic_dec(&ib_conn->post_send_buf_count); - write_lock(conn->recv_lock); - if (conn->suspend_tx) { + if (resume_tx) { iser_dbg("%ld resuming tx\n",jiffies); - clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx); scsi_queue_work(conn->session->host, &conn->xmitwork); } - write_unlock(conn->recv_lock); if (tx_desc->type == ISCSI_TX_CONTROL) { /* this arithmetic is 
legal by libiscsi dd_data allocation */ diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 0606744c3f84..fc9f1fd0ae54 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -35,6 +35,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/highmem.h> #include <asm/io.h> #include <asm/scatterlist.h> #include <linux/scatterlist.h> @@ -51,7 +52,7 @@ */ int iser_regd_buff_release(struct iser_regd_buf *regd_buf) { - struct device *dma_device; + struct ib_device *dev; if ((atomic_read(®d_buf->ref_count) == 0) || atomic_dec_and_test(®d_buf->ref_count)) { @@ -60,8 +61,8 @@ int iser_regd_buff_release(struct iser_regd_buf *regd_buf) iser_unreg_mem(®d_buf->reg); if (regd_buf->dma_addr) { - dma_device = regd_buf->device->ib_device->dma_device; - dma_unmap_single(dma_device, + dev = regd_buf->device->ib_device; + ib_dma_unmap_single(dev, regd_buf->dma_addr, regd_buf->data_size, regd_buf->direction); @@ -83,12 +84,12 @@ void iser_reg_single(struct iser_device *device, struct iser_regd_buf *regd_buf, enum dma_data_direction direction) { - dma_addr_t dma_addr; + u64 dma_addr; - dma_addr = dma_map_single(device->ib_device->dma_device, - regd_buf->virt_addr, - regd_buf->data_size, direction); - BUG_ON(dma_mapping_error(dma_addr)); + dma_addr = ib_dma_map_single(device->ib_device, + regd_buf->virt_addr, + regd_buf->data_size, direction); + BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr)); regd_buf->reg.lkey = device->mr->lkey; regd_buf->reg.len = regd_buf->data_size; @@ -106,14 +107,14 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, enum iser_data_dir cmd_dir) { int dma_nents; - struct device *dma_device; + struct ib_device *dev; char *mem = NULL; struct iser_data_buf *data = &iser_ctask->data[cmd_dir]; unsigned long cmd_data_len = data->data_len; if (cmd_data_len > ISER_KMALLOC_THRESHOLD) mem = (void *)__get_free_pages(GFP_NOIO, - long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); + ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); else mem = kmalloc(cmd_data_len, GFP_NOIO); @@ -146,17 +147,12 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, iser_ctask->data_copy[cmd_dir].copy_buf = mem; - dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; - - if (cmd_dir == ISER_DIR_OUT) - dma_nents = dma_map_sg(dma_device, - &iser_ctask->data_copy[cmd_dir].sg_single, - 1, DMA_TO_DEVICE); - else - dma_nents = dma_map_sg(dma_device, - &iser_ctask->data_copy[cmd_dir].sg_single, - 1, DMA_FROM_DEVICE); - + dev = iser_ctask->iser_conn->ib_conn->device->ib_device; + dma_nents = ib_dma_map_sg(dev, + &iser_ctask->data_copy[cmd_dir].sg_single, + 1, + (cmd_dir == ISER_DIR_OUT) ? 
+ DMA_TO_DEVICE : DMA_FROM_DEVICE); BUG_ON(dma_nents == 0); iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents; @@ -169,19 +165,16 @@ int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, enum iser_data_dir cmd_dir) { - struct device *dma_device; + struct ib_device *dev; struct iser_data_buf *mem_copy; unsigned long cmd_data_len; - dma_device = iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; - mem_copy = &iser_ctask->data_copy[cmd_dir]; + dev = iser_ctask->iser_conn->ib_conn->device->ib_device; + mem_copy = &iser_ctask->data_copy[cmd_dir]; - if (cmd_dir == ISER_DIR_OUT) - dma_unmap_sg(dma_device, &mem_copy->sg_single, 1, - DMA_TO_DEVICE); - else - dma_unmap_sg(dma_device, &mem_copy->sg_single, 1, - DMA_FROM_DEVICE); + ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1, + (cmd_dir == ISER_DIR_OUT) ? + DMA_TO_DEVICE : DMA_FROM_DEVICE); if (cmd_dir == ISER_DIR_IN) { char *mem; @@ -210,7 +203,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, if (cmd_data_len > ISER_KMALLOC_THRESHOLD) free_pages((unsigned long)mem_copy->copy_buf, - long_log2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); + ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT); else kfree(mem_copy->copy_buf); @@ -230,11 +223,12 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask, * consecutive elements. Also, it handles one entry SG. */ static int iser_sg_to_page_vec(struct iser_data_buf *data, - struct iser_page_vec *page_vec) + struct iser_page_vec *page_vec, + struct ib_device *ibdev) { struct scatterlist *sg = (struct scatterlist *)data->buf; - dma_addr_t first_addr, last_addr, page; - int start_aligned, end_aligned; + u64 first_addr, last_addr, page; + int end_aligned; unsigned int cur_page = 0; unsigned long total_sz = 0; int i; @@ -243,19 +237,21 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, page_vec->offset = (u64) sg[0].offset & ~MASK_4K; for (i = 0; i < data->dma_nents; i++) { - total_sz += sg_dma_len(&sg[i]); + unsigned int dma_len = ib_sg_dma_len(ibdev, &sg[i]); + + total_sz += dma_len; - first_addr = sg_dma_address(&sg[i]); - last_addr = first_addr + sg_dma_len(&sg[i]); + first_addr = ib_sg_dma_address(ibdev, &sg[i]); + last_addr = first_addr + dma_len; - start_aligned = !(first_addr & ~MASK_4K); end_aligned = !(last_addr & ~MASK_4K); /* continue to collect page fragments till aligned or SG ends */ while (!end_aligned && (i + 1 < data->dma_nents)) { i++; - total_sz += sg_dma_len(&sg[i]); - last_addr = sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]); + dma_len = ib_sg_dma_len(ibdev, &sg[i]); + total_sz += dma_len; + last_addr = ib_sg_dma_address(ibdev, &sg[i]) + dma_len; end_aligned = !(last_addr & ~MASK_4K); } @@ -287,10 +283,11 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data, * the number of entries which are aligned correctly. Supports the case where * consecutive SG elements are actually fragments of the same physcial page. 
*/ -static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data) +static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data, + struct ib_device *ibdev) { struct scatterlist *sg; - dma_addr_t end_addr, next_addr; + u64 end_addr, next_addr; int i, cnt; unsigned int ret_len = 0; @@ -302,12 +299,12 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data) (unsigned long)page_to_phys(sg[i].page), (unsigned long)sg[i].offset, (unsigned long)sg[i].length); */ - end_addr = sg_dma_address(&sg[i]) + - sg_dma_len(&sg[i]); + end_addr = ib_sg_dma_address(ibdev, &sg[i]) + + ib_sg_dma_len(ibdev, &sg[i]); /* iser_dbg("Checking sg iobuf end address " "0x%08lX\n", end_addr); */ if (i + 1 < data->dma_nents) { - next_addr = sg_dma_address(&sg[i+1]); + next_addr = ib_sg_dma_address(ibdev, &sg[i+1]); /* are i, i+1 fragments of the same page? */ if (end_addr == next_addr) continue; @@ -324,7 +321,8 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data) return ret_len; } -static void iser_data_buf_dump(struct iser_data_buf *data) +static void iser_data_buf_dump(struct iser_data_buf *data, + struct ib_device *ibdev) { struct scatterlist *sg = (struct scatterlist *)data->buf; int i; @@ -332,9 +330,9 @@ static void iser_data_buf_dump(struct iser_data_buf *data) for (i = 0; i < data->dma_nents; i++) iser_err("sg[%d] dma_addr:0x%lX page:0x%p " "off:0x%x sz:0x%x dma_len:0x%x\n", - i, (unsigned long)sg_dma_address(&sg[i]), + i, (unsigned long)ib_sg_dma_address(ibdev, &sg[i]), sg[i].page, sg[i].offset, - sg[i].length,sg_dma_len(&sg[i])); + sg[i].length, ib_sg_dma_len(ibdev, &sg[i])); } static void iser_dump_page_vec(struct iser_page_vec *page_vec) @@ -348,7 +346,8 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec) } static void iser_page_vec_build(struct iser_data_buf *data, - struct iser_page_vec *page_vec) + struct iser_page_vec *page_vec, + struct ib_device *ibdev) { int page_vec_len = 0; @@ -356,14 +355,14 @@ static void iser_page_vec_build(struct iser_data_buf *data, page_vec->offset = 0; iser_dbg("Translating sg sz: %d\n", data->dma_nents); - page_vec_len = iser_sg_to_page_vec(data,page_vec); + page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev); iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len); page_vec->length = page_vec_len; if (page_vec_len * SIZE_4K < page_vec->data_size) { iser_err("page_vec too short to hold this SG\n"); - iser_data_buf_dump(data); + iser_data_buf_dump(data, ibdev); iser_dump_page_vec(page_vec); BUG(); } @@ -374,13 +373,12 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, enum iser_data_dir iser_dir, enum dma_data_direction dma_dir) { - struct device *dma_device; + struct ib_device *dev; iser_ctask->dir[iser_dir] = 1; - dma_device = - iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; + dev = iser_ctask->iser_conn->ib_conn->device->ib_device; - data->dma_nents = dma_map_sg(dma_device, data->buf, data->size, dma_dir); + data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); if (data->dma_nents == 0) { iser_err("dma_map_sg failed!!!\n"); return -EINVAL; @@ -390,20 +388,19 @@ int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask, void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask) { - struct device *dma_device; + struct ib_device *dev; struct iser_data_buf *data; - dma_device = - iser_ctask->iser_conn->ib_conn->device->ib_device->dma_device; + dev = iser_ctask->iser_conn->ib_conn->device->ib_device; if 
(iser_ctask->dir[ISER_DIR_IN]) { data = &iser_ctask->data[ISER_DIR_IN]; - dma_unmap_sg(dma_device, data->buf, data->size, DMA_FROM_DEVICE); + ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); } if (iser_ctask->dir[ISER_DIR_OUT]) { data = &iser_ctask->data[ISER_DIR_OUT]; - dma_unmap_sg(dma_device, data->buf, data->size, DMA_TO_DEVICE); + ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE); } } @@ -418,6 +415,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, { struct iser_conn *ib_conn = iser_ctask->iser_conn->ib_conn; struct iser_device *device = ib_conn->device; + struct ib_device *ibdev = device->ib_device; struct iser_data_buf *mem = &iser_ctask->data[cmd_dir]; struct iser_regd_buf *regd_buf; int aligned_len; @@ -427,11 +425,11 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, regd_buf = &iser_ctask->rdma_regd[cmd_dir]; - aligned_len = iser_data_buf_aligned_len(mem); + aligned_len = iser_data_buf_aligned_len(mem, ibdev); if (aligned_len != mem->dma_nents) { iser_err("rdma alignment violation %d/%d aligned\n", aligned_len, mem->size); - iser_data_buf_dump(mem); + iser_data_buf_dump(mem, ibdev); /* unmap the command data before accessing it */ iser_dma_unmap_task_data(iser_ctask); @@ -449,8 +447,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, regd_buf->reg.lkey = device->mr->lkey; regd_buf->reg.rkey = device->mr->rkey; - regd_buf->reg.len = sg_dma_len(&sg[0]); - regd_buf->reg.va = sg_dma_address(&sg[0]); + regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]); + regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]); regd_buf->reg.is_fmr = 0; iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X " @@ -460,10 +458,10 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask, (unsigned long)regd_buf->reg.va, (unsigned long)regd_buf->reg.len); } else { /* use FMR for multiple dma entries */ - iser_page_vec_build(mem, ib_conn->page_vec); + iser_page_vec_build(mem, ib_conn->page_vec, ibdev); err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, ®d_buf->reg); if (err) { - iser_data_buf_dump(mem); + iser_data_buf_dump(mem, ibdev); iser_err("mem->dma_nents = %d (dlength = 0x%x)\n", mem->dma_nents, ntoh24(iser_ctask->desc.iscsi_header.dlength)); iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 18a000034996..693b77002897 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -48,7 +48,7 @@ static void iser_cq_tasklet_fn(unsigned long data); static void iser_cq_callback(struct ib_cq *cq, void *cq_context); -static void iser_comp_error_worker(void *data); +static void iser_comp_error_worker(struct work_struct *work); static void iser_cq_event_callback(struct ib_event *cause, void *context) { @@ -480,8 +480,7 @@ int iser_conn_init(struct iser_conn **ibconn) init_waitqueue_head(&ib_conn->wait); atomic_set(&ib_conn->post_recv_buf_count, 0); atomic_set(&ib_conn->post_send_buf_count, 0); - INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker, - ib_conn); + INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker); INIT_LIST_HEAD(&ib_conn->conn_list); spin_lock_init(&ib_conn->lock); @@ -754,9 +753,10 @@ int iser_post_send(struct iser_desc *tx_desc) return ret_val; } -static void iser_comp_error_worker(void *data) +static void iser_comp_error_worker(struct work_struct *work) { - struct iser_conn *ib_conn = data; + struct iser_conn *ib_conn = + container_of(work, 
struct iser_conn, comperror_work); /* getting here when the state is UP means that the conn is being * * terminated asynchronously from the iSCSI layer's perspective. */ diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 4b09147f438f..cdecbf5911c8 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -122,9 +122,8 @@ static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size, if (!iu->buf) goto out_free_iu; - iu->dma = dma_map_single(host->dev->dev->dma_device, - iu->buf, size, direction); - if (dma_mapping_error(iu->dma)) + iu->dma = ib_dma_map_single(host->dev->dev, iu->buf, size, direction); + if (ib_dma_mapping_error(host->dev->dev, iu->dma)) goto out_free_buf; iu->size = size; @@ -145,8 +144,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu) if (!iu) return; - dma_unmap_single(host->dev->dev->dma_device, - iu->dma, iu->size, iu->direction); + ib_dma_unmap_single(host->dev->dev, iu->dma, iu->size, iu->direction); kfree(iu->buf); kfree(iu); } @@ -390,9 +388,10 @@ static void srp_disconnect_target(struct srp_target_port *target) wait_for_completion(&target->done); } -static void srp_remove_work(void *target_ptr) +static void srp_remove_work(struct work_struct *work) { - struct srp_target_port *target = target_ptr; + struct srp_target_port *target = + container_of(work, struct srp_target_port, work); spin_lock_irq(target->scsi_host->host_lock); if (target->state != SRP_TARGET_DEAD) { @@ -481,8 +480,8 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, scat = &req->fake_sg; } - dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents, - scmnd->sc_data_direction); + ib_dma_unmap_sg(target->srp_host->dev->dev, scat, nents, + scmnd->sc_data_direction); } static void srp_remove_req(struct srp_target_port *target, struct srp_request *req) @@ -575,7 +574,7 @@ err: spin_lock_irq(target->scsi_host->host_lock); if (target->state == SRP_TARGET_CONNECTING) { target->state = SRP_TARGET_DEAD; - INIT_WORK(&target->work, srp_remove_work, target); + INIT_WORK(&target->work, srp_remove_work); schedule_work(&target->work); } spin_unlock_irq(target->scsi_host->host_lock); @@ -594,23 +593,26 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, int i, j; int ret; struct srp_device *dev = target->srp_host->dev; + struct ib_device *ibdev = dev->dev; if (!dev->fmr_pool) return -ENODEV; - if ((sg_dma_address(&scat[0]) & ~dev->fmr_page_mask) && + if ((ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask) && mellanox_workarounds && !memcmp(&target->ioc_guid, mellanox_oui, 3)) return -EINVAL; len = page_cnt = 0; for (i = 0; i < sg_cnt; ++i) { - if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) { + unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]); + + if (ib_sg_dma_address(ibdev, &scat[i]) & ~dev->fmr_page_mask) { if (i > 0) return -EINVAL; else ++page_cnt; } - if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) & + if ((ib_sg_dma_address(ibdev, &scat[i]) + dma_len) & ~dev->fmr_page_mask) { if (i < sg_cnt - 1) return -EINVAL; @@ -618,7 +620,7 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, ++page_cnt; } - len += sg_dma_len(&scat[i]); + len += dma_len; } page_cnt += len >> dev->fmr_page_shift; @@ -630,10 +632,14 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, return -ENOMEM; page_cnt = 0; - for (i = 0; i < sg_cnt; ++i) - for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size) 
+ for (i = 0; i < sg_cnt; ++i) { + unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]); + + for (j = 0; j < dma_len; j += dev->fmr_page_size) dma_pages[page_cnt++] = - (sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j; + (ib_sg_dma_address(ibdev, &scat[i]) & + dev->fmr_page_mask) + j; + } req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool, dma_pages, page_cnt, io_addr); @@ -643,7 +649,8 @@ static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat, goto out; } - buf->va = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask); + buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) & + ~dev->fmr_page_mask); buf->key = cpu_to_be32(req->fmr->fmr->rkey); buf->len = cpu_to_be32(len); @@ -662,6 +669,8 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, struct srp_cmd *cmd = req->cmd->buf; int len, nents, count; u8 fmt = SRP_DATA_DESC_DIRECT; + struct srp_device *dev; + struct ib_device *ibdev; if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE) return sizeof (struct srp_cmd); @@ -686,8 +695,10 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen); } - count = dma_map_sg(target->srp_host->dev->dev->dma_device, - scat, nents, scmnd->sc_data_direction); + dev = target->srp_host->dev; + ibdev = dev->dev; + + count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); fmt = SRP_DATA_DESC_DIRECT; len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf); @@ -701,9 +712,9 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, */ struct srp_direct_buf *buf = (void *) cmd->add_data; - buf->va = cpu_to_be64(sg_dma_address(scat)); - buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey); - buf->len = cpu_to_be32(sg_dma_len(scat)); + buf->va = cpu_to_be64(ib_sg_dma_address(ibdev, scat)); + buf->key = cpu_to_be32(dev->mr->rkey); + buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat)); } else if (srp_map_fmr(target, scat, count, req, (void *) cmd->add_data)) { /* @@ -721,13 +732,14 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target, count * sizeof (struct srp_direct_buf); for (i = 0; i < count; ++i) { + unsigned int dma_len = ib_sg_dma_len(ibdev, &scat[i]); + buf->desc_list[i].va = - cpu_to_be64(sg_dma_address(&scat[i])); + cpu_to_be64(ib_sg_dma_address(ibdev, &scat[i])); buf->desc_list[i].key = - cpu_to_be32(target->srp_host->dev->mr->rkey); - buf->desc_list[i].len = - cpu_to_be32(sg_dma_len(&scat[i])); - datalen += sg_dma_len(&scat[i]); + cpu_to_be32(dev->mr->rkey); + buf->desc_list[i].len = cpu_to_be32(dma_len); + datalen += dma_len; } if (scmnd->sc_data_direction == DMA_TO_DEVICE) @@ -807,13 +819,15 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) { + struct ib_device *dev; struct srp_iu *iu; u8 opcode; iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV]; - dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, - target->max_ti_iu_len, DMA_FROM_DEVICE); + dev = target->srp_host->dev->dev; + ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len, + DMA_FROM_DEVICE); opcode = *(u8 *) iu->buf; @@ -849,8 +863,8 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) break; } - dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, - target->max_ti_iu_len, DMA_FROM_DEVICE); + 
ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len, + DMA_FROM_DEVICE); } static void srp_completion(struct ib_cq *cq, void *target_ptr) @@ -968,6 +982,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, struct srp_request *req; struct srp_iu *iu; struct srp_cmd *cmd; + struct ib_device *dev; int len; if (target->state == SRP_TARGET_CONNECTING) @@ -984,8 +999,9 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, if (!iu) goto err; - dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma, - srp_max_iu_len, DMA_TO_DEVICE); + dev = target->srp_host->dev->dev; + ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len, + DMA_TO_DEVICE); req = list_entry(target->free_reqs.next, struct srp_request, list); @@ -1017,8 +1033,8 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd, goto err_unmap; } - dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma, - srp_max_iu_len, DMA_TO_DEVICE); + ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len, + DMA_TO_DEVICE); if (__srp_post_send(target, iu, len)) { printk(KERN_ERR PFX "Send failed\n"); @@ -1176,9 +1192,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) break; } - target->status = srp_alloc_iu_bufs(target); - if (target->status) - break; + if (!target->rx_ring[0]) { + target->status = srp_alloc_iu_bufs(target); + if (target->status) + break; + } qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL); if (!qp_attr) { @@ -1716,7 +1734,8 @@ static ssize_t srp_create_target(struct class_device *class_dev, if (!target_host) return -ENOMEM; - target_host->max_lun = SRP_MAX_LUN; + target_host->max_lun = SRP_MAX_LUN; + target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; target = host_to_target(target_host); @@ -1879,7 +1898,7 @@ static void srp_add_one(struct ib_device *device) */ srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1); srp_dev->fmr_page_size = 1 << srp_dev->fmr_page_shift; - srp_dev->fmr_page_mask = ~((unsigned long) srp_dev->fmr_page_size - 1); + srp_dev->fmr_page_mask = ~((u64) srp_dev->fmr_page_size - 1); INIT_LIST_HEAD(&srp_dev->dev_list); diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h index d4e35ef51374..c21772317b86 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.h +++ b/drivers/infiniband/ulp/srp/ib_srp.h @@ -87,7 +87,7 @@ struct srp_device { struct ib_fmr_pool *fmr_pool; int fmr_page_shift; int fmr_page_size; - unsigned long fmr_page_mask; + u64 fmr_page_mask; }; struct srp_host { @@ -161,7 +161,7 @@ struct srp_target_port { }; struct srp_iu { - dma_addr_t dma; + u64 dma; void *buf; size_t size; enum dma_data_direction direction; |
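The second migration, visible in the IPoIB, iSER and SRP hunks above, replaces raw `dma_map_single()`/`dma_map_sg()` calls on `ca->dma_device` with the `ib_dma_*` wrappers from `<rdma/ib_verbs.h>` and widens the saved handles from `dma_addr_t` to `u64`, so that a device can supply its own DMA mapping operations. A minimal sketch of the receive-buffer pattern, using an illustrative `my_rx_buf` structure and buffer size rather than the drivers' own:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <rdma/ib_verbs.h>

#define MY_BUF_SIZE 2048		/* stand-in for IPOIB_BUF_SIZE */

struct my_rx_buf {
	struct sk_buff *skb;
	u64		mapping;	/* was: dma_addr_t */
};

static int my_map_rx(struct ib_device *ca, struct my_rx_buf *rx)
{
	/* ib_dma_map_single() normally forwards to dma_map_single() on
	 * ca->dma_device, but a device may override it with its own ops. */
	rx->mapping = ib_dma_map_single(ca, rx->skb->data, MY_BUF_SIZE,
					DMA_FROM_DEVICE);

	/* Mapping errors are checked against the device, not the raw address. */
	if (ib_dma_mapping_error(ca, rx->mapping))
		return -EIO;

	return 0;
}

static void my_unmap_rx(struct ib_device *ca, struct my_rx_buf *rx)
{
	ib_dma_unmap_single(ca, rx->mapping, MY_BUF_SIZE, DMA_FROM_DEVICE);
}
```

Scatterlists follow the same shape: `ib_dma_map_sg()`/`ib_dma_unmap_sg()` replace `dma_map_sg()`/`dma_unmap_sg()`, and per-element addresses and lengths are read with `ib_sg_dma_address()` and `ib_sg_dma_len()` instead of `sg_dma_address()`/`sg_dma_len()`, which is what the iser_memory.c and ib_srp.c hunks above do.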