author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 09:38:14 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-06 09:38:14 -0700
commit     ae045e2455429c418a418a3376301a9e5753a0a8
tree       b445bdeecd3f38aa0d0a29c9585cee49e4ccb0f1 /drivers/infiniband/hw/cxgb4
parent     f4f142ed4ef835709c7e6d12eaca10d190bcebed
parent     d247b6ab3ce6dd43665780865ec5fa145d9ab6bd
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
"Highlights:
1) Steady transitioning of the BPF infrastructure to a generic spot so
all kernel subsystems can make use of it, from Alexei Starovoitov.
2) SFC driver supports busy polling, from Alexandre Rames.
3) Take advantage of hash table in UDP multicast delivery, from David
Held.
4) Lighten locking, in particular by getting rid of the LRU lists, in
inet frag handling. From Florian Westphal.
5) Add support for various RFC6458 control messages in SCTP, from
Geir Ola Vaagland.
6) Allow filtering of bridge forwarding database dumps by device, from
Jamal Hadi Salim.
7) virtio-net also now supports busy polling, from Jason Wang.
8) Some low level optimization tweaks in pktgen from Jesper Dangaard
Brouer.
9) Add support for ipv6 address generation modes, so that userland
can have some input into the process. From Jiri Pirko.
10) Consolidate common TCP connection request code in ipv4 and ipv6,
from Octavian Purdila.
11) New ARP packet logger in netfilter, from Pablo Neira Ayuso.
12) Generic resizable RCU hash table, with initial users in netlink and
nftables. From Thomas Graf.
13) Maintain a name assignment type so that userspace can see where a
network device name came from (enumerated by kernel, assigned
explicitly by userspace, etc.) From Tom Gundersen.
14) Automatic flow label generation on transmit in ipv6, from Tom
Herbert.
15) New packet timestamping facilities from Willem de Bruijn, meant to
assist in measuring latencies going into/out-of the packet
scheduler, latency from TCP data transmission to ACK, etc"
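The new transmit timestamping points in item 15 are visible from userspace through SO_TIMESTAMPING. The fragment below is a minimal, hedged sketch, assuming the flag names this series adds to <linux/net_tstamp.h> (SOF_TIMESTAMPING_TX_SCHED, SOF_TIMESTAMPING_TX_ACK, SOF_TIMESTAMPING_OPT_ID) and the extended struct scm_timestamping in <linux/errqueue.h>; completed timestamps are drained from the socket error queue, and error handling is trimmed for brevity.

/* Hedged sketch: request SCHED and ACK tx timestamps on a connected TCP
 * socket and drain the results from the error queue.  Assumes userspace
 * headers generated from a kernel that contains this series. */
#include <stdio.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* flags */
#include <linux/errqueue.h>	/* struct scm_timestamping */

#ifndef SCM_TIMESTAMPING
#define SCM_TIMESTAMPING SO_TIMESTAMPING	/* kernel defines them equal */
#endif

static int enable_tx_timestamps(int fd)
{
	/* OPT_ID tags each send with a counter (reported back in
	 * serr->ee_data) so concurrent datagrams can be disambiguated. */
	unsigned int flags = SOF_TIMESTAMPING_TX_SCHED |   /* entering qdisc */
			     SOF_TIMESTAMPING_TX_ACK   |   /* TCP data ACKed */
			     SOF_TIMESTAMPING_SOFTWARE |
			     SOF_TIMESTAMPING_OPT_ID;

	return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
}

static void drain_tx_timestamps(int fd)
{
	char data[256], control[512];

	for (;;) {
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = control, .msg_controllen = sizeof(control),
		};
		struct cmsghdr *cm;

		/* Completed timestamps are queued on the socket error queue. */
		if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
			break;

		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level == SOL_SOCKET &&
			    cm->cmsg_type == SCM_TIMESTAMPING) {
				struct scm_timestamping *ts =
					(struct scm_timestamping *)CMSG_DATA(cm);

				printf("sw ts %lld.%09ld\n",
				       (long long)ts->ts[0].tv_sec,
				       ts->ts[0].tv_nsec);
			}
		}
	}
}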
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1536 commits)
cxgb4 : Disable recursive mailbox commands when enabling vi
net: reduce USB network driver config options.
tg3: Modify tg3_tso_bug() to handle multiple TX rings
amd-xgbe: Perform phy connect/disconnect at dev open/stop
amd-xgbe: Use dma_set_mask_and_coherent to set DMA mask
net: sun4i-emac: fix memory leak on bad packet
sctp: fix possible seqlock seadlock in sctp_packet_transmit()
Revert "net: phy: Set the driver when registering an MDIO bus device"
cxgb4vf: Turn off SGE RX/TX Callback Timers and interrupts in PCI shutdown routine
team: Simplify return path of team_newlink
bridge: Update outdated comment on promiscuous mode
net-timestamp: ACK timestamp for bytestreams
net-timestamp: TCP timestamping
net-timestamp: SCHED timestamp on entering packet scheduler
net-timestamp: add key to disambiguate concurrent datagrams
net-timestamp: move timestamp flags out of sk_flags
net-timestamp: extend SCM_TIMESTAMPING ancillary data struct
cxgb4i : Move stray CPL definitions to cxgb4 driver
tcp: reduce spurious retransmits due to transient SACK reneging
qlcnic: Initialize dcbnl_ops before register_netdev
...
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c           | 112
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c           |  14
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c       | 192
-rw-r--r--  drivers/infiniband/hw/cxgb4/ev.c           |  55
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h     |  40
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c     |  14
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c           | 113
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h           |  19
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4fw_ri_api.h  |   1
9 files changed, 467 insertions, 93 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 768a0fb67dd6..c2fb71c182a8 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -79,9 +79,10 @@ static int dack_mode = 1; module_param(dack_mode, int, 0644); MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)"); -int c4iw_max_read_depth = 8; +uint c4iw_max_read_depth = 32; module_param(c4iw_max_read_depth, int, 0644); -MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)"); +MODULE_PARM_DESC(c4iw_max_read_depth, + "Per-connection max ORD/IRD (default=32)"); static int enable_tcp_timestamps; module_param(enable_tcp_timestamps, int, 0644); @@ -474,7 +475,8 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) 16)) | FW_WR_FLOWID(ep->hwtid)); flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; - flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8); + flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN + (ep->com.dev->rdev.lldi.pf)); flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; @@ -821,6 +823,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, if (mpa_rev_to_use == 2) { mpa->private_data_size = htons(ntohs(mpa->private_data_size) + sizeof (struct mpa_v2_conn_params)); + PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, + ep->ord); mpa_v2_params.ird = htons((u16)ep->ird); mpa_v2_params.ord = htons((u16)ep->ord); @@ -1190,8 +1194,8 @@ static int connect_request_upcall(struct c4iw_ep *ep) sizeof(struct mpa_v2_conn_params); } else { /* this means MPA_v1 is used. Send max supported */ - event.ord = c4iw_max_read_depth; - event.ird = c4iw_max_read_depth; + event.ord = cur_max_read_depth(ep->com.dev); + event.ird = cur_max_read_depth(ep->com.dev); event.private_data_len = ep->plen; event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); } @@ -1255,6 +1259,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits) return credits; } +#define RELAXED_IRD_NEGOTIATION 1 + static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) { struct mpa_message *mpa; @@ -1366,17 +1372,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) MPA_V2_IRD_ORD_MASK; resp_ord = ntohs(mpa_v2_params->ord) & MPA_V2_IRD_ORD_MASK; + PDBG("%s responder ird %u ord %u ep ird %u ord %u\n", + __func__, resp_ird, resp_ord, ep->ird, ep->ord); /* * This is a double-check. 
Ideally, below checks are * not required since ird/ord stuff has been taken * care of in c4iw_accept_cr */ - if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { + if (ep->ird < resp_ord) { + if (RELAXED_IRD_NEGOTIATION && resp_ord <= + ep->com.dev->rdev.lldi.max_ordird_qp) + ep->ird = resp_ord; + else + insuff_ird = 1; + } else if (ep->ird > resp_ord) { + ep->ird = resp_ord; + } + if (ep->ord > resp_ird) { + if (RELAXED_IRD_NEGOTIATION) + ep->ord = resp_ird; + else + insuff_ird = 1; + } + if (insuff_ird) { err = -ENOMEM; ep->ird = resp_ord; ep->ord = resp_ird; - insuff_ird = 1; } if (ntohs(mpa_v2_params->ird) & @@ -1579,6 +1601,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) MPA_V2_IRD_ORD_MASK; ep->ord = ntohs(mpa_v2_params->ord) & MPA_V2_IRD_ORD_MASK; + PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, + ep->ord); if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) if (peer2peer) { if (ntohs(mpa_v2_params->ord) & @@ -1798,6 +1822,20 @@ static int is_neg_adv(unsigned int status) status == CPL_ERR_KEEPALV_NEG_ADVICE; } +static char *neg_adv_str(unsigned int status) +{ + switch (status) { + case CPL_ERR_RTX_NEG_ADVICE: + return "Retransmit timeout"; + case CPL_ERR_PERSIST_NEG_ADVICE: + return "Persist timeout"; + case CPL_ERR_KEEPALV_NEG_ADVICE: + return "Keepalive timeout"; + default: + return "Unknown"; + } +} + static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) { ep->snd_win = snd_win; @@ -1996,8 +2034,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) status, status2errno(status)); if (is_neg_adv(status)) { - printk(KERN_WARNING MOD "Connection problems for atid %u\n", - atid); + dev_warn(&dev->rdev.lldi.pdev->dev, + "Connection problems for atid %u status %u (%s)\n", + atid, status, neg_adv_str(status)); return 0; } @@ -2472,8 +2511,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) ep = lookup_tid(t, tid); if (is_neg_adv(req->status)) { - PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, - ep->hwtid); + dev_warn(&dev->rdev.lldi.pdev->dev, + "Negative advice on abort - tid %u status %d (%s)\n", + ep->hwtid, req->status, neg_adv_str(req->status)); return 0; } PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, @@ -2731,8 +2771,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) BUG_ON(!qp); set_bit(ULP_ACCEPT, &ep->com.history); - if ((conn_param->ord > c4iw_max_read_depth) || - (conn_param->ird > c4iw_max_read_depth)) { + if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || + (conn_param->ird > cur_max_read_depth(ep->com.dev))) { abort_connection(ep, NULL, GFP_KERNEL); err = -EINVAL; goto err; @@ -2740,31 +2780,41 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { if (conn_param->ord > ep->ird) { - ep->ird = conn_param->ird; - ep->ord = conn_param->ord; - send_mpa_reject(ep, conn_param->private_data, - conn_param->private_data_len); - abort_connection(ep, NULL, GFP_KERNEL); - err = -ENOMEM; - goto err; + if (RELAXED_IRD_NEGOTIATION) { + ep->ord = ep->ird; + } else { + ep->ird = conn_param->ird; + ep->ord = conn_param->ord; + send_mpa_reject(ep, conn_param->private_data, + conn_param->private_data_len); + abort_connection(ep, NULL, GFP_KERNEL); + err = -ENOMEM; + goto err; + } } - if (conn_param->ird > ep->ord) { - if (!ep->ord) - conn_param->ird = 1; - else { + if (conn_param->ird < ep->ord) { + if (RELAXED_IRD_NEGOTIATION && + ep->ord <= 
h->rdev.lldi.max_ordird_qp) { + conn_param->ird = ep->ord; + } else { abort_connection(ep, NULL, GFP_KERNEL); err = -ENOMEM; goto err; } } - } ep->ird = conn_param->ird; ep->ord = conn_param->ord; - if (ep->mpa_attr.version != 2) + if (ep->mpa_attr.version == 1) { if (peer2peer && ep->ird == 0) ep->ird = 1; + } else { + if (peer2peer && + (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && + (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0) + ep->ird = 1; + } PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); @@ -2803,6 +2853,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) return 0; err1: ep->com.cm_id = NULL; + abort_connection(ep, NULL, GFP_KERNEL); cm_id->rem_ref(cm_id); err: mutex_unlock(&ep->com.mutex); @@ -2886,8 +2937,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) int iptype; int iwpm_err = 0; - if ((conn_param->ord > c4iw_max_read_depth) || - (conn_param->ird > c4iw_max_read_depth)) { + if ((conn_param->ord > cur_max_read_depth(dev)) || + (conn_param->ird > cur_max_read_depth(dev))) { err = -EINVAL; goto out; } @@ -3867,8 +3918,9 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb) return 0; } if (is_neg_adv(req->status)) { - PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep, - ep->hwtid); + dev_warn(&dev->rdev.lldi.pdev->dev, + "Negative advice on abort - tid %u status %d (%s)\n", + ep->hwtid, req->status, neg_adv_str(req->status)); kfree_skb(skb); return 0; } diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index c04292c950f1..0f773e78e080 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -633,11 +633,15 @@ proc_cqe: wq->sq.cidx = (uint16_t)idx; PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx); *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id; + if (c4iw_wr_log) + c4iw_log_wr_stats(wq, hw_cqe); t4_sq_consume(wq); } else { PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx); *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id; BUG_ON(t4_rq_empty(wq)); + if (c4iw_wr_log) + c4iw_log_wr_stats(wq, hw_cqe); t4_rq_consume(wq); goto skip_cqe; } @@ -895,7 +899,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, /* * Make actual HW queue 2x to avoid cdix_inc overflows. */ - hwentries = min(entries * 2, T4_MAX_IQ_SIZE); + hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size); /* * Make HW queue at least 64 entries so GTS updates aren't too @@ -909,14 +913,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries, /* * memsize must be a multiple of the page size if its a user cq. 
*/ - if (ucontext) { + if (ucontext) memsize = roundup(memsize, PAGE_SIZE); - hwentries = memsize / sizeof *chp->cq.queue; - while (hwentries > T4_MAX_IQ_SIZE) { - memsize -= PAGE_SIZE; - hwentries = memsize / sizeof *chp->cq.queue; - } - } chp->cq.size = hwentries; chp->cq.memsize = memsize; chp->cq.vector = vector; diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 7db82b24302b..f25df5276c22 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -33,6 +33,7 @@ #include <linux/moduleparam.h> #include <linux/debugfs.h> #include <linux/vmalloc.h> +#include <linux/math64.h> #include <rdma/ib_verbs.h> @@ -55,6 +56,15 @@ module_param(allow_db_coalescing_on_t5, int, 0644); MODULE_PARM_DESC(allow_db_coalescing_on_t5, "Allow DB Coalescing on T5 (default = 0)"); +int c4iw_wr_log = 0; +module_param(c4iw_wr_log, int, 0444); +MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data."); + +int c4iw_wr_log_size_order = 12; +module_param(c4iw_wr_log_size_order, int, 0444); +MODULE_PARM_DESC(c4iw_wr_log_size_order, + "Number of entries (log2) in the work request timing log."); + struct uld_ctx { struct list_head entry; struct cxgb4_lld_info lldi; @@ -103,6 +113,117 @@ static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count, return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos); } +void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) +{ + struct wr_log_entry le; + int idx; + + if (!wq->rdev->wr_log) + return; + + idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) & + (wq->rdev->wr_log_size - 1); + le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]); + getnstimeofday(&le.poll_host_ts); + le.valid = 1; + le.cqe_sge_ts = CQE_TS(cqe); + if (SQ_TYPE(cqe)) { + le.qid = wq->sq.qid; + le.opcode = CQE_OPCODE(cqe); + le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts; + le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts; + le.wr_id = CQE_WRID_SQ_IDX(cqe); + } else { + le.qid = wq->rq.qid; + le.opcode = FW_RI_RECEIVE; + le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts; + le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts; + le.wr_id = CQE_WRID_MSN(cqe); + } + wq->rdev->wr_log[idx] = le; +} + +static int wr_log_show(struct seq_file *seq, void *v) +{ + struct c4iw_dev *dev = seq->private; + struct timespec prev_ts = {0, 0}; + struct wr_log_entry *lep; + int prev_ts_set = 0; + int idx, end; + +#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000) + + idx = atomic_read(&dev->rdev.wr_log_idx) & + (dev->rdev.wr_log_size - 1); + end = idx - 1; + if (end < 0) + end = dev->rdev.wr_log_size - 1; + lep = &dev->rdev.wr_log[idx]; + while (idx != end) { + if (lep->valid) { + if (!prev_ts_set) { + prev_ts_set = 1; + prev_ts = lep->poll_host_ts; + } + seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode " + "%u %s 0x%x host_wr_delta sec %lu nsec %lu " + "post_sge_ts 0x%llx cqe_sge_ts 0x%llx " + "poll_sge_ts 0x%llx post_poll_delta_ns %llu " + "cqe_poll_delta_ns %llu\n", + idx, + timespec_sub(lep->poll_host_ts, + prev_ts).tv_sec, + timespec_sub(lep->poll_host_ts, + prev_ts).tv_nsec, + lep->qid, lep->opcode, + lep->opcode == FW_RI_RECEIVE ? 
+ "msn" : "wrid", + lep->wr_id, + timespec_sub(lep->poll_host_ts, + lep->post_host_ts).tv_sec, + timespec_sub(lep->poll_host_ts, + lep->post_host_ts).tv_nsec, + lep->post_sge_ts, lep->cqe_sge_ts, + lep->poll_sge_ts, + ts2ns(lep->poll_sge_ts - lep->post_sge_ts), + ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts)); + prev_ts = lep->poll_host_ts; + } + idx++; + if (idx > (dev->rdev.wr_log_size - 1)) + idx = 0; + lep = &dev->rdev.wr_log[idx]; + } +#undef ts2ns + return 0; +} + +static int wr_log_open(struct inode *inode, struct file *file) +{ + return single_open(file, wr_log_show, inode->i_private); +} + +static ssize_t wr_log_clear(struct file *file, const char __user *buf, + size_t count, loff_t *pos) +{ + struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private; + int i; + + if (dev->rdev.wr_log) + for (i = 0; i < dev->rdev.wr_log_size; i++) + dev->rdev.wr_log[i].valid = 0; + return count; +} + +static const struct file_operations wr_log_debugfs_fops = { + .owner = THIS_MODULE, + .open = wr_log_open, + .release = single_release, + .read = seq_read, + .llseek = seq_lseek, + .write = wr_log_clear, +}; + static int dump_qp(int id, void *p, void *data) { struct c4iw_qp *qp = p; @@ -241,12 +362,32 @@ static int dump_stag(int id, void *p, void *data) struct c4iw_debugfs_data *stagd = data; int space; int cc; + struct fw_ri_tpte tpte; + int ret; space = stagd->bufsize - stagd->pos - 1; if (space == 0) return 1; - cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8); + ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8, + (__be32 *)&tpte); + if (ret) { + dev_err(&stagd->devp->rdev.lldi.pdev->dev, + "%s cxgb4_read_tpte err %d\n", __func__, ret); + return ret; + } + cc = snprintf(stagd->buf + stagd->pos, space, + "stag: idx 0x%x valid %d key 0x%x state %d pdid %d " + "perm 0x%x ps %d len 0x%llx va 0x%llx\n", + (u32)id<<8, + G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)), + G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)), + G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)), + G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)), + G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)), + G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)), + ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo), + ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo)); if (cc < space) stagd->pos += cc; return 0; @@ -259,7 +400,7 @@ static int stag_release(struct inode *inode, struct file *file) printk(KERN_INFO "%s null stagd?\n", __func__); return 0; } - kfree(stagd->buf); + vfree(stagd->buf); kfree(stagd); return 0; } @@ -282,8 +423,8 @@ static int stag_open(struct inode *inode, struct file *file) idr_for_each(&stagd->devp->mmidr, count_idrs, &count); spin_unlock_irq(&stagd->devp->lock); - stagd->bufsize = count * sizeof("0x12345678\n"); - stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL); + stagd->bufsize = count * 256; + stagd->buf = vmalloc(stagd->bufsize); if (!stagd->buf) { ret = -ENOMEM; goto err1; @@ -348,6 +489,7 @@ static int stats_show(struct seq_file *seq, void *v) dev->rdev.stats.act_ofld_conn_fails); seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n", dev->rdev.stats.pas_ofld_conn_fails); + seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird); return 0; } @@ -583,6 +725,12 @@ static int setup_debugfs(struct c4iw_dev *devp) if (de && de->d_inode) de->d_inode->i_size = 4096; + if (c4iw_wr_log) { + de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root, + (void *)devp, &wr_log_debugfs_fops); + if (de && de->d_inode) + de->d_inode->i_size = 4096; + } return 0; } @@ -696,7 +844,20 @@ static 
int c4iw_rdev_open(struct c4iw_rdev *rdev) pr_err(MOD "error allocating status page\n"); goto err4; } + + if (c4iw_wr_log) { + rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) * + sizeof(*rdev->wr_log), GFP_KERNEL); + if (rdev->wr_log) { + rdev->wr_log_size = 1 << c4iw_wr_log_size_order; + atomic_set(&rdev->wr_log_idx, 0); + } else { + pr_err(MOD "error allocating wr_log. Logging disabled\n"); + } + } + rdev->status_page->db_off = 0; + return 0; err4: c4iw_rqtpool_destroy(rdev); @@ -710,6 +871,7 @@ err1: static void c4iw_rdev_close(struct c4iw_rdev *rdev) { + kfree(rdev->wr_log); free_page((unsigned long)rdev->status_page); c4iw_pblpool_destroy(rdev); c4iw_rqtpool_destroy(rdev); @@ -768,6 +930,27 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) } devp->rdev.lldi = *infop; + /* init various hw-queue params based on lld info */ + PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n", + __func__, devp->rdev.lldi.sge_ingpadboundary, + devp->rdev.lldi.sge_egrstatuspagesize); + + devp->rdev.hw_queue.t4_eq_status_entries = + devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1; + devp->rdev.hw_queue.t4_max_eq_size = 65520; + devp->rdev.hw_queue.t4_max_iq_size = 65520; + devp->rdev.hw_queue.t4_max_rq_size = 8192 - + devp->rdev.hw_queue.t4_eq_status_entries - 1; + devp->rdev.hw_queue.t4_max_sq_size = + devp->rdev.hw_queue.t4_max_eq_size - + devp->rdev.hw_queue.t4_eq_status_entries - 1; + devp->rdev.hw_queue.t4_max_qp_depth = + devp->rdev.hw_queue.t4_max_rq_size; + devp->rdev.hw_queue.t4_max_cq_depth = + devp->rdev.hw_queue.t4_max_iq_size - 2; + devp->rdev.hw_queue.t4_stat_len = + devp->rdev.lldi.sge_egrstatuspagesize; + /* * For T5 devices, we map all of BAR2 with WC. * For T4 devices with onchip qp mem, we map only that part @@ -818,6 +1001,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) mutex_init(&devp->rdev.stats.lock); mutex_init(&devp->db_mutex); INIT_LIST_HEAD(&devp->db_fc_list); + devp->avail_ird = devp->rdev.lldi.max_ird_adapter; if (c4iw_debugfs_root) { devp->debugfs_root = debugfs_create_dir( diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index d61d0a18f784..fbe6051af254 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -35,6 +35,55 @@ #include "iw_cxgb4.h" +static void print_tpte(struct c4iw_dev *dev, u32 stag) +{ + int ret; + struct fw_ri_tpte tpte; + + ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, + (__be32 *)&tpte); + if (ret) { + dev_err(&dev->rdev.lldi.pdev->dev, + "%s cxgb4_read_tpte err %d\n", __func__, ret); + return; + } + PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d " + "perm 0x%x ps %d len 0x%llx va 0x%llx\n", + stag & 0xffffff00, + G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)), + G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)), + G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)), + G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)), + G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)), + G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)), + ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo), + ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo)); +} + +static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe) +{ + __be64 *p = (void *)err_cqe; + + dev_err(&dev->rdev.lldi.pdev->dev, + "AE qpid %d opcode %d status 0x%x " + "type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n", + CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), + CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len), + CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); + + 
PDBG("%016llx %016llx %016llx %016llx\n", + be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]), + be64_to_cpu(p[3])); + + /* + * Ingress WRITE and READ_RESP errors provide + * the offending stag, so parse and log it. + */ + if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE || + CQE_OPCODE(err_cqe) == FW_RI_READ_RESP)) + print_tpte(dev, CQE_WRID_STAG(err_cqe)); +} + static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, struct c4iw_qp *qhp, struct t4_cqe *err_cqe, @@ -44,11 +93,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, struct c4iw_qp_attributes attrs; unsigned long flag; - printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x " - "type %d wrid.hi 0x%x wrid.lo 0x%x\n", - CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), - CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), - CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); + dump_err_cqe(dev, err_cqe); if (qhp->attr.state == C4IW_QP_STATE_RTS) { attrs.next_state = C4IW_QP_STATE_TERMINATE; diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 361fff7a0742..b5678ac97393 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -139,6 +139,29 @@ struct c4iw_stats { u64 pas_ofld_conn_fails; }; +struct c4iw_hw_queue { + int t4_eq_status_entries; + int t4_max_eq_size; + int t4_max_iq_size; + int t4_max_rq_size; + int t4_max_sq_size; + int t4_max_qp_depth; + int t4_max_cq_depth; + int t4_stat_len; +}; + +struct wr_log_entry { + struct timespec post_host_ts; + struct timespec poll_host_ts; + u64 post_sge_ts; + u64 cqe_sge_ts; + u64 poll_sge_ts; + u16 qid; + u16 wr_id; + u8 opcode; + u8 valid; +}; + struct c4iw_rdev { struct c4iw_resource resource; unsigned long qpshift; @@ -156,7 +179,11 @@ struct c4iw_rdev { unsigned long oc_mw_pa; void __iomem *oc_mw_kva; struct c4iw_stats stats; + struct c4iw_hw_queue hw_queue; struct t4_dev_status_page *status_page; + atomic_t wr_log_idx; + struct wr_log_entry *wr_log; + int wr_log_size; }; static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) @@ -166,7 +193,7 @@ static inline int c4iw_fatal_error(struct c4iw_rdev *rdev) static inline int c4iw_num_stags(struct c4iw_rdev *rdev) { - return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5)); + return (int)(rdev->lldi.vr->stag.size >> 5); } #define C4IW_WR_TO (30*HZ) @@ -237,6 +264,7 @@ struct c4iw_dev { struct idr atid_idr; struct idr stid_idr; struct list_head db_fc_list; + u32 avail_ird; }; static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) @@ -318,6 +346,13 @@ static inline void remove_handle_nolock(struct c4iw_dev *rhp, _remove_handle(rhp, idr, id, 0); } +extern uint c4iw_max_read_depth; + +static inline int cur_max_read_depth(struct c4iw_dev *dev) +{ + return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth); +} + struct c4iw_pd { struct ib_pd ibpd; u32 pdid; @@ -991,7 +1026,8 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe); extern struct cxgb4_client t4c_client; extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; -extern int c4iw_max_read_depth; +extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe); +extern int c4iw_wr_log; extern int db_fc_threshold; extern int db_coalescing_threshold; extern int use_dsgl; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index b1d305338de6..72e3b69d1b76 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -318,14 +318,16 @@ 
static int c4iw_query_device(struct ib_device *ibdev, props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor; props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device; props->max_mr_size = T4_MAX_MR_SIZE; - props->max_qp = T4_MAX_NUM_QP; - props->max_qp_wr = T4_MAX_QP_DEPTH; + props->max_qp = dev->rdev.lldi.vr->qp.size / 2; + props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth; props->max_sge = T4_MAX_RECV_SGE; props->max_sge_rd = 1; - props->max_qp_rd_atom = c4iw_max_read_depth; - props->max_qp_init_rd_atom = c4iw_max_read_depth; - props->max_cq = T4_MAX_NUM_CQ; - props->max_cqe = T4_MAX_CQ_DEPTH; + props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter; + props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp, + c4iw_max_read_depth); + props->max_qp_init_rd_atom = props->max_qp_rd_atom; + props->max_cq = dev->rdev.lldi.vr->qp.size; + props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth; props->max_mr = c4iw_num_stags(&dev->rdev); props->max_pd = T4_MAX_NUM_PD; props->local_ca_ack_delay = 0; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 086f62f5dc9e..c158fcc02bca 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -58,6 +58,31 @@ static int max_fr_immd = T4_MAX_FR_IMMD; module_param(max_fr_immd, int, 0644); MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immedate"); +static int alloc_ird(struct c4iw_dev *dev, u32 ird) +{ + int ret = 0; + + spin_lock_irq(&dev->lock); + if (ird <= dev->avail_ird) + dev->avail_ird -= ird; + else + ret = -ENOMEM; + spin_unlock_irq(&dev->lock); + + if (ret) + dev_warn(&dev->rdev.lldi.pdev->dev, + "device IRD resources exhausted\n"); + + return ret; +} + +static void free_ird(struct c4iw_dev *dev, int ird) +{ + spin_lock_irq(&dev->lock); + dev->avail_ird += ird; + spin_unlock_irq(&dev->lock); +} + static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) { unsigned long flag; @@ -180,9 +205,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, } /* - * RQT must be a power of 2. + * RQT must be a power of 2 and at least 16 deep. */ - wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size); + wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); if (!wq->rq.rqt_hwaddr) { ret = -ENOMEM; @@ -258,7 +283,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, /* * eqsize is the number of 64B entries plus the status page size. */ - eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES; + eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + + rdev->hw_queue.t4_eq_status_entries; res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ @@ -283,7 +309,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, /* * eqsize is the number of 64B entries plus the status page size. 
*/ - eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES; + eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + + rdev->hw_queue.t4_eq_status_entries; res->u.sqrq.fetchszm_to_iqid = cpu_to_be32( V_FW_RI_RES_WR_HOSTFCMODE(0) | /* no host cidx updates */ V_FW_RI_RES_WR_CPRIO(0) | /* don't keep in chip cache */ @@ -796,6 +823,11 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, qhp->sq_sig_all; swsqe->flushed = 0; swsqe->wr_id = wr->wr_id; + if (c4iw_wr_log) { + swsqe->sge_ts = cxgb4_read_sge_timestamp( + qhp->rhp->rdev.lldi.ports[0]); + getnstimeofday(&swsqe->host_ts); + } init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); @@ -859,6 +891,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, } qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; + if (c4iw_wr_log) { + qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = + cxgb4_read_sge_timestamp( + qhp->rhp->rdev.lldi.ports[0]); + getnstimeofday( + &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts); + } wqe->recv.opcode = FW_RI_RECV_WR; wqe->recv.r1 = 0; @@ -1202,12 +1241,20 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) int ret; struct sk_buff *skb; - PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, - qhp->ep->hwtid); + PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp, + qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); skb = alloc_skb(sizeof *wqe, GFP_KERNEL); - if (!skb) - return -ENOMEM; + if (!skb) { + ret = -ENOMEM; + goto out; + } + ret = alloc_ird(rhp, qhp->attr.max_ird); + if (ret) { + qhp->attr.max_ird = 0; + kfree_skb(skb); + goto out; + } set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx); wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe)); @@ -1258,10 +1305,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) ret = c4iw_ofld_send(&rhp->rdev, skb); if (ret) - goto out; + goto err1; ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait, qhp->ep->hwtid, qhp->wq.sq.qid, __func__); + if (!ret) + goto out; +err1: + free_ird(rhp, qhp->attr.max_ird); out: PDBG("%s ret %d\n", __func__, ret); return ret; @@ -1306,7 +1357,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, newattr.max_ord = attrs->max_ord; } if (mask & C4IW_QP_ATTR_MAX_IRD) { - if (attrs->max_ird > c4iw_max_read_depth) { + if (attrs->max_ird > cur_max_read_depth(rhp)) { ret = -EINVAL; goto out; } @@ -1529,6 +1580,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp) if (!list_empty(&qhp->db_fc_entry)) list_del_init(&qhp->db_fc_entry); spin_unlock_irq(&rhp->lock); + free_ird(rhp, qhp->attr.max_ird); ucontext = ib_qp->uobject ? to_c4iw_ucontext(ib_qp->uobject->context) : NULL; @@ -1569,13 +1621,17 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE) return ERR_PTR(-EINVAL); - rqsize = roundup(attrs->cap.max_recv_wr + 1, 16); - if (rqsize > T4_MAX_RQ_SIZE) + if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) return ERR_PTR(-E2BIG); + rqsize = attrs->cap.max_recv_wr + 1; + if (rqsize < 8) + rqsize = 8; - sqsize = roundup(attrs->cap.max_send_wr + 1, 16); - if (sqsize > T4_MAX_SQ_SIZE) + if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) return ERR_PTR(-E2BIG); + sqsize = attrs->cap.max_send_wr + 1; + if (sqsize < 8) + sqsize = 8; ucontext = pd->uobject ? 
to_c4iw_ucontext(pd->uobject->context) : NULL; @@ -1583,19 +1639,20 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, if (!qhp) return ERR_PTR(-ENOMEM); qhp->wq.sq.size = sqsize; - qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue; + qhp->wq.sq.memsize = + (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * + sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); qhp->wq.sq.flush_cidx = -1; qhp->wq.rq.size = rqsize; - qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue; + qhp->wq.rq.memsize = + (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * + sizeof(*qhp->wq.rq.queue); if (ucontext) { qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); } - PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n", - __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize); - ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx); if (ret) @@ -1619,8 +1676,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, qhp->attr.enable_rdma_read = 1; qhp->attr.enable_rdma_write = 1; qhp->attr.enable_bind = 1; - qhp->attr.max_ord = 1; - qhp->attr.max_ird = 1; + qhp->attr.max_ord = 0; + qhp->attr.max_ird = 0; qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR; spin_lock_init(&qhp->lock); mutex_init(&qhp->mutex); @@ -1714,9 +1771,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, qhp->ibqp.qp_num = qhp->wq.sq.qid; init_timer(&(qhp->timer)); INIT_LIST_HEAD(&qhp->db_fc_entry); - PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n", - __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries, - qhp->wq.sq.qid); + PDBG("%s sq id %u size %u memsize %zu num_entries %u " + "rq id %u size %u memsize %zu num_entries %u\n", __func__, + qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, + attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, + qhp->wq.rq.memsize, attrs->cap.max_recv_wr); return &qhp->ibqp; err8: kfree(mm5); @@ -1804,5 +1863,11 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, memset(attr, 0, sizeof *attr); memset(init_attr, 0, sizeof *init_attr); attr->qp_state = to_ib_qp_state(qhp->attr.state); + init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; + init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; + init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; + init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; + init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; + init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; return 0; } diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h index 68b0a6bf4eb0..df5edfa31a8f 100644 --- a/drivers/infiniband/hw/cxgb4/t4.h +++ b/drivers/infiniband/hw/cxgb4/t4.h @@ -36,22 +36,11 @@ #include "t4_msg.h" #include "t4fw_ri_api.h" -#define T4_MAX_NUM_QP 65536 -#define T4_MAX_NUM_CQ 65536 #define T4_MAX_NUM_PD 65536 -#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 
2 : 1) -#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES) -#define T4_MAX_IQ_SIZE (65520 - 1) -#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES) -#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1) -#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1) -#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1) -#define T4_MAX_NUM_STAG (1<<15) #define T4_MAX_MR_SIZE (~0ULL) #define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */ #define T4_STAG_UNSET 0xffffffff #define T4_FW_MAJ 0 -#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1) #define A_PCIE_MA_SYNC 0x30b4 struct t4_status_page { @@ -244,8 +233,8 @@ struct t4_cqe { #define CQE_WRID_SQ_IDX(x) ((x)->u.scqe.cidx) /* generic accessor macros */ -#define CQE_WRID_HI(x) ((x)->u.gen.wrid_hi) -#define CQE_WRID_LOW(x) ((x)->u.gen.wrid_low) +#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi)) +#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low)) /* macros for flit 3 of the cqe */ #define S_CQE_GENBIT 63 @@ -277,6 +266,8 @@ struct t4_swsqe { int signaled; u16 idx; int flushed; + struct timespec host_ts; + u64 sge_ts; }; static inline pgprot_t t4_pgprot_wc(pgprot_t prot) @@ -314,6 +305,8 @@ struct t4_sq { struct t4_swrqe { u64 wr_id; + struct timespec host_ts; + u64 sge_ts; }; struct t4_rq { diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h index 91289a051af9..5709e77faf7c 100644 --- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h +++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h @@ -849,6 +849,5 @@ enum { /* TCP congestion control algorithms */ #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL) #define CONG_CNTRL_VALID (1 << 18) -#define T5_OPT_2_VALID (1 << 31) #endif /* _T4FW_RI_API_H_ */ |
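The functional core of the cm.c changes above is the "relaxed" IRD/ORD negotiation: instead of aborting whenever the responder's ORD exceeds the local IRD, the driver now grows or shrinks the values within the adapter's per-QP limit. The fragment below is an illustrative restatement of that rule from process_mpa_reply() in isolation, not driver code; the names ird, ord and max_ordird_qp follow the diff, the surrounding struct and function are a hypothetical harness.

/* Standalone restatement of the relaxed IRD/ORD negotiation rule above. */
#include <errno.h>

#define RELAXED_IRD_NEGOTIATION 1

struct ep_rdma_params {
	unsigned int ird;	/* inbound read depth we advertised */
	unsigned int ord;	/* outbound read depth we want to use */
};

/* Reconcile our ird/ord with the responder's values; max_ordird_qp is the
 * per-QP limit reported by the adapter (lldi.max_ordird_qp in the diff). */
static int negotiate_ird_ord(struct ep_rdma_params *ep,
			     unsigned int resp_ird, unsigned int resp_ord,
			     unsigned int max_ordird_qp)
{
	int insufficient = 0;

	if (ep->ird < resp_ord) {
		/* Peer wants to read more than we offered: grow our ird if
		 * the hardware allows it, otherwise fail (strict mode). */
		if (RELAXED_IRD_NEGOTIATION && resp_ord <= max_ordird_qp)
			ep->ird = resp_ord;
		else
			insufficient = 1;
	} else if (ep->ird > resp_ord) {
		ep->ird = resp_ord;
	}

	if (ep->ord > resp_ird) {
		/* We asked to read more than the peer can absorb: shrink
		 * in relaxed mode, fail in strict mode. */
		if (RELAXED_IRD_NEGOTIATION)
			ep->ord = resp_ird;
		else
			insufficient = 1;
	}

	if (insufficient) {
		ep->ird = resp_ord;
		ep->ord = resp_ird;
		return -ENOMEM;
	}
	return 0;
}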
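The new work-request timing log in device.c uses a simple overwrite-on-wrap ring: a power-of-two buffer sized by c4iw_wr_log_size_order, with each writer claiming a slot via an atomic counter masked into range. The sketch below restates that indexing pattern from c4iw_log_wr_stats() in userspace C; C11 atomics stand in for the kernel's atomic_inc_return(), and the entry layout is abbreviated.

/* Userspace restatement of the wr_log ring indexing; illustrative only. */
#include <stdatomic.h>
#include <stdlib.h>

struct wr_log_entry {
	unsigned long long post_sge_ts;
	unsigned long long poll_sge_ts;
	int valid;
};

struct wr_log {
	struct wr_log_entry *slots;
	unsigned int size;	/* 1 << c4iw_wr_log_size_order, power of two */
	atomic_uint next;
};

static int wr_log_init(struct wr_log *log, unsigned int size_order)
{
	log->size = 1u << size_order;
	log->slots = calloc(log->size, sizeof(*log->slots));
	atomic_init(&log->next, 0);
	return log->slots ? 0 : -1;
}

static void wr_log_record(struct wr_log *log, struct wr_log_entry e)
{
	/* Equivalent of (atomic_inc_return(&idx) - 1) & (size - 1) in the
	 * driver: fetch_add returns the pre-increment value, so the "- 1"
	 * is already accounted for.  Old entries are silently overwritten. */
	unsigned int idx = atomic_fetch_add(&log->next, 1) & (log->size - 1);

	e.valid = 1;
	log->slots[idx] = e;
}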