Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c             |  2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_alloc.c  | 23
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h |  4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.c    | 25
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hem.h    |  1
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c  | 22
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c      | 31
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c    | 43
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_d.h       |  6
9 files changed, 85 insertions, 72 deletions
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index fd01a760259f..af5f7936f7e5 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -814,7 +814,7 @@ static inline void hfi1_make_rc_ack_16B(struct rvt_qp *qp,
 	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 	struct hfi1_16b_header *hdr = &opa_hdr->opah;
 	struct ib_other_headers *ohdr;
-	u32 bth0, bth1;
+	u32 bth0, bth1 = 0;
 	u16 len, pkey;
 	u8 becn = !!is_fecn;
 	u8 l4 = OPA_16B_L4_IB_LOCAL;
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index 3e4c5253ab5c..a40ec939ece5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 {
 	int i;
 	struct device *dev = hr_dev->dev;
-	u32 bits_per_long = BITS_PER_LONG;
 
 	if (buf->nbufs == 1) {
 		dma_free_coherent(dev, size, buf->direct.buf,
 				  buf->direct.map);
 	} else {
-		if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
-			vunmap(buf->direct.buf);
-
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->page_list[i].buf)
 				dma_free_coherent(dev, 1 << buf->page_shift,
@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 {
 	int i = 0;
 	dma_addr_t t;
-	struct page **pages;
 	struct device *dev = hr_dev->dev;
-	u32 bits_per_long = BITS_PER_LONG;
 	u32 page_size = 1 << page_shift;
 	u32 order;
@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 			buf->page_list[i].map = t;
 			memset(buf->page_list[i].buf, 0, page_size);
 		}
-		if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
-			pages = kmalloc_array(buf->nbufs, sizeof(*pages),
-					      GFP_KERNEL);
-			if (!pages)
-				goto err_free;
-
-			for (i = 0; i < buf->nbufs; ++i)
-				pages[i] = virt_to_page(buf->page_list[i].buf);
-
-			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
-					       PAGE_KERNEL);
-			kfree(pages);
-			if (!buf->direct.buf)
-				goto err_free;
-		} else {
-			buf->direct.buf = NULL;
-		}
 	}
 
 	return 0;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 01d3d695cbba..b154ce40cded 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -726,11 +726,9 @@ static inline struct hns_roce_qp
 
 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
 {
-	u32 bits_per_long_val = BITS_PER_LONG;
 	u32 page_size = 1 << buf->page_shift;
 
-	if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
-	    buf->nbufs == 1)
+	if (buf->nbufs == 1)
 		return (char *)(buf->direct.buf) + offset;
 	else
 		return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
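The hns patches above drop the 64-bit vmap() shortcut, so hns_roce_buf_offset() now resolves offsets through the page list whenever the buffer is not physically contiguous. Below is a minimal user-space sketch of that indexing; the struct and field names only loosely mirror the driver, and the sizes are invented for illustration.

/*
 * Sketch of paged-buffer offset resolution without a contiguous mapping:
 * offset >> page_shift picks the page, offset & (page_size - 1) is the
 * intra-page byte offset.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_page { void *buf; };

struct fake_buf {
	unsigned int page_shift;	/* log2 of the per-page size */
	unsigned int nbufs;		/* number of discontiguous pages */
	struct fake_page *page_list;
};

static void *buf_offset(struct fake_buf *buf, unsigned int offset)
{
	unsigned int page_size = 1u << buf->page_shift;

	return (char *)buf->page_list[offset >> buf->page_shift].buf +
	       (offset & (page_size - 1));
}

int main(void)
{
	struct fake_buf buf = { .page_shift = 12, .nbufs = 4 };
	unsigned int i;

	buf.page_list = calloc(buf.nbufs, sizeof(*buf.page_list));
	for (i = 0; i < buf.nbufs; ++i)
		buf.page_list[i].buf = calloc(1, 1u << buf.page_shift);

	/* Offset 0x1010 lands in page 1 at intra-page offset 0x10. */
	memset(buf_offset(&buf, 0x1010), 0xab, 16);
	printf("page 1, byte 0x10 = 0x%02x\n",
	       (unsigned)((unsigned char *)buf.page_list[1].buf)[0x10]);

	for (i = 0; i < buf.nbufs; ++i)
		free(buf.page_list[i].buf);
	free(buf.page_list);
	return 0;
}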
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index 8b733a66fae5..0eeabfbee192 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -224,6 +224,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
 			chunk->npages = 0;
 			chunk->nsg = 0;
+			memset(chunk->buf, 0, sizeof(chunk->buf));
 			list_add_tail(&chunk->list, &hem->chunk_list);
 		}
 
@@ -240,8 +241,7 @@ static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
 		if (!buf)
 			goto fail;
 
-		sg_set_buf(mem, buf, PAGE_SIZE << order);
-		WARN_ON(mem->offset);
+		chunk->buf[chunk->npages] = buf;
 		sg_dma_len(mem) = PAGE_SIZE << order;
 
 		++chunk->npages;
@@ -267,8 +267,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
 	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i)
 			dma_free_coherent(hr_dev->dev,
-					  chunk->mem[i].length,
-					  lowmem_page_address(sg_page(&chunk->mem[i])),
+					  sg_dma_len(&chunk->mem[i]),
+					  chunk->buf[i],
 					  sg_dma_address(&chunk->mem[i]));
 		kfree(chunk);
 	}
@@ -722,11 +722,12 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 	struct hns_roce_hem_chunk *chunk;
 	struct hns_roce_hem_mhop mhop;
 	struct hns_roce_hem *hem;
-	struct page *page = NULL;
+	void *addr = NULL;
 	unsigned long mhop_obj = obj;
 	unsigned long obj_per_chunk;
 	unsigned long idx_offset;
 	int offset, dma_offset;
+	int length;
 	int i, j;
 	u32 hem_idx = 0;
 
@@ -763,25 +764,25 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
 
 	list_for_each_entry(chunk, &hem->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i) {
+			length = sg_dma_len(&chunk->mem[i]);
 			if (dma_handle && dma_offset >= 0) {
-				if (sg_dma_len(&chunk->mem[i]) >
-				    (u32)dma_offset)
+				if (length > (u32)dma_offset)
 					*dma_handle = sg_dma_address(
 						&chunk->mem[i]) + dma_offset;
-				dma_offset -= sg_dma_len(&chunk->mem[i]);
+				dma_offset -= length;
 			}
 
-			if (chunk->mem[i].length > (u32)offset) {
-				page = sg_page(&chunk->mem[i]);
+			if (length > (u32)offset) {
+				addr = chunk->buf[i] + offset;
 				goto out;
 			}
-			offset -= chunk->mem[i].length;
+			offset -= length;
 		}
 	}
 
 out:
 	mutex_unlock(&table->mutex);
-	return page ? lowmem_page_address(page) + offset : NULL;
+	return addr;
 }
 EXPORT_SYMBOL_GPL(hns_roce_table_find);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index db66db12075e..e8850d59e780 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -78,6 +78,7 @@ struct hns_roce_hem_chunk {
 	int			 npages;
 	int			 nsg;
 	struct scatterlist	 mem[HNS_ROCE_HEM_CHUNK_LEN];
+	void			 *buf[HNS_ROCE_HEM_CHUNK_LEN];
 };
 
 struct hns_roce_hem {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8f719c00467b..8e18445714a9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1126,9 +1126,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 {
 	struct hns_roce_v2_mpt_entry *mpt_entry;
 	struct scatterlist *sg;
+	u64 page_addr;
 	u64 *pages;
+	int i, j;
+	int len;
 	int entry;
-	int i;
 
 	mpt_entry = mb_buf;
 	memset(mpt_entry, 0, sizeof(*mpt_entry));
@@ -1186,14 +1188,20 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
 	i = 0;
 	for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
-		pages[i] = ((u64)sg_dma_address(sg)) >> 6;
-
-		/* Record the first 2 entry directly to MTPT table */
-		if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
-			break;
-		i++;
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		for (j = 0; j < len; ++j) {
+			page_addr = sg_dma_address(sg) +
+				    (j << mr->umem->page_shift);
+			pages[i] = page_addr >> 6;
+
+			/* Record the first 2 entry directly to MTPT table */
+			if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
+				goto found;
+			i++;
+		}
 	}
 
+found:
 	mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
 	roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
 		       V2_MPT_BYTE_56_PA0_H_S,
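The hns_roce_v2_write_mtpt() hunk replaces one-address-per-scatterlist-entry recording with a nested walk that expands each DMA segment into its constituent page addresses, since a single sg entry can cover several pages. A user-space sketch of that expansion follows; the segment layout and names are made up for illustration.

/*
 * Expand each DMA-contiguous segment into per-page addresses,
 * as the patched mtpt writer does for the scatterlist.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT_X 12
#define MAX_PAGES    8

struct fake_seg {
	uint64_t dma_addr;	/* start of the DMA-contiguous segment */
	uint64_t dma_len;	/* length in bytes, page aligned */
};

int main(void)
{
	/* Two segments: one page, then three contiguous pages. */
	struct fake_seg segs[] = {
		{ 0x10000000, 1 << PAGE_SHIFT_X },
		{ 0x20000000, 3 << PAGE_SHIFT_X },
	};
	uint64_t pages[MAX_PAGES];
	int i = 0, j, s;

	for (s = 0; s < 2; ++s) {
		int npages = (int)(segs[s].dma_len >> PAGE_SHIFT_X);

		for (j = 0; j < npages && i < MAX_PAGES; ++j, ++i)
			pages[i] = segs[s].dma_addr +
				   ((uint64_t)j << PAGE_SHIFT_X);
	}

	for (j = 0; j < i; ++j)
		printf("page[%d] = 0x%llx\n", j, (unsigned long long)pages[j]);
	return 0;
}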
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 493d6ef3d2d5..77870f9e1736 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1043,7 +1043,7 @@ negotiate_done:
  * i40iw_schedule_cm_timer
  * @@cm_node: connection's node
  * @sqbuf: buffer to send
- * @type: if it es send ot close
+ * @type: if it is send or close
  * @send_retrans: if rexmits to be done
  * @close_when_complete: is cm_node to be removed
  *
@@ -1067,7 +1067,8 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 
 	new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC);
 	if (!new_send) {
-		i40iw_free_sqbuf(vsi, (void *)sqbuf);
+		if (type != I40IW_TIMER_TYPE_CLOSE)
+			i40iw_free_sqbuf(vsi, (void *)sqbuf);
 		return -ENOMEM;
 	}
 	new_send->retrycount = I40IW_DEFAULT_RETRYS;
@@ -1082,7 +1083,6 @@ int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node,
 		new_send->timetosend += (HZ / 10);
 		if (cm_node->close_entry) {
 			kfree(new_send);
-			i40iw_free_sqbuf(vsi, (void *)sqbuf);
 			i40iw_pr_err("already close entry\n");
 			return -EINVAL;
 		}
@@ -2947,8 +2947,6 @@ static struct i40iw_cm_node *i40iw_create_cm_node(
 		loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
 		cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
 		loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
-		loopback_remotenode->state = I40IW_CM_STATE_MPAREQ_RCVD;
-		i40iw_create_event(loopback_remotenode, I40IW_CM_EVENT_MPA_REQ);
 	}
 	return cm_node;
 }
@@ -3689,11 +3687,16 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	cm_id->add_ref(cm_id);
 	i40iw_add_ref(&iwqp->ibqp);
-	i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
-
 	attr.qp_state = IB_QPS_RTS;
 	cm_node->qhash_set = false;
 	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
+
+	cm_node->accelerated = 1;
+	status =
+		i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0);
+	if (status)
+		i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n");
+
 	if (cm_node->loopbackpartner) {
 		cm_node->loopbackpartner->pdata.size = conn_param->private_data_len;
@@ -3704,7 +3707,6 @@ int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED);
 	}
 
-	cm_node->accelerated = 1;
 	if (cm_node->accept_pend) {
 		atomic_dec(&cm_node->listener->pend_accepts_cnt);
 		cm_node->accept_pend = 0;
@@ -3864,6 +3866,12 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		goto err;
 	}
 
+	if (cm_node->loopbackpartner) {
+		cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD;
+		i40iw_create_event(cm_node->loopbackpartner,
+				   I40IW_CM_EVENT_MPA_REQ);
+	}
+
 	i40iw_debug(cm_node->dev,
 		    I40IW_DEBUG_CM,
 		    "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n",
@@ -4044,9 +4052,6 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
 	dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0);
 	if (iwqp->page)
 		kunmap(iwqp->page);
-	status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, 0);
-	if (status)
-		i40iw_pr_err("send cm event\n");
 
 	memset(&attr, 0, sizeof(attr));
 	attr.qp_state = IB_QPS_RTS;
@@ -4054,6 +4059,10 @@ static void i40iw_cm_event_connected(struct i40iw_cm_event *event)
 	i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL);
 
 	cm_node->accelerated = 1;
+	status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY,
+				     0);
+	if (status)
+		i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n");
 
 	return;
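The i40iw_schedule_cm_timer() fixes hinge on buffer ownership: a close timer carries no send buffer, so only send-type timers may free sqbuf on the error path, and once an entry is scheduled it owns the buffer. A simplified, illustrative sketch of that rule; all names here are invented, not the driver's API.

/*
 * Buffer-ownership sketch: only a send timer owns sqbuf on the
 * allocation-failure path; a close timer never frees it.
 */
#include <stdio.h>
#include <stdlib.h>

enum timer_type { TIMER_TYPE_SEND, TIMER_TYPE_CLOSE };

struct timer_entry { enum timer_type type; void *sqbuf; };

static int schedule_timer(enum timer_type type, void *sqbuf)
{
	struct timer_entry *e = calloc(1, sizeof(*e));

	if (!e) {
		/* Only a send timer owns the buffer on this path. */
		if (type != TIMER_TYPE_CLOSE)
			free(sqbuf);
		return -1;
	}
	e->type = type;
	e->sqbuf = (type == TIMER_TYPE_CLOSE) ? NULL : sqbuf;
	/* Stand-in for the timer firing and the send completing:
	 * the entry now owns (and eventually frees) the buffer. */
	free(e->sqbuf);
	free(e);
	return 0;
}

int main(void)
{
	void *buf = malloc(64);

	if (schedule_timer(TIMER_TYPE_SEND, buf))
		fprintf(stderr, "send timer failed; buffer already freed\n");
	else
		printf("send timer scheduled\n");
	return 0;
}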
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index d88c6cf47cf2..da9821a10e0d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -513,7 +513,7 @@ static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp,
 
 	ret_code = i40iw_allocate_dma_mem(cqp->dev->hw,
 					  &cqp->sdbuf,
-					  128,
+					  I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size,
 					  I40IW_SD_BUF_ALIGNMENT);
 
 	if (ret_code)
@@ -596,14 +596,15 @@ void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp)
 }
 
 /**
- * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
- * @cqp: struct for cqp hw
- * @wqe_idx: we index of cqp ring
+ * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index
+ * @cqp: pointer to CQP structure
+ * @scratch: private data for CQP WQE
+ * @wqe_idx: WQE index for next WQE on CQP SQ
 */
-u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp,
+					       u64 scratch, u32 *wqe_idx)
 {
 	u64 *wqe = NULL;
-	u32 wqe_idx;
 	enum i40iw_status_code ret_code;
 
 	if (I40IW_RING_FULL_ERR(cqp->sq_ring)) {
@@ -616,21 +617,33 @@ u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
 			cqp->sq_ring.size);
 		return NULL;
 	}
-	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, wqe_idx, ret_code);
+	I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code);
 	cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++;
 	if (ret_code)
 		return NULL;
-	if (!wqe_idx)
+	if (!*wqe_idx)
 		cqp->polarity = !cqp->polarity;
 
-	wqe = cqp->sq_base[wqe_idx].elem;
-	cqp->scratch_array[wqe_idx] = scratch;
+	wqe = cqp->sq_base[*wqe_idx].elem;
+	cqp->scratch_array[*wqe_idx] = scratch;
 	I40IW_CQP_INIT_WQE(wqe);
 
 	return wqe;
 }
 
 /**
+ * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
+ * @cqp: struct for cqp hw
+ * @scratch: private data for CQP WQE
+ */
+u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch)
+{
+	u32 wqe_idx;
+
+	return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
+}
+
+/**
  * i40iw_sc_cqp_destroy - destroy cqp during close
  * @cqp: struct for cqp hw
  */
@@ -3587,8 +3600,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
 	u64 *wqe;
 	int mem_entries, wqe_entries;
 	struct i40iw_dma_mem *sdbuf = &cqp->sdbuf;
+	u64 offset;
+	u32 wqe_idx;
 
-	wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+	wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
 	if (!wqe)
 		return I40IW_ERR_RING_FULL;
@@ -3601,8 +3616,10 @@ static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp,
 		 LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT);
 
 	if (mem_entries) {
-		memcpy(sdbuf->va, &info->entry[3], (mem_entries << 4));
-		data = sdbuf->pa;
+		offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE;
+		memcpy((char *)sdbuf->va + offset, &info->entry[3],
+		       mem_entries << 4);
+		data = (u64)sdbuf->pa + offset;
 	} else {
 		data = 0;
 	}
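The cqp_sds_wqe_fill() change gives every CQP SQ slot its own I40IW_UPDATE_SD_BUF_SIZE slice of the staging buffer, selected by wqe_idx, instead of one shared 128-byte area that concurrent commands could clobber. A small user-space sketch of the slicing, with illustrative sizes:

/*
 * Per-WQE staging-buffer slices: one allocation of SQ_SIZE slices,
 * each command writes only into its own wqe_idx slice.
 */
#include <stdio.h>
#include <stdlib.h>

#define SD_BUF_SIZE 128	/* bytes staged per CQP WQE */
#define SQ_SIZE     4	/* illustrative ring size */

int main(void)
{
	/* The patch allocates I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size
	 * up front; this mirrors that with calloc. */
	char *sdbuf = calloc(SQ_SIZE, SD_BUF_SIZE);
	unsigned int wqe_idx;

	for (wqe_idx = 0; wqe_idx < SQ_SIZE; ++wqe_idx) {
		size_t offset = (size_t)wqe_idx * SD_BUF_SIZE;

		/* Each WQE stages its payload in its own slice. */
		snprintf(sdbuf + offset, SD_BUF_SIZE, "wqe %u payload",
			 wqe_idx);
	}

	for (wqe_idx = 0; wqe_idx < SQ_SIZE; ++wqe_idx)
		printf("%s\n", sdbuf + (size_t)wqe_idx * SD_BUF_SIZE);

	free(sdbuf);
	return 0;
}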
diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h
index 65ec39e3746b..029083cb81d5 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_d.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_d.h
@@ -1114,7 +1114,7 @@
 #define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT)
 
 #define I40IWQPC_ARPIDX_SHIFT 48
-#define I40IWQPC_ARPIDX_MASK (0xfffULL << I40IWQPC_ARPIDX_SHIFT)
+#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT)
 
 #define I40IWQPC_FLOWLABEL_SHIFT 0
 #define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT)
@@ -1526,7 +1526,7 @@ enum i40iw_alignment {
 	I40IW_AEQ_ALIGNMENT =	0x100,
 	I40IW_CEQ_ALIGNMENT =	0x100,
 	I40IW_CQ0_ALIGNMENT =	0x100,
-	I40IW_SD_BUF_ALIGNMENT =	0x100
+	I40IW_SD_BUF_ALIGNMENT =	0x80
 };
 
 #define I40IW_WQE_SIZE_64	64
@@ -1534,6 +1534,8 @@ enum i40iw_alignment {
 
 #define I40IW_QP_WQE_MIN_SIZE	32
 #define I40IW_QP_WQE_MAX_SIZE	128
 
+#define I40IW_UPDATE_SD_BUF_SIZE 128
+
 #define I40IW_CQE_QTYPE_RQ 0
 #define I40IW_CQE_QTYPE_SQ 1
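The ARPIDX mask widening matters because the ARP index is a 16-bit field at bit 48 of the QP context: the old 12-bit mask silently dropped the top four bits of any index above 4095. A quick stand-alone demonstration of the truncation:

/*
 * With the narrow 0xfff mask, ARP index 0x1234 packs into the context
 * field as 0x234; the widened 0xffff mask preserves all 16 bits.
 */
#include <stdio.h>
#include <stdint.h>

#define ARPIDX_SHIFT 48
#define MASK_OLD (0xfffULL  << ARPIDX_SHIFT)	/* 12 bits: truncates */
#define MASK_NEW (0xffffULL << ARPIDX_SHIFT)	/* 16 bits: full index */

int main(void)
{
	uint64_t arp_idx = 0x1234;	/* > 4095, needs all 16 bits */
	uint64_t old_field = (arp_idx << ARPIDX_SHIFT) & MASK_OLD;
	uint64_t new_field = (arp_idx << ARPIDX_SHIFT) & MASK_NEW;

	printf("old mask recovers 0x%llx\n",
	       (unsigned long long)(old_field >> ARPIDX_SHIFT));	/* 0x234 */
	printf("new mask recovers 0x%llx\n",
	       (unsigned long long)(new_field >> ARPIDX_SHIFT));	/* 0x1234 */
	return 0;
}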