Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--	drivers/infiniband/hw/mlx5/cq.c      |   6
-rw-r--r--	drivers/infiniband/hw/mlx5/mad.c     |   4
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c    | 173
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h |   3
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c      |   2
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c      |  91
-rw-r--r--	drivers/infiniband/hw/mlx5/srq.c     |   4
7 files changed, 140 insertions(+), 143 deletions(-)
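Most of the churn below is a mechanical conversion from the driver-private mlx5_vzalloc() wrapper to the kernel's generic kvzalloc() helper. Both zero the buffer and try kmalloc() first, falling back to vmalloc() when the request is too large to satisfy contiguously; kvzalloc() additionally takes its GFP flags explicitly, which is why every converted call site now spells out GFP_KERNEL. A minimal sketch of the resulting call pattern (variable names are illustrative, not tied to any one hunk):

	size_t inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		       MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	void *in;

	in = kvzalloc(inlen, GFP_KERNEL);	/* was: mlx5_vzalloc(inlen) */
	if (!in)
		return -ENOMEM;
	/* ... build and execute the firmware command ... */
	kvfree(in);	/* kvfree() handles both kmalloc'd and vmalloc'd buffers */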
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 94c049b62c2f..a384d72ea3cd 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -788,7 +788,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
 
 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
-	*cqb = mlx5_vzalloc(*inlen);
+	*cqb = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_db;
@@ -884,7 +884,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 
 	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
 		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
-	*cqb = mlx5_vzalloc(*inlen);
+	*cqb = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*cqb) {
 		err = -ENOMEM;
 		goto err_buf;
@@ -1314,7 +1314,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 
 	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
 		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto ex_resize;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index f1b56de64871..95db929bdc34 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -218,7 +218,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
 			(struct ib_pma_portcounters_ext *)(out_mad->data + 40);
 		int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
 
-		out_cnt = mlx5_vzalloc(sz);
+		out_cnt = kvzalloc(sz, GFP_KERNEL);
 		if (!out_cnt)
 			return IB_MAD_RESULT_FAILURE;
 
@@ -231,7 +231,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
 			(struct ib_pma_portcounters *)(out_mad->data + 40);
 		int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 
-		out_cnt = mlx5_vzalloc(sz);
+		out_cnt = kvzalloc(sz, GFP_KERNEL);
 		if (!out_cnt)
 			return IB_MAD_RESULT_FAILURE;
 
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d45772da0963..a7f2e60085c4 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -60,8 +60,7 @@
 #include "cmd.h"
 
 #define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "2.2-1"
-#define DRIVER_RELDATE	"Feb 2014"
+#define DRIVER_VERSION "5.0-0"
 
 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
 MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@ -70,7 +69,7 @@ MODULE_VERSION(DRIVER_VERSION);
 
 static char mlx5_version[] =
 	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
-	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
+	DRIVER_VERSION "\n";
 
 enum {
 	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
@@ -224,8 +223,8 @@ static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed,
 	return 0;
 }
 
-static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
-				 struct ib_port_attr *props)
+static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
+				struct ib_port_attr *props)
 {
 	struct mlx5_ib_dev *dev = to_mdev(device);
 	struct mlx5_core_dev *mdev = dev->mdev;
@@ -233,12 +232,14 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 	enum ib_mtu ndev_ib_mtu;
 	u16 qkey_viol_cntr;
 	u32 eth_prot_oper;
+	int err;
 
 	/* Possible bad flows are checked before filling out props so in case
 	 * of an error it will still be zeroed out.
 	 */
-	if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num))
-		return;
+	err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
+	if (err)
+		return err;
 
 	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
 				 &props->active_width);
@@ -259,7 +260,7 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 
 	ndev = mlx5_ib_get_netdev(device, port_num);
 	if (!ndev)
-		return;
+		return 0;
 
 	if (mlx5_lag_is_active(dev->mdev)) {
 		rcu_read_lock();
@@ -282,75 +283,49 @@ static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 	dev_put(ndev);
 
 	props->active_mtu	= min(props->max_mtu, ndev_ib_mtu);
+	return 0;
 }
 
-static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
-				     const struct ib_gid_attr *attr,
-				     void *mlx5_addr)
+static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
+			 unsigned int index, const union ib_gid *gid,
+			 const struct ib_gid_attr *attr)
 {
-#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
-	char *mlx5_addr_l3_addr	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
-					       source_l3_address);
-	void *mlx5_addr_mac	= MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
-					       source_mac_47_32);
-
-	if (!gid)
-		return;
+	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
+	u8 roce_version = 0;
+	u8 roce_l3_type = 0;
+	bool vlan = false;
+	u8 mac[ETH_ALEN];
+	u16 vlan_id = 0;
 
-	ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);
+	if (gid) {
+		gid_type = attr->gid_type;
+		ether_addr_copy(mac, attr->ndev->dev_addr);
 
-	if (is_vlan_dev(attr->ndev)) {
-		MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
-		MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
+		if (is_vlan_dev(attr->ndev)) {
+			vlan = true;
+			vlan_id = vlan_dev_vlan_id(attr->ndev);
+		}
 	}
 
-	switch (attr->gid_type) {
+	switch (gid_type) {
 	case IB_GID_TYPE_IB:
-		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
+		roce_version = MLX5_ROCE_VERSION_1;
 		break;
 	case IB_GID_TYPE_ROCE_UDP_ENCAP:
-		MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
+		roce_version = MLX5_ROCE_VERSION_2;
+		if (ipv6_addr_v4mapped((void *)gid))
+			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
+		else
+			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
 		break;
 
 	default:
-		WARN_ON(true);
-	}
-
-	if (attr->gid_type != IB_GID_TYPE_IB) {
-		if (ipv6_addr_v4mapped((void *)gid))
-			MLX5_SET_RA(mlx5_addr, roce_l3_type,
-				    MLX5_ROCE_L3_TYPE_IPV4);
-		else
-			MLX5_SET_RA(mlx5_addr, roce_l3_type,
-				    MLX5_ROCE_L3_TYPE_IPV6);
+		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
 	}
 
-	if ((attr->gid_type == IB_GID_TYPE_IB) ||
-	    !ipv6_addr_v4mapped((void *)gid))
-		memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
-	else
-		memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
-}
-
-static int set_roce_addr(struct ib_device *device, u8 port_num,
-			 unsigned int index,
-			 const union ib_gid *gid,
-			 const struct ib_gid_attr *attr)
-{
-	struct mlx5_ib_dev *dev = to_mdev(device);
-	u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
-	u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
-	void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
-	enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
-
-	if (ll != IB_LINK_LAYER_ETHERNET)
-		return -EINVAL;
-
-	ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
-
-	MLX5_SET(set_roce_address_in, in, roce_address_index, index);
-	MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
-	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
+	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
+				      roce_l3_type, gid->raw, mac, vlan,
+				      vlan_id);
 }
 
 static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
@@ -358,13 +333,13 @@ static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
 			   const struct ib_gid_attr *attr,
 			   __always_unused void **context)
 {
-	return set_roce_addr(device, port_num, index, gid, attr);
+	return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
 }
 
 static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
 			   unsigned int index, __always_unused void **context)
 {
-	return set_roce_addr(device, port_num, index, NULL, NULL);
+	return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
 }
 
 __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
@@ -440,7 +415,7 @@ static void get_atomic_caps(struct mlx5_ib_dev *dev,
 	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
 	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
 	u8 atomic_req_8B_endianness_mode =
-		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);
+		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
 
 	/* Check if HW supports 8 bytes standard atomic operations and capable
 	 * of host endianness respond
@@ -979,20 +954,31 @@ out:
 int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 		       struct ib_port_attr *props)
 {
+	unsigned int count;
+	int ret;
+
 	switch (mlx5_get_vport_access_method(ibdev)) {
 	case MLX5_VPORT_ACCESS_METHOD_MAD:
-		return mlx5_query_mad_ifc_port(ibdev, port, props);
+		ret = mlx5_query_mad_ifc_port(ibdev, port, props);
+		break;
 
 	case MLX5_VPORT_ACCESS_METHOD_HCA:
-		return mlx5_query_hca_port(ibdev, port, props);
+		ret = mlx5_query_hca_port(ibdev, port, props);
+		break;
 
 	case MLX5_VPORT_ACCESS_METHOD_NIC:
-		mlx5_query_port_roce(ibdev, port, props);
-		return 0;
+		ret = mlx5_query_port_roce(ibdev, port, props);
+		break;
 
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+	}
+
+	if (!ret && props) {
+		count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
+		props->gid_tbl_len -= count;
 	}
+	return ret;
 }
 
 static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
@@ -2263,7 +2249,7 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
 	if (!is_valid_attr(dev->mdev, flow_attr))
 		return ERR_PTR(-EINVAL);
 
-	spec = mlx5_vzalloc(sizeof(*spec));
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
 	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
 	if (!handler || !spec) {
 		err = -ENOMEM;
@@ -2979,6 +2965,18 @@ error_0:
 	return ret;
 }
 
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+	switch (umr_fence_cap) {
+	case MLX5_CAP_UMR_FENCE_NONE:
+		return MLX5_FENCE_MODE_NONE;
+	case MLX5_CAP_UMR_FENCE_SMALL:
+		return MLX5_FENCE_MODE_INITIATOR_SMALL;
+	default:
+		return MLX5_FENCE_MODE_STRONG_ORDERING;
+	}
+}
+
 static int create_dev_resources(struct mlx5_ib_resources *devr)
 {
 	struct ib_srq_init_attr attr;
@@ -3456,7 +3454,7 @@ static int mlx5_ib_query_q_counters(struct mlx5_ib_dev *dev,
 	__be32 val;
 	int ret, i;
 
-	out = mlx5_vzalloc(outlen);
+	out = kvzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
 
@@ -3485,7 +3483,7 @@ static int mlx5_ib_query_cong_counters(struct mlx5_ib_dev *dev,
 	int ret, i;
 	int offset = port->cnts.num_q_counters;
 
-	out = mlx5_vzalloc(outlen);
+	out = kvzalloc(outlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
 
@@ -3530,6 +3528,11 @@ static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
 	return num_counters;
 }
 
+static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
+{
+	return mlx5_rdma_netdev_free(netdev);
+}
+
 static struct net_device*
 mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
 			  u8 port_num,
@@ -3538,16 +3541,19 @@ mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
 			  unsigned char name_assign_type,
 			  void (*setup)(struct net_device *))
 {
+	struct net_device *netdev;
+	struct rdma_netdev *rn;
+
 	if (type != RDMA_NETDEV_IPOIB)
 		return ERR_PTR(-EOPNOTSUPP);
 
-	return mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
-				      name, setup);
-}
-
-static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
-{
-	return mlx5_rdma_netdev_free(netdev);
+	netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
+					name, setup);
+	if (likely(!IS_ERR_OR_NULL(netdev))) {
+		rn = netdev_priv(netdev);
+		rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
+	}
+	return netdev;
 }
 
 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
@@ -3680,8 +3686,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
 	dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
 	dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
-	dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
-	dev->ib_dev.free_rdma_netdev	= mlx5_ib_free_rdma_netdev;
+	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
+		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
+
 	if (mlx5_core_is_pf(mdev)) {
 		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
 		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
@@ -3693,6 +3700,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 
 	mlx5_ib_internal_fill_odp_caps(dev);
 
+	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
 	if (MLX5_CAP_GEN(mdev, imaicl)) {
 		dev->ib_dev.alloc_mw		= mlx5_ib_alloc_mw;
 		dev->ib_dev.dealloc_mw		= mlx5_ib_dealloc_mw;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 38c877bc45e5..bdcf25410c99 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
 	struct mlx5_ib_wq	rq;
 
 	u8			sq_signal_bits;
-	u8			fm_cache;
+	u8			next_fence;
 	struct mlx5_ib_wq	sq;
 
 	/* serialize qp state modifications
@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_port	*port;
 	struct mlx5_sq_bfreg	bfreg;
 	struct mlx5_sq_bfreg	fp_bfreg;
+	u8			umr_fence;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 366433f71b58..763bb5b36144 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1110,7 +1110,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
 
 	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
 		sizeof(*pas) * ((npages + 1) / 2) * 2;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_1;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 93959e1e43a3..0889ff367c86 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -823,7 +823,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
-	*in = mlx5_vzalloc(*inlen);
+	*in = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*in) {
 		err = -ENOMEM;
 		goto err_umem;
@@ -931,7 +931,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
 	*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
 		 MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
-	*in = mlx5_vzalloc(*inlen);
+	*in = kvzalloc(*inlen, GFP_KERNEL);
 	if (!*in) {
 		err = -ENOMEM;
 		goto err_buf;
@@ -1060,7 +1060,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 		return err;
 
 	inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err_umem;
@@ -1140,7 +1140,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	u32 rq_pas_size = get_rq_pas_size(qpc);
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -1193,7 +1193,7 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -1372,7 +1372,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	}
 
 	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -1633,7 +1633,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			return err;
 	} else {
-		in = mlx5_vzalloc(inlen);
+		in = kvzalloc(inlen, GFP_KERNEL);
 		if (!in)
 			return -ENOMEM;
 
@@ -2164,7 +2164,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -2189,7 +2189,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -2434,7 +2434,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -2479,7 +2479,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
 	}
 }
 
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
-{
-	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
-		     wr->send_flags & IB_SEND_FENCE))
-		return MLX5_FENCE_MODE_STRONG_ORDERING;
-
-	if (unlikely(fence)) {
-		if (wr->send_flags & IB_SEND_FENCE)
-			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
-		else
-			return fence;
-	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
-		return MLX5_FENCE_MODE_FENCE;
-	}
-
-	return 0;
-}
-
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 		     struct mlx5_wqe_ctrl_seg **ctrl,
 		     struct ib_send_wr *wr, unsigned *idx,
@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 
 static void finish_wqe(struct mlx5_ib_qp *qp,
 		       struct mlx5_wqe_ctrl_seg *ctrl,
 		       u8 size, unsigned idx, u64 wr_id,
-		       int nreq, u8 fence, u8 next_fence,
-		       u32 mlx5_opcode)
+		       int nreq, u8 fence, u32 mlx5_opcode)
 {
 	u8 opmod = 0;
 
@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
 					     mlx5_opcode | ((u32)opmod << 24));
 	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
 	ctrl->fm_ce_se |= fence;
-	qp->fm_cache = next_fence;
 	if (unlikely(qp->wq_sig))
 		ctrl->signature = wq_sig(ctrl);
 
@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		fence = qp->fm_cache;
 		num_sge = wr->num_sge;
 		if (unlikely(num_sge > qp->sq.max_gs)) {
 			mlx5_ib_warn(dev, "\n");
@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
+		if (wr->opcode == IB_WR_LOCAL_INV ||
+		    wr->opcode == IB_WR_REG_MR) {
+			fence = dev->umr_fence;
+			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+		} else if (wr->send_flags & IB_SEND_FENCE) {
+			if (qp->next_fence)
+				fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+			else
+				fence = MLX5_FENCE_MODE_FENCE;
+		} else {
+			fence = qp->next_fence;
+		}
+
 		switch (ibqp->qp_type) {
 		case IB_QPT_XRC_INI:
 			xrc = seg;
@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				goto out;
 
 			case IB_WR_LOCAL_INV:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
 				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
 				set_linv_wr(qp, &seg, &size);
@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				break;
 
 			case IB_WR_REG_MR:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_REG_MR;
 				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
 				err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_UMR);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_UMR);
 				/*
 				 * SET_PSV WQEs are not signaled and solicited
 				 * on error
@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_SET_PSV);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_SET_PSV);
 				err = begin_wqe(qp, &seg, &ctrl, wr,
 						&idx, &size, nreq);
 				if (err) {
@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
 						 mr->sig->psv_wire.psv_idx, &seg,
 						 &size);
@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_SET_PSV);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_SET_PSV);
+				qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				num_sge = 0;
 				goto skip_psv;
 
@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 		}
 
-		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
-			   get_fence(fence, wr), next_fence,
+		qp->next_fence = next_fence;
+		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
 			   mlx5_ib_opcode[wr->opcode]);
 skip_psv:
 		if (0)
@@ -4294,7 +4281,7 @@ static int query_raw_packet_qp_sq_state(struct mlx5_ib_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(query_sq_out);
-	out = mlx5_vzalloc(inlen);
+	out = kvzalloc(inlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
 
@@ -4321,7 +4308,7 @@ static int query_raw_packet_qp_rq_state(struct mlx5_ib_dev *dev,
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(query_rq_out);
-	out = mlx5_vzalloc(inlen);
+	out = kvzalloc(inlen, GFP_KERNEL);
 	if (!out)
 		return -ENOMEM;
 
@@ -4625,7 +4612,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	dev = to_mdev(pd->device);
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
@@ -4855,7 +4842,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
 		return ERR_PTR(-ENOMEM);
 
 	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		err = -ENOMEM;
 		goto err;
@@ -4934,7 +4921,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 		return -EOPNOTSUPP;
 
 	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
-	in = mlx5_vzalloc(inlen);
+	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
 
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 7cb145f9a6db..43707b101f47 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -127,7 +127,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 		goto err_umem;
 	}
 
-	in->pas = mlx5_vzalloc(sizeof(*in->pas) * ncont);
+	in->pas = kvzalloc(sizeof(*in->pas) * ncont, GFP_KERNEL);
 	if (!in->pas) {
 		err = -ENOMEM;
 		goto err_umem;
@@ -189,7 +189,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 	}
 	mlx5_ib_dbg(dev, "srq->buf.page_shift = %d\n", srq->buf.page_shift);
 
-	in->pas = mlx5_vzalloc(sizeof(*in->pas) * srq->buf.npages);
+	in->pas = kvzalloc(sizeof(*in->pas) * srq->buf.npages, GFP_KERNEL);
 	if (!in->pas) {
 		err = -ENOMEM;
 		goto err_buf;
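Beyond the allocator conversion, the qp.c and mlx5_ib.h hunks rework send-queue fencing: the per-WQE get_fence() helper and the qp->fm_cache field are replaced by an inline decision in mlx5_ib_post_send(), driven by qp->next_fence and by dev->umr_fence, which mlx5_ib_add() now derives from the HCA's umr_fence capability through mlx5_get_umr_fence(). A condensed sketch of the new logic, using only names that appear in the patch (the posting loop and error handling are elided):

	/* decide the fence bits for the WQE being posted */
	if (wr->opcode == IB_WR_LOCAL_INV || wr->opcode == IB_WR_REG_MR) {
		/* memory-registration work: fence as strongly as the
		 * device reports UMR WQEs require, and make sure the
		 * following WQE waits for this one */
		fence = dev->umr_fence;
		next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
	} else if (wr->send_flags & IB_SEND_FENCE) {
		/* caller asked for a fence explicitly */
		fence = qp->next_fence ? MLX5_FENCE_MODE_SMALL_AND_FENCE
				       : MLX5_FENCE_MODE_FENCE;
	} else {
		/* inherit the requirement left behind by the previous WQE */
		fence = qp->next_fence;
	}
	/* ... build the WQE ... */
	finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
		   mlx5_ib_opcode[wr->opcode]);
	qp->next_fence = next_fence;	/* persists across post_send calls */

The intent, as reflected in mlx5_get_umr_fence(), is to fence UMR-related WQEs only as strongly as the reported capability requires, rather than applying a fixed fence mode regardless of the device.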