Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c       | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c     | 42
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h  |  3
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c       | 14
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c       | 34
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c      | 15
6 files changed, 74 insertions, 44 deletions
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5b974fb97611..15457c9569a7 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -226,7 +226,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
 		break;
 	}
-	wc->slid	   = be16_to_cpu(cqe->slid);
 	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
 	wc->dlid_path_bits = cqe->ml_path;
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -241,10 +240,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	}
 
 	if (ll != IB_LINK_LAYER_ETHERNET) {
+		wc->slid = be16_to_cpu(cqe->slid);
 		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
 		return;
 	}
 
+	wc->slid = 0;
 	vlan_present = cqe->l4_l3_hdr_type & 0x1;
 	roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
 	if (vlan_present) {
@@ -1177,7 +1178,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;
 
-	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+	/* check multiplication overflow */
+	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+		return -EINVAL;
+
+	umem = ib_umem_get(context, ucmd.buf_addr,
+			   (size_t)ucmd.cqe_size * entries,
 			   IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4236c8086820..da091de4e69d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,12 +245,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
 	struct mlx5_ib_multiport_info *mpi;
 	struct mlx5_ib_port *port;
 
+	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+	    ll != IB_LINK_LAYER_ETHERNET) {
+		if (native_port_num)
+			*native_port_num = ib_port_num;
+		return ibdev->mdev;
+	}
+
 	if (native_port_num)
 		*native_port_num = 1;
 
-	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
-		return ibdev->mdev;
-
 	port = &ibdev->port[ib_port_num - 1];
 	if (!port)
 		return NULL;
@@ -3263,7 +3267,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	struct mlx5_ib_dev *ibdev;
 	struct ib_event ibev;
 	bool fatal = false;
-	u8 port = 0;
+	u8 port = (u8)work->param;
 
 	if (mlx5_core_is_mp_slave(work->dev)) {
 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3283,8 +3287,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		port = (u8)work->param;
-
 		/* In RoCE, port up/down events are handled in
 		 * mlx5_netdev_event().
 		 */
@@ -3298,24 +3300,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_PKEY_CHANGE:
 		ibev.event = IB_EVENT_PKEY_CHANGE;
-		port = (u8)work->param;
-
 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
 		break;
 
 	case MLX5_DEV_EVENT_GUID_CHANGE:
 		ibev.event = IB_EVENT_GID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_CLIENT_REREG:
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
 		schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3327,7 +3324,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	ibev.device	      = &ibdev->ib_dev;
 	ibev.element.port_num = port;
 
-	if (port < 1 || port > ibdev->num_ports) {
+	if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
 		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
 		goto out;
 	}
@@ -4863,19 +4860,19 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 	return ib_register_device(&dev->ib_dev, NULL);
 }
 
-static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
-	ib_unregister_device(&dev->ib_dev);
+	destroy_umrc_res(dev);
 }
 
-static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 {
-	return create_umr_res(dev);
+	ib_unregister_device(&dev->ib_dev);
 }
 
-static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 {
-	destroy_umrc_res(dev);
+	return create_umr_res(dev);
 }
 
 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -4985,12 +4982,15 @@ static const struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
 		     mlx5_ib_stage_delay_drop_init,
 		     mlx5_ib_stage_delay_drop_cleanup),
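A note on the resize_user() hunk above: ucmd.cqe_size and entries are both user-controlled, so the patch checks the multiplication before handing the product to ib_umem_get(). The same idiom as a self-contained userspace sketch -- the function name and return convention here are illustrative, not part of the patch:

#include <stdint.h>
#include <stddef.h>

/* Guard a user-controlled cqe_size * entries multiplication, as the
 * resize_user() hunk does. With entries >= 1, the product fits in a
 * size_t only if SIZE_MAX / cqe_size > entries - 1; dividing instead
 * of multiplying means the check itself can never overflow. */
static int cq_buf_size(size_t cqe_size, size_t entries, size_t *out)
{
	if (cqe_size && SIZE_MAX / cqe_size <= entries - 1)
		return -1;	/* would overflow */
	*out = cqe_size * entries;
	return 0;
}

The (size_t) cast in the patch serves the same purpose: the multiplication is performed in the wide type only after it is known to fit.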
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 139385129973..a5272499b600 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -739,8 +739,9 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_CONG_DEBUGFS,
 	MLX5_IB_STAGE_UAR,
 	MLX5_IB_STAGE_BFREG,
+	MLX5_IB_STAGE_PRE_IB_REG_UMR,
 	MLX5_IB_STAGE_IB_REG,
-	MLX5_IB_STAGE_UMR_RESOURCES,
+	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
 	MLX5_IB_STAGE_MAX,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 556e015678de..c51c602f06d6 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -838,7 +838,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 	*umem = ib_umem_get(pd->uobject->context, start, length,
 			    access_flags, 0);
 	err = PTR_ERR_OR_ZERO(*umem);
-	if (err < 0) {
+	if (err) {
+		*umem = NULL;
 		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
 		return err;
 	}
@@ -1415,6 +1416,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		if (err) {
 			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
 			ib_umem_release(mr->umem);
+			mr->umem = NULL;
 			clean_mr(dev, mr);
 			return err;
 		}
@@ -1498,14 +1500,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		u32 key = mr->mmkey.key;
 
 		err = destroy_mkey(dev, mr);
-		kfree(mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
 				     key, err);
 			return err;
 		}
-	} else {
-		mlx5_mr_cache_free(dev, mr);
 	}
 
 	return 0;
@@ -1548,6 +1547,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
 	}
 
+	if (!mr->allocated_from_cache)
+		kfree(mr);
+	else
+		mlx5_mr_cache_free(dev, mr);
+
 	return 0;
 }
 
@@ -1816,7 +1820,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
 	mr->ibmr.length = 0;
-	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
 		if (unlikely(i >= mr->max_descs))
@@ -1828,6 +1831,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 		sg_offset = 0;
 	}
 
+	mr->ndescs = i;
 	if (sg_offset_p)
 		*sg_offset_p = sg_offset;
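The mr.c fixes share one rule: once a umem pointer has been released, or was never valid, zero it so later cleanup paths cannot touch it. A minimal userspace sketch of the err-pointer convention involved -- the macros below are simplified stand-ins for the kernel's helpers, and umem_get() is a hypothetical acquire step, not ib_umem_get() itself:

#include <errno.h>
#include <stdint.h>

/* simplified stand-ins for the kernel's err-pointer helpers */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-4095)
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))

struct umem { int dummy; };

/* hypothetical acquire step that fails the way ib_umem_get() does:
 * by returning an encoded error value rather than NULL */
static struct umem *umem_get(int fail)
{
	static struct umem u;
	return fail ? ERR_PTR(-ENOMEM) : &u;
}

/* the pattern from mr_umem_get(): on failure, reset the caller's
 * out-pointer so an unconditional release path never dereferences
 * (or double-frees) a stale ERR_PTR value */
static int get_umem(struct umem **out, int fail)
{
	*out = umem_get(fail);
	if (IS_ERR(*out)) {
		long err = PTR_ERR(*out);
		*out = NULL;
		return (int)err;
	}
	return 0;
}

The rereg hunk applies the same discipline from the caller's side: after ib_umem_release(mr->umem), mr->umem is set to NULL before clean_mr() runs.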
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 39d24bf694a8..a2e1aa86e133 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1161,7 +1161,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	ib_umem_release(sq->ubuffer.umem);
 }
 
-static int get_rq_pas_size(void *qpc)
+static size_t get_rq_pas_size(void *qpc)
 {
 	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
 	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1177,7 +1177,8 @@ static int get_rq_pas_size(void *qpc)
 }
 
 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
-				   struct mlx5_ib_rq *rq, void *qpin)
+				   struct mlx5_ib_rq *rq, void *qpin,
+				   size_t qpinlen)
 {
 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
 	__be64 *pas;
@@ -1186,9 +1187,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	void *rqc;
 	void *wq;
 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
-	int inlen;
+	size_t rq_pas_size = get_rq_pas_size(qpc);
+	size_t inlen;
 	int err;
-	u32 rq_pas_size = get_rq_pas_size(qpc);
+
+	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
+		return -EINVAL;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -1277,7 +1281,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				u32 *in,
+				u32 *in, size_t inlen,
 				struct ib_pd *pd)
 {
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1309,7 +1313,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 			rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
 		if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
 			rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
-		err = create_raw_packet_qp_rq(dev, rq, in);
+		err = create_raw_packet_qp_rq(dev, rq, in, inlen);
 		if (err)
 			goto err_destroy_sq;
 
@@ -1584,6 +1588,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
 	struct mlx5_ib_create_qp ucmd;
 	struct mlx5_ib_qp_base *base;
+	int mlx5_st;
 	void *qpc;
 	u32 *in;
 	int err;
@@ -1592,6 +1597,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
+	mlx5_st = to_mlx5_st(init_attr->qp_type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
 	if (init_attr->rwq_ind_tbl) {
 		if (!udata)
 			return -ENOSYS;
@@ -1753,7 +1762,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
-	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+	MLX5_SET(qpc, qpc, st, mlx5_st);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -1867,11 +1876,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
+	if (inlen < 0) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
-		err = create_raw_packet_qp(dev, qp, in, pd);
+		err = create_raw_packet_qp(dev, qp, in, inlen, pd);
 	} else {
 		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
 	}
@@ -3095,8 +3109,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		goto out;
 
 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-	    !optab[mlx5_cur][mlx5_new]) {
+	    !optab[mlx5_cur][mlx5_new]) {
+		err = -EINVAL;
 		goto out;
+	}
 
 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
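The create_raw_packet_qp_rq() change is the core of the qp.c patch: the RQ PAS list lives in the tail of a caller-sized command buffer, so the callee is now told the buffer length and rejects a tail that does not fit. A standalone sketch of that kind of guard -- struct layout and names here are hypothetical, and this variant subtracts the header size from the buffer length instead of adding it to the claimed tail, which avoids even the theoretical wrap of the addition form used in the qpinlen check:

#include <errno.h>
#include <stddef.h>

/* hypothetical command layout: fixed header, variable PAS tail */
struct qp_cmd {
	unsigned int	flags;
	size_t		pas_bytes;	/* caller's claim about the tail */
	/* unsigned char pas[]; follows */
};

/* Reject the command unless the header plus the claimed tail fit in
 * what the caller actually handed us; only then is it safe to walk
 * pas_bytes worth of trailing data. */
static int check_qp_cmd(const struct qp_cmd *cmd, size_t buflen)
{
	if (buflen < sizeof(*cmd))
		return -EINVAL;
	if (cmd->pas_bytes > buflen - sizeof(*cmd))
		return -EINVAL;
	return 0;
}

The to_mlx5_st() hunk is the same idea applied to an enum rather than a length: validate the user-influenced QP type once, up front, instead of feeding a negative error code into MLX5_SET() later.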
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 6d5fadad9090..3c7522d025f2 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_srq *srq;
-	int desc_size;
-	int buf_size;
+	size_t desc_size;
+	size_t buf_size;
 	int err;
 	struct mlx5_srq_attr in = {0};
 	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+		return ERR_PTR(-EINVAL);
 	desc_size = roundup_pow_of_two(desc_size);
-	desc_size = max_t(int, 32, desc_size);
+	desc_size = max_t(size_t, 32, desc_size);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+		return ERR_PTR(-EINVAL);
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
-		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
-		    srq->msrq.max_avail_gather);
+	if (buf_size < desc_size)
+		return ERR_PTR(-EINVAL);
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)
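The srq.c hunk applies the same discipline to the SRQ descriptor math: promote the arithmetic to size_t and check every step that can wrap. A self-contained sketch of that computation with explicit guards -- the segment sizes are made-up constants standing in for the real struct sizes, and the loop emulates roundup_pow_of_two():

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define NEXT_SEG_SZ	16u	/* stand-in for sizeof(struct mlx5_wqe_srq_next_seg) */
#define DATA_SEG_SZ	16u	/* stand-in for sizeof(struct mlx5_wqe_data_seg) */

static int srq_buf_size(size_t max_gs, size_t max_wqes, size_t *buf_size)
{
	size_t desc_size;
	size_t rounded = 1;

	/* desc_size = next_seg + max_gs * data_seg, checked before use */
	if (max_gs > (SIZE_MAX - NEXT_SEG_SZ) / DATA_SEG_SZ)
		return -EINVAL;
	desc_size = NEXT_SEG_SZ + max_gs * DATA_SEG_SZ;

	/* round up to a power of two without wrapping */
	while (rounded < desc_size) {
		if (rounded > SIZE_MAX / 2)
			return -EINVAL;
		rounded *= 2;
	}
	desc_size = rounded < 32 ? 32 : rounded;

	/* buf_size = max_wqes * desc_size, again checked */
	if (max_wqes > SIZE_MAX / desc_size)
		return -EINVAL;
	*buf_size = max_wqes * desc_size;
	return 0;
}

The patch's post-hoc checks (desc_size == 0, buf_size < desc_size) reach the same end by detecting a wrap after the fact; the sketch above simply refuses the inputs before multiplying.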

