Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r-- | drivers/infiniband/hw/mlx5/mr.c | 187
1 file changed, 120 insertions, 67 deletions
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 54a15b5d336d..ec8993a7b3be 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -687,7 +687,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
                              int access_flags)
 {
         struct mlx5_ib_dev *dev = to_mdev(pd->device);
-        struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+        struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
         sg->addr = dma;
         sg->length = ALIGN(sizeof(u64) * n, 64);
@@ -715,7 +715,7 @@ static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
 static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
                                struct ib_send_wr *wr, u32 key)
 {
-        struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr->wr.fast_reg;
+        struct mlx5_umr_wr *umrwr = umr_wr(wr);
 
         wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
         wr->opcode = MLX5_IB_WR_UMR;
@@ -752,7 +752,8 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         struct device *ddev = dev->ib_dev.dma_device;
         struct umr_common *umrc = &dev->umrc;
         struct mlx5_ib_umr_context umr_context;
-        struct ib_send_wr wr, *bad;
+        struct mlx5_umr_wr umrwr;
+        struct ib_send_wr *bad;
         struct mlx5_ib_mr *mr;
         struct ib_sge sg;
         int size;
@@ -798,14 +799,14 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
                 goto free_pas;
         }
 
-        memset(&wr, 0, sizeof(wr));
-        wr.wr_id = (u64)(unsigned long)&umr_context;
-        prep_umr_reg_wqe(pd, &wr, &sg, dma, npages, mr->mmr.key, page_shift,
-                         virt_addr, len, access_flags);
+        memset(&umrwr, 0, sizeof(umrwr));
+        umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+        prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmr.key,
+                         page_shift, virt_addr, len, access_flags);
 
         mlx5_ib_init_umr_context(&umr_context);
         down(&umrc->sem);
-        err = ib_post_send(umrc->qp, &wr, &bad);
+        err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
         if (err) {
                 mlx5_ib_warn(dev, "post send failed, err %d\n", err);
                 goto unmap_dma;
@@ -851,8 +852,8 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
         int size;
         __be64 *pas;
         dma_addr_t dma;
-        struct ib_send_wr wr, *bad;
-        struct mlx5_umr_wr *umrwr = (struct mlx5_umr_wr *)&wr.wr.fast_reg;
+        struct ib_send_wr *bad;
+        struct mlx5_umr_wr wr;
         struct ib_sge sg;
         int err = 0;
         const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
@@ -917,26 +918,26 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
                 dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
                 memset(&wr, 0, sizeof(wr));
-                wr.wr_id = (u64)(unsigned long)&umr_context;
+                wr.wr.wr_id = (u64)(unsigned long)&umr_context;
 
                 sg.addr = dma;
                 sg.length = ALIGN(npages * sizeof(u64),
                                 MLX5_UMR_MTT_ALIGNMENT);
                 sg.lkey = dev->umrc.pd->local_dma_lkey;
 
-                wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
+                wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
                                 MLX5_IB_SEND_UMR_UPDATE_MTT;
-                wr.sg_list = &sg;
-                wr.num_sge = 1;
-                wr.opcode = MLX5_IB_WR_UMR;
-                umrwr->npages = sg.length / sizeof(u64);
-                umrwr->page_shift = PAGE_SHIFT;
-                umrwr->mkey = mr->mmr.key;
-                umrwr->target.offset = start_page_index;
+                wr.wr.sg_list = &sg;
+                wr.wr.num_sge = 1;
+                wr.wr.opcode = MLX5_IB_WR_UMR;
+                wr.npages = sg.length / sizeof(u64);
+                wr.page_shift = PAGE_SHIFT;
+                wr.mkey = mr->mmr.key;
+                wr.target.offset = start_page_index;
 
                 mlx5_ib_init_umr_context(&umr_context);
                 down(&umrc->sem);
-                err = ib_post_send(umrc->qp, &wr, &bad);
+                err = ib_post_send(umrc->qp, &wr.wr, &bad);
                 if (err) {
                         mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
                 } else {
@@ -1122,16 +1123,17 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
         struct umr_common *umrc = &dev->umrc;
         struct mlx5_ib_umr_context umr_context;
-        struct ib_send_wr wr, *bad;
+        struct mlx5_umr_wr umrwr;
+        struct ib_send_wr *bad;
         int err;
 
-        memset(&wr, 0, sizeof(wr));
-        wr.wr_id = (u64)(unsigned long)&umr_context;
-        prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
+        memset(&umrwr.wr, 0, sizeof(umrwr));
+        umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+        prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmr.key);
 
         mlx5_ib_init_umr_context(&umr_context);
         down(&umrc->sem);
-        err = ib_post_send(umrc->qp, &wr, &bad);
+        err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
         if (err) {
                 up(&umrc->sem);
                 mlx5_ib_dbg(dev, "err %d\n", err);
@@ -1151,6 +1153,52 @@ error:
         return err;
 }
 
+static int
+mlx5_alloc_priv_descs(struct ib_device *device,
+                      struct mlx5_ib_mr *mr,
+                      int ndescs,
+                      int desc_size)
+{
+        int size = ndescs * desc_size;
+        int add_size;
+        int ret;
+
+        add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+
+        mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
+        if (!mr->descs_alloc)
+                return -ENOMEM;
+
+        mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
+
+        mr->desc_map = dma_map_single(device->dma_device, mr->descs,
+                                      size, DMA_TO_DEVICE);
+        if (dma_mapping_error(device->dma_device, mr->desc_map)) {
+                ret = -ENOMEM;
+                goto err;
+        }
+
+        return 0;
+err:
+        kfree(mr->descs_alloc);
+
+        return ret;
+}
+
+static void
+mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+{
+        if (mr->descs) {
+                struct ib_device *device = mr->ibmr.device;
+                int size = mr->max_descs * mr->desc_size;
+
+                dma_unmap_single(device->dma_device, mr->desc_map,
+                                 size, DMA_TO_DEVICE);
+                kfree(mr->descs_alloc);
+                mr->descs = NULL;
+        }
+}
+
 static int clean_mr(struct mlx5_ib_mr *mr)
 {
         struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
@@ -1170,6 +1218,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
                 mr->sig = NULL;
         }
 
+        mlx5_free_priv_descs(mr);
+
         if (!umred) {
                 err = destroy_mkey(dev, mr);
                 if (err) {
@@ -1259,6 +1309,14 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
         if (mr_type == IB_MR_TYPE_MEM_REG) {
                 access_mode = MLX5_ACCESS_MODE_MTT;
                 in->seg.log2_page_size = PAGE_SHIFT;
+
+                err = mlx5_alloc_priv_descs(pd->device, mr,
+                                            ndescs, sizeof(u64));
+                if (err)
+                        goto err_free_in;
+
+                mr->desc_size = sizeof(u64);
+                mr->max_descs = ndescs;
         } else if (mr_type == IB_MR_TYPE_SIGNATURE) {
                 u32 psv_index[2];
 
@@ -1315,6 +1373,7 @@ err_destroy_psv:
                         mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                      mr->sig->psv_wire.psv_idx);
         }
+        mlx5_free_priv_descs(mr);
 err_free_sig:
         kfree(mr->sig);
 err_free_in:
@@ -1324,48 +1383,6 @@ err_free:
         return ERR_PTR(err);
 }
 
-struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
-                                                               int page_list_len)
-{
-        struct mlx5_ib_fast_reg_page_list *mfrpl;
-        int size = page_list_len * sizeof(u64);
-
-        mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
-        if (!mfrpl)
-                return ERR_PTR(-ENOMEM);
-
-        mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
-        if (!mfrpl->ibfrpl.page_list)
-                goto err_free;
-
-        mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
-                                                     size, &mfrpl->map,
-                                                     GFP_KERNEL);
-        if (!mfrpl->mapped_page_list)
-                goto err_free;
-
-        WARN_ON(mfrpl->map & 0x3f);
-
-        return &mfrpl->ibfrpl;
-
-err_free:
-        kfree(mfrpl->ibfrpl.page_list);
-        kfree(mfrpl);
-        return ERR_PTR(-ENOMEM);
-}
-
-void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
-{
-        struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
-        struct mlx5_ib_dev *dev = to_mdev(page_list->device);
-        int size = page_list->max_page_list_len * sizeof(u64);
-
-        dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
-                          mfrpl->map);
-        kfree(mfrpl->ibfrpl.page_list);
-        kfree(mfrpl);
-}
-
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
                             struct ib_mr_status *mr_status)
 {
@@ -1406,3 +1423,39 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 done:
         return ret;
 }
+
+static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
+{
+        struct mlx5_ib_mr *mr = to_mmr(ibmr);
+        __be64 *descs;
+
+        if (unlikely(mr->ndescs == mr->max_descs))
+                return -ENOMEM;
+
+        descs = mr->descs;
+        descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+
+        return 0;
+}
+
+int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
+                      struct scatterlist *sg,
+                      int sg_nents)
+{
+        struct mlx5_ib_mr *mr = to_mmr(ibmr);
+        int n;
+
+        mr->ndescs = 0;
+
+        ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
+                                   mr->desc_size * mr->max_descs,
+                                   DMA_TO_DEVICE);
+
+        n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);
+
+        ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
+                                      mr->desc_size * mr->max_descs,
+                                      DMA_TO_DEVICE);
+
+        return n;
+}
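
A note on the refactoring in the first hunks above: the old code reached the
mlx5-specific UMR fields by casting &wr->wr.fast_reg, while the new code
embeds a struct ib_send_wr inside struct mlx5_umr_wr and recovers the
container with the umr_wr() helper. What follows is a minimal userspace
sketch of that container-of pattern; the struct layouts are simplified
stand-ins, not the real definitions from mlx5_ib.h and rdma/ib_verbs.h.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structs. */
struct ib_send_wr { int opcode; int send_flags; };

struct mlx5_umr_wr {
        struct ib_send_wr wr;   /* embedded base work request */
        unsigned int npages;
        unsigned int page_shift;
        unsigned int mkey;
};

/* container_of: recover the enclosing struct from a member pointer. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct mlx5_umr_wr *umr_wr(struct ib_send_wr *wr)
{
        return container_of(wr, struct mlx5_umr_wr, wr);
}

int main(void)
{
        struct mlx5_umr_wr umrwr = { .npages = 16, .page_shift = 12 };

        /* Callees receive only the embedded &umrwr.wr, just as
         * ib_post_send() does in the patch above... */
        struct ib_send_wr *base = &umrwr.wr;

        /* ...and recover the full UMR request without a blind cast. */
        printf("npages=%u\n", umr_wr(base)->npages);
        return 0;
}

This is also why the patch keeps passing &umrwr.wr (or &wr.wr) to
ib_post_send(): the core only ever sees the base struct, and the driver
gets its private fields back by pointer arithmetic instead of casting
through a union member.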
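
The last two functions added above implement the new memory registration
API: mlx5_ib_map_mr_sg() resets the descriptor count, syncs the DMA-mapped
descriptor array for CPU access, and lets ib_sg_to_pages() split each
scatterlist entry into page-sized blocks, handing every page address to the
mlx5_set_page() callback until max_descs is reached; the second sync then
makes the CPU-written descriptors visible to the device again. Below is a
rough userspace model of that callback contract; the names (struct seg,
fill_pages) are hypothetical stand-ins for the scatterlist machinery, not
kernel APIs.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* One contiguous DMA segment: a stand-in for a scatterlist entry. */
struct seg { uint64_t addr; uint64_t len; };

struct mr { uint64_t descs[8]; unsigned ndescs, max_descs; };

/* Mirrors the mlx5_set_page() side: append one page address, failing
 * once the preallocated descriptor array is full. */
static int set_page(struct mr *mr, uint64_t addr)
{
        if (mr->ndescs == mr->max_descs)
                return -1;              /* the kernel returns -ENOMEM */
        mr->descs[mr->ndescs++] = addr;
        return 0;
}

/* Mirrors the ib_sg_to_pages() side: walk the segments, emit one
 * page-aligned address per page, and report how many segments were
 * fully mapped. */
static int fill_pages(struct mr *mr, const struct seg *sg, int nents)
{
        for (int i = 0; i < nents; i++)
                for (uint64_t off = 0; off < sg[i].len; off += PAGE_SIZE)
                        if (set_page(mr, (sg[i].addr + off) &
                                         ~(uint64_t)(PAGE_SIZE - 1)))
                                return i;       /* out of descriptors */
        return nents;
}

int main(void)
{
        struct mr mr = { .max_descs = 8 };
        struct seg sg[] = { { 0x10000, 2 * PAGE_SIZE },
                            { 0x40000, PAGE_SIZE } };

        printf("mapped %d/2 segments, %u pages\n",
               fill_pages(&mr, sg, 2), mr.ndescs);
        return 0;
}

The real ib_sg_to_pages() additionally handles segments that start or end
mid-page and returns the number of scatterlist entries it consumed, which
is why mlx5_ib_map_mr_sg() simply forwards its return value.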