Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h |  7
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c      | 38
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c |  1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c       | 74
4 files changed, 120 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index c7bb38c931a5..6a8e2696455f 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -386,6 +386,10 @@ struct c4iw_mr {
 	struct c4iw_dev *rhp;
 	u64 kva;
 	struct tpt_attributes attr;
+	u64 *mpl;
+	dma_addr_t mpl_addr;
+	u32 max_mpl_len;
+	u32 mpl_len;
 };
 
 static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
@@ -973,6 +977,9 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
 struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 			    enum ib_mr_type mr_type,
 			    u32 max_num_sg);
+int c4iw_map_mr_sg(struct ib_mr *ibmr,
+		   struct scatterlist *sg,
+		   int sg_nents);
 int c4iw_dealloc_mw(struct ib_mw *mw);
 struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
 struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 140415d31bcc..1e46a260a0fa 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -863,6 +863,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	u32 mmid;
 	u32 stag = 0;
 	int ret = 0;
+	int length = roundup(max_num_sg * sizeof(u64), 32);
 
 	if (mr_type != IB_MR_TYPE_MEM_REG ||
 	    max_num_sg > t4_max_fr_depth(use_dsgl))
@@ -876,6 +877,14 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 		goto err;
 	}
 
+	mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
+				      length, &mhp->mpl_addr, GFP_KERNEL);
+	if (!mhp->mpl) {
+		ret = -ENOMEM;
+		goto err_mpl;
+	}
+	mhp->max_mpl_len = length;
+
 	mhp->rhp = rhp;
 	ret = alloc_pbl(mhp, max_num_sg);
 	if (ret)
@@ -905,11 +914,37 @@ err2:
 	c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 			  mhp->attr.pbl_size << 3);
 err1:
+	dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
+			  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
+err_mpl:
 	kfree(mhp);
 err:
 	return ERR_PTR(ret);
 }
 
+static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
+{
+	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
+
+	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+		return -ENOMEM;
+
+	mhp->mpl[mhp->mpl_len++] = addr;
+
+	return 0;
+}
+
+int c4iw_map_mr_sg(struct ib_mr *ibmr,
+		   struct scatterlist *sg,
+		   int sg_nents)
+{
+	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
+
+	mhp->mpl_len = 0;
+
+	return ib_sg_to_pages(ibmr, sg, sg_nents, c4iw_set_page);
+}
+
 struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
 						     int page_list_len)
 {
@@ -970,6 +1005,9 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 	rhp = mhp->rhp;
 	mmid = mhp->attr.stag >> 8;
 	remove_handle(rhp, &rhp->mmidr, mmid);
+	if (mhp->mpl)
+		dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
+				  mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
 		  mhp->attr.pbl_addr);
 	if (mhp->attr.pbl_size)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index df3b3f1ad066..e292308e82bd 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -557,6 +557,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.bind_mw = c4iw_bind_mw;
 	dev->ibdev.dealloc_mw = c4iw_dealloc_mw;
 	dev->ibdev.alloc_mr = c4iw_alloc_mr;
+	dev->ibdev.map_mr_sg = c4iw_map_mr_sg;
 	dev->ibdev.alloc_fast_reg_page_list = c4iw_alloc_fastreg_pbl;
 	dev->ibdev.free_fast_reg_page_list = c4iw_free_fastreg_pbl;
 	dev->ibdev.attach_mcast = c4iw_multicast_attach;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 1dc9f11a4243..aac75a068768 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -605,10 +605,76 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
 	return 0;
 }
 
+static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
+			struct ib_reg_wr *wr, u8 *len16, u8 t5dev)
+{
+	struct c4iw_mr *mhp = to_c4iw_mr(wr->mr);
+	struct fw_ri_immd *imdp;
+	__be64 *p;
+	int i;
+	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
+	int rem;
+
+	if (mhp->mpl_len > t4_max_fr_depth(use_dsgl))
+		return -EINVAL;
+
+	wqe->fr.qpbinde_to_dcacpu = 0;
+	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
+	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
+	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
+	wqe->fr.len_hi = 0;
+	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
+	wqe->fr.stag = cpu_to_be32(wr->key);
+	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
+	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
+					0xffffffff);
+
+	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
+		struct fw_ri_dsgl *sglp;
+
+		for (i = 0; i < mhp->mpl_len; i++)
+			mhp->mpl[i] = (__force u64)cpu_to_be64(
+						(u64)mhp->mpl[i]);
+
+		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
+		sglp->op = FW_RI_DATA_DSGL;
+		sglp->r1 = 0;
+		sglp->nsge = cpu_to_be16(1);
+		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
+		sglp->len0 = cpu_to_be32(pbllen);
+
+		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
+	} else {
+		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
+		imdp->op = FW_RI_DATA_IMMD;
+		imdp->r1 = 0;
+		imdp->r2 = 0;
+		imdp->immdlen = cpu_to_be32(pbllen);
+		p = (__be64 *)(imdp + 1);
+		rem = pbllen;
+		for (i = 0; i < mhp->mpl_len; i++) {
+			*p = cpu_to_be64((u64)mhp->mpl[i]);
+			rem -= sizeof(*p);
+			if (++p == (__be64 *)&sq->queue[sq->size])
+				p = (__be64 *)sq->queue;
+		}
+		BUG_ON(rem < 0);
+		while (rem) {
+			*p = 0;
+			rem -= sizeof(*p);
+			if (++p == (__be64 *)&sq->queue[sq->size])
+				p = (__be64 *)sq->queue;
+		}
+		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp) +
+				      pbllen, 16);
+	}
+	return 0;
+}
+
 static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
 			 struct ib_send_wr *send_wr, u8 *len16, u8 t5dev)
 {
 	struct ib_fast_reg_wr *wr = fast_reg_wr(send_wr);
 	struct fw_ri_immd *imdp;
 	__be64 *p;
 	int i;
@@ -815,6 +881,14 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					    qhp->rhp->rdev.lldi.adapter_type) ?
 					    1 : 0);
 			break;
+		case IB_WR_REG_MR:
+			fw_opcode = FW_RI_FR_NSMR_WR;
+			swsqe->opcode = FW_RI_FAST_REGISTER;
+			err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16,
+					   is_t5(
+					   qhp->rhp->rdev.lldi.adapter_type) ?
+					   1 : 0);
+			break;
 		case IB_WR_LOCAL_INV:
 			if (wr->send_flags & IB_SEND_FENCE)
 				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
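
For context: this patch wires iw_cxgb4 into the kernel's new memory registration API. The core calls the driver's map_mr_sg method (c4iw_map_mr_sg) to translate a scatterlist into a device page list, and the consumer then posts an IB_WR_REG_MR work request that c4iw_post_send() turns into a FW_RI_FR_NSMR_WR via build_memreg(). The sketch below shows how an upper-layer protocol would drive that path. It is not part of the patch: ulp_fast_register is a hypothetical function, and the code assumes the 4.4-era signature ib_map_mr_sg(mr, sg, sg_nents, page_size) and struct ib_reg_wr introduced alongside this series.

	#include <rdma/ib_verbs.h>

	/* Hypothetical consumer-side sketch (not from this patch): map the
	 * pages backing an S/G list into the MR, then post the
	 * fast-register WR that c4iw_post_send() hands to build_memreg().
	 */
	static int ulp_fast_register(struct ib_qp *qp, struct ib_mr *mr,
				     struct scatterlist *sg, int sg_nents)
	{
		struct ib_reg_wr reg_wr = { };
		struct ib_send_wr *bad_wr;
		int n;

		/* Lands in c4iw_map_mr_sg(): it resets mhp->mpl_len, then
		 * ib_sg_to_pages() feeds each page address to
		 * c4iw_set_page(), filling the DMA-coherent mhp->mpl[]. */
		n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
		if (n != sg_nents)
			return n < 0 ? n : -EINVAL;

		reg_wr.wr.opcode = IB_WR_REG_MR; /* build_memreg() case */
		reg_wr.wr.send_flags = IB_SEND_SIGNALED;
		reg_wr.mr = mr;
		reg_wr.key = mr->rkey;
		reg_wr.access = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE;

		return ib_post_send(qp, &reg_wr.wr, &bad_wr);
	}

Note how the two halves meet: build_memreg() writes the mpl_len page addresses gathered above into the work request, inline as FW_RI_DATA_IMMD when the list fits within max_fr_immd, or as a single FW_RI_DATA_DSGL pointing at mpl_addr on T5 devices when use_dsgl is enabled.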