author    | Shiraz, Saleem <shiraz.saleem@intel.com> | 2019-02-11 16:25:02 +0100
committer | Jason Gunthorpe <jgg@mellanox.com>       | 2019-02-11 23:24:55 +0100
commit    | b44e47eb065b65aa7cb2bd9c8d8d5c0009cd1393
tree      | 74ec3a16b43c8a21b004205533b8c7f40d0b27af
parent    | RDMA/cxgb4: Use for_each_sg_dma_page iterator on umem SGL
RDMA/cxgb3: Use for_each_sg_dma_page iterator on umem SGL
Use the for_each_sg_dma_page iterator variant to walk the DMA-mapped umem
SGL and get the page DMA address. This avoids the extra inner loop needed to
iterate over the pages within each SGE when the for_each_sg iterator is used.
Additionally, purge umem->page_shift usage in the driver, as it is only
relevant for ODP MRs. Use the system page size and shift instead.
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Acked-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
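
For context, the following is a minimal sketch (not the driver's code) of the iteration pattern this patch switches to. The helper collect_dma_pages() and its bounds check are illustrative assumptions; for_each_sg_dma_page() and sg_page_iter_dma_address() are the scatterlist APIs the patch uses, and each iteration yields one system-page-sized chunk, which is why the driver can use PAGE_SHIFT instead of umem->page_shift.

/*
 * Illustrative sketch only: walk a DMA-mapped umem SGL one PAGE_SIZE
 * page at a time. collect_dma_pages() is a hypothetical helper, not
 * part of the cxgb3 driver.
 */
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <rdma/ib_umem.h>

static int collect_dma_pages(struct ib_umem *umem, __be64 *pages, int npages)
{
	struct sg_dma_page_iter sg_iter;
	int i = 0;

	/*
	 * Each iteration yields the DMA address of one PAGE_SIZE page,
	 * so no inner loop over sg_dma_len(sg) >> shift is needed.
	 */
	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		if (i == npages)
			return -ENOSPC;
		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
	}

	return i;
}

With the iterator stepping in system-page units, the PBL fill in iwch_reg_user_mr() reduces to the single loop shown in the diff below, flushing through iwch_write_pbl() whenever the pages buffer holds PAGE_SIZE / sizeof(*pages) entries.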
Diffstat (limited to 'drivers/infiniband/hw/cxgb3/iwch_provider.c')
-rw-r--r-- | drivers/infiniband/hw/cxgb3/iwch_provider.c | 29
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 4cc9a6ae2139..80dff6804e48 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -516,14 +516,13 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				      u64 virt, int acc, struct ib_udata *udata)
 {
 	__be64 *pages;
-	int shift, n, len;
-	int i, k, entry;
+	int shift, n, i;
 	int err = 0;
 	struct iwch_dev *rhp;
 	struct iwch_pd *php;
 	struct iwch_mr *mhp;
 	struct iwch_reg_user_mr_resp uresp;
-	struct scatterlist *sg;
+	struct sg_dma_page_iter sg_iter;
 	pr_debug("%s ib_pd %p\n", __func__, pd);
 
 	php = to_iwch_pd(pd);
@@ -541,7 +540,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		return ERR_PTR(err);
 	}
 
-	shift = mhp->umem->page_shift;
+	shift = PAGE_SHIFT;
 
 	n = mhp->umem->nmap;
 
@@ -557,19 +556,15 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
 	i = n = 0;
 
-	for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {
-		len = sg_dma_len(sg) >> shift;
-		for (k = 0; k < len; ++k) {
-			pages[i++] = cpu_to_be64(sg_dma_address(sg) +
-						 (k << shift));
-			if (i == PAGE_SIZE / sizeof *pages) {
-				err = iwch_write_pbl(mhp, pages, i, n);
-				if (err)
-					goto pbl_done;
-				n += i;
-				i = 0;
-			}
-		}
+	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
+		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+		if (i == PAGE_SIZE / sizeof *pages) {
+			err = iwch_write_pbl(mhp, pages, i, n);
+			if (err)
+				goto pbl_done;
+			n += i;
+			i = 0;
+		}
 	}
 
 	if (i)