author     Christoph Hellwig <hch@lst.de>  2016-09-05 12:56:21 +0200
committer  Doug Ledford <dledford@redhat.com>  2016-09-23 19:47:44 +0200
commit     5ef990f06bd7e3cf521b5705d898d8e43d04ea90 (patch)
tree       5fb4ffe4a54d1063fd26d08ac95383ab7f11a311  /drivers/infiniband/core/verbs.c
parent     nvme-rdma: use IB_PD_UNSAFE_GLOBAL_RKEY (diff)
IB/core: remove ib_get_dma_mr
We now only use it from ib_alloc_pd to create a local DMA lkey if the device doesn't provide one, or a global rkey if the ULP requests it.

This patch removes ib_get_dma_mr and open codes the functionality in ib_alloc_pd so that we can simplify the code and prevent abuse of the functionality. As a side effect we can also simplify things by removing the valid access bit check, and the PD refcounting.

In the future I hope to also remove the per-PD global MR entirely by shifting this work into the HW drivers, as one step towards avoiding the struct ib_mr overload for various different use cases.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
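For context, a minimal sketch of the ULP-facing pattern the commit message refers to. It assumes the ib_alloc_pd() flags argument and the IB_PD_UNSAFE_GLOBAL_RKEY / pd->unsafe_global_rkey additions from earlier patches in this series; example_ulp_setup() is a hypothetical caller, not part of this patch.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

/* Hypothetical ULP setup: explicitly request the insecure global rkey. */
static int example_ulp_setup(struct ib_device *device, struct ib_pd **pd_out)
{
	struct ib_pd *pd;

	/*
	 * Passing IB_PD_UNSAFE_GLOBAL_RKEY makes __ib_alloc_pd() create the
	 * internal DMA MR with remote access and publish its rkey; passing 0
	 * only creates the internal MR when the device lacks
	 * IB_DEVICE_LOCAL_DMA_LKEY, and then only for the local lkey.
	 */
	pd = ib_alloc_pd(device, IB_PD_UNSAFE_GLOBAL_RKEY);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	/* Remote peers can now address all of memory via pd->unsafe_global_rkey. */
	pr_info("lkey 0x%x, unsafe global rkey 0x%x\n",
		pd->local_dma_lkey, pd->unsafe_global_rkey);

	*pd_out = pd;
	return 0;
}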
Diffstat (limited to 'drivers/infiniband/core/verbs.c')
-rw-r--r--  drivers/infiniband/core/verbs.c  34
1 file changed, 8 insertions(+), 26 deletions(-)
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index e87b5187729c..d4ef3b1a695a 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -256,12 +256,17 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
 	if (mr_access_flags) {
 		struct ib_mr *mr;
 
-		mr = ib_get_dma_mr(pd, mr_access_flags);
+		mr = pd->device->get_dma_mr(pd, mr_access_flags);
 		if (IS_ERR(mr)) {
 			ib_dealloc_pd(pd);
-			return (struct ib_pd *)mr;
+			return ERR_CAST(mr);
 		}
 
+		mr->device = pd->device;
+		mr->pd = pd;
+		mr->uobject = NULL;
+		mr->need_inval = false;
+
 		pd->__internal_mr = mr;
 
 		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
@@ -288,7 +293,7 @@ void ib_dealloc_pd(struct ib_pd *pd)
 	int ret;
 
 	if (pd->__internal_mr) {
-		ret = ib_dereg_mr(pd->__internal_mr);
+		ret = pd->device->dereg_mr(pd->__internal_mr);
 		WARN_ON(ret);
 		pd->__internal_mr = NULL;
 	}
@@ -1408,29 +1413,6 @@ EXPORT_SYMBOL(ib_resize_cq);
 
 /* Memory regions */
 
-struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
-	struct ib_mr *mr;
-	int err;
-
-	err = ib_check_mr_access(mr_access_flags);
-	if (err)
-		return ERR_PTR(err);
-
-	mr = pd->device->get_dma_mr(pd, mr_access_flags);
-
-	if (!IS_ERR(mr)) {
-		mr->device = pd->device;
-		mr->pd = pd;
-		mr->uobject = NULL;
-		atomic_inc(&pd->usecnt);
-		mr->need_inval = false;
-	}
-
-	return mr;
-}
-EXPORT_SYMBOL(ib_get_dma_mr);
-
 int ib_dereg_mr(struct ib_mr *mr)
 {
 	struct ib_pd *pd = mr->pd;