author:    Bob Pearson <rpearsonhpe@gmail.com>    2022-08-05 20:31:54 +0200
committer: Jason Gunthorpe <jgg@nvidia.com>       2022-09-27 15:15:24 +0200
commit:    58651bbb30f87dab474eff31ab564391aa6ea1f3 (patch)
tree:      9d1ce1035b99118c499cbf88de77263f7b45dd69 /drivers/infiniband
parent:    RDMA/rxe: Add send_common_ack() helper (diff)
RDMA/rxe: Set pd early in mr alloc routines
Move the setting of pd in mr objects ahead of any possible errors so that
it is always set when rxe_mr_cleanup() runs, avoiding seg faults when
rxe_put(mr_pd(mr)) is called.
Fixes: cf40367961d8 ("RDMA/rxe: Move mr cleanup code to rxe_mr_cleanup()")
Link: https://lore.kernel.org/r/20220805183153.32007-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Reviewed-by: Li Zhijian <lizhijian@fujitsu.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
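
The ordering matters because rxe_cleanup() invokes rxe_mr_cleanup() for every
mr object, including ones whose init routine failed partway, so the pd
back-pointer must be valid before the first failure point. Below is a minimal,
self-contained userspace sketch of that bug class and of the fix's ordering;
the struct and function names are illustrative stand-ins, not the driver's
code.

/* Sketch only: models mr->ibmr.pd with mr.pd and rxe_put() with a
 * refcount decrement.  Build with: cc -o demo demo.c */
#include <stdio.h>

struct pd { int refcount; };
struct mr { struct pd *pd; };           /* stands in for mr->ibmr.pd */

/* Mirrors the role of rxe_mr_cleanup(): unconditionally drops the pd ref. */
static void mr_cleanup(struct mr *mr)
{
        mr->pd->refcount--;             /* a NULL pd here is the old crash */
}

/* Old shape of the init routines: pd assigned after the failure points. */
static int mr_init(struct mr *mr, struct pd *pd, int fail_early)
{
        if (fail_early)                 /* e.g. pinning user memory fails */
                return -1;              /* mr->pd is still NULL here */
        mr->pd = pd;
        return 0;
}

int main(void)
{
        struct pd pd = { .refcount = 1 };
        struct mr mr = { .pd = NULL };

        pd.refcount++;                  /* plays the role of rxe_get(pd) */
        mr.pd = &pd;                    /* the fix: set pd before anything can fail */
        if (mr_init(&mr, &pd, 1))
                mr_cleanup(&mr);        /* safe; without the early set this
                                         * dereferences a NULL pd */

        printf("pd refcount balanced at %d\n", pd.refcount);
        return 0;
}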
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_loc.h   |  6 +++---
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_mr.c    | 11 ++++-------
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_verbs.c | 12 +++++++-----
3 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 22f6cc31d1d6..c2a5c8814a48 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,10 +64,10 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 
 /* rxe_mr.c */
 u8 rxe_get_next_key(u32 last_key);
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr);
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+void rxe_mr_init_dma(int access, struct rxe_mr *mr);
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr);
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr);
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
 int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
 		enum rxe_mr_copy_dir dir);
 int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 6b0c2e7b8145..502e9ada99b3 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -103,17 +103,16 @@ err1:
 	return -ENOMEM;
 }
 
-void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
+void rxe_mr_init_dma(int access, struct rxe_mr *mr)
 {
 	rxe_mr_init(access, mr);
 
-	mr->ibmr.pd = &pd->ibpd;
 	mr->access = access;
 	mr->state = RXE_MR_STATE_VALID;
 	mr->type = IB_MR_TYPE_DMA;
 }
 
-int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
 		     int access, struct rxe_mr *mr)
 {
 	struct rxe_map		**map;
@@ -125,7 +124,7 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 	int			err;
 	int			i;
 
-	umem = ib_umem_get(pd->ibpd.device, start, length, access);
+	umem = ib_umem_get(&rxe->ib_dev, start, length, access);
 	if (IS_ERR(umem)) {
 		pr_warn("%s: Unable to pin memory region err = %d\n",
 			__func__, (int)PTR_ERR(umem));
@@ -175,7 +174,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
 		}
 	}
 
-	mr->ibmr.pd = &pd->ibpd;
 	mr->umem = umem;
 	mr->access = access;
 	mr->offset = ib_umem_offset(umem);
@@ -194,7 +192,7 @@ err_out:
 	return err;
 }
 
-int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
+int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
 {
 	int err;
 
@@ -205,7 +203,6 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
 	if (err)
 		goto err1;
 
-	mr->ibmr.pd = &pd->ibpd;
 	mr->max_buf = max_pages;
 	mr->state = RXE_MR_STATE_FREE;
 	mr->type = IB_MR_TYPE_MEM_REG;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index da1c484798dd..3d37216609e4 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -903,7 +903,9 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
 		return ERR_PTR(-ENOMEM);
 
 	rxe_get(pd);
-	rxe_mr_init_dma(pd, access, mr);
+	mr->ibmr.pd = ibpd;
+
+	rxe_mr_init_dma(access, mr);
 	rxe_finalize(mr);
 
 	return &mr->ibmr;
@@ -928,8 +930,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 	}
 
 	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
 
-	err = rxe_mr_init_user(pd, start, length, iova, access, mr);
+	err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
 	if (err)
 		goto err3;
 
@@ -938,7 +941,6 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
 	return &mr->ibmr;
 
 err3:
-	rxe_put(pd);
 	rxe_cleanup(mr);
 err2:
 	return ERR_PTR(err);
@@ -962,8 +964,9 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 	}
 
 	rxe_get(pd);
+	mr->ibmr.pd = ibpd;
 
-	err = rxe_mr_init_fast(pd, max_num_sg, mr);
+	err = rxe_mr_init_fast(max_num_sg, mr);
 	if (err)
 		goto err2;
 
@@ -972,7 +975,6 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
 	return &mr->ibmr;
 
 err2:
-	rxe_put(pd);
 	rxe_cleanup(mr);
 err1:
 	return ERR_PTR(err);
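
For reference, the cleanup path this ordering protects looks roughly like the
following. mr_pd() and rxe_mr_cleanup() are real helpers in the rxe driver
around this commit, but the bodies below are an abridged sketch, not verbatim
kernel source.

/* Abridged sketch, not verbatim kernel source. */
static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
        return to_rpd(mr->ibmr.pd);     /* oopses if ibmr.pd was never set */
}

void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
        struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);

        rxe_put(mr_pd(mr));             /* runs even for an mr whose init failed */
        ib_umem_release(mr->umem);      /* ib_umem_release() tolerates NULL */
}

A side effect of the change is visible in the rxe_loc.h hunk: once the verbs
layer owns the pd assignment, rxe_mr_init_user() only needed the pd to reach
the ib_device for ib_umem_get(), so it can take the rxe_dev directly, and
rxe_mr_init_dma()/rxe_mr_init_fast() can drop the pd argument entirely.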