author    | Konstantin Taranov <kotaranov@microsoft.com> | 2024-01-23 00:22:59 +0100
committer | Leon Romanovsky <leon@kernel.org>            | 2024-01-25 11:03:00 +0100
commit    | 71c8cbfcdc8f1dc651b976d4c12dc9b9fce675c1
tree      | 109f8e21c4adc26a7f9ef7ef34e7a651fbae2bec
parent    | RDMA/hns: Simplify 'struct hns_roce_hem' allocation
RDMA/mana_ib: Introduce mdev_to_gc helper function
Use a helper function to access gdma_context from mana_ib_dev.
This patch removes code repetition and eliminates the need to
explicitly dereference gdma_dev, which was error-prone.
Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://lore.kernel.org/r/1705965781-3235-2-git-send-email-kotaranov@linux.microsoft.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
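
For quick reference, a sketch distilled from the hunks below: the helper added to mana_ib.h and the call-site pattern it replaces throughout the driver.

	/* Helper introduced in drivers/infiniband/hw/mana/mana_ib.h by this patch. */
	static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
	{
		return mdev->gdma_dev->gdma_context;
	}

	/* Typical call-site conversion (e.g. in mana_ib_create_cq()):
	 *
	 *	- gc = mdev->gdma_dev->gdma_context;
	 *	+ gc = mdev_to_gc(mdev);
	 */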
-rw-r--r-- | drivers/infiniband/hw/mana/cq.c      |  4
-rw-r--r-- | drivers/infiniband/hw/mana/main.c    | 30
-rw-r--r-- | drivers/infiniband/hw/mana/mana_ib.h |  5
-rw-r--r-- | drivers/infiniband/hw/mana/mr.c      | 13
-rw-r--r-- | drivers/infiniband/hw/mana/qp.c      | 12
5 files changed, 27 insertions, 37 deletions
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 83ebd070535a..3369e7b665f9 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -16,7 +16,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-	gc = mdev->gdma_dev->gdma_context;
+	gc = mdev_to_gc(mdev);
 
 	if (udata->inlen < sizeof(ucmd))
 		return -EINVAL;
@@ -81,7 +81,7 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	int err;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-	gc = mdev->gdma_dev->gdma_context;
+	gc = mdev_to_gc(mdev);
 
 	err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
 	if (err) {
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index faca092456fa..e0f5138ca088 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -79,17 +79,17 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct gdma_create_pd_req req = {};
 	enum gdma_pd_flags flags = 0;
 	struct mana_ib_dev *dev;
-	struct gdma_dev *mdev;
+	struct gdma_context *gc;
 	int err;
 
 	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-	mdev = dev->gdma_dev;
+	gc = mdev_to_gc(dev);
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
 			     sizeof(resp));
 
 	req.flags = flags;
-	err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
+	err = mana_gd_send_request(gc, sizeof(req), &req,
 				   sizeof(resp), &resp);
 
 	if (err || resp.hdr.status) {
@@ -119,17 +119,17 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct gdma_destory_pd_resp resp = {};
 	struct gdma_destroy_pd_req req = {};
 	struct mana_ib_dev *dev;
-	struct gdma_dev *mdev;
+	struct gdma_context *gc;
 	int err;
 
 	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-	mdev = dev->gdma_dev;
+	gc = mdev_to_gc(dev);
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
 			     sizeof(resp));
 
 	req.pd_handle = pd->pd_handle;
-	err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
+	err = mana_gd_send_request(gc, sizeof(req), &req,
 				   sizeof(resp), &resp);
 
 	if (err || resp.hdr.status) {
@@ -206,13 +206,11 @@ int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
 	struct ib_device *ibdev = ibcontext->device;
 	struct mana_ib_dev *mdev;
 	struct gdma_context *gc;
-	struct gdma_dev *dev;
 	int doorbell_page;
 	int ret;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-	dev = mdev->gdma_dev;
-	gc = dev->gdma_context;
+	gc = mdev_to_gc(mdev);
 
 	/* Allocate a doorbell page index */
 	ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
@@ -238,7 +236,7 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 	int ret;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-	gc = mdev->gdma_dev->gdma_context;
+	gc = mdev_to_gc(mdev);
 
 	ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
 	if (ret)
@@ -322,15 +320,13 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 	size_t max_pgs_create_cmd;
 	struct gdma_context *gc;
 	size_t num_pages_total;
-	struct gdma_dev *mdev;
 	unsigned long page_sz;
 	unsigned int tail = 0;
 	u64 *page_addr_list;
 	void *request_buf;
 	int err;
 
-	mdev = dev->gdma_dev;
-	gc = mdev->gdma_context;
+	gc = mdev_to_gc(dev);
 	hwc = gc->hwc.driver_data;
 
 	/* Hardware requires dma region to align to chosen page size */
@@ -426,10 +422,8 @@ out:
 
 int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
 {
-	struct gdma_dev *mdev = dev->gdma_dev;
-	struct gdma_context *gc;
+	struct gdma_context *gc = mdev_to_gc(dev);
 
-	gc = mdev->gdma_context;
 	ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);
 
 	return mana_gd_destroy_dma_region(gc, gdma_region);
@@ -447,7 +441,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
 	int ret;
 
 	mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
-	gc = mdev->gdma_dev->gdma_context;
+	gc = mdev_to_gc(mdev);
 
 	if (vma->vm_pgoff != 0) {
 		ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
@@ -531,7 +525,7 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
 	req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
 	req.hdr.dev_id = dev->gdma_dev->dev_id;
 
-	err = mana_gd_send_request(dev->gdma_dev->gdma_context, sizeof(req),
+	err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
 				   &req, sizeof(resp), &resp);
 
 	if (err) {
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6bdc0f5498d5..ebb6537620ee 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -142,6 +142,11 @@ struct mana_ib_query_adapter_caps_resp {
 	u32 max_inline_data_size;
 }; /* HW Data */
 
+static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
+{
+	return mdev->gdma_dev->gdma_context;
+}
+
 int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
 				 mana_handle_t *gdma_region);
 
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 351207c60eb6..ee4d4f83467c 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -30,12 +30,9 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
 {
 	struct gdma_create_mr_response resp = {};
 	struct gdma_create_mr_request req = {};
-	struct gdma_dev *mdev = dev->gdma_dev;
-	struct gdma_context *gc;
+	struct gdma_context *gc = mdev_to_gc(dev);
 	int err;
 
-	gc = mdev->gdma_context;
-
 	mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
 			     sizeof(resp));
 	req.pd_handle = mr_params->pd_handle;
@@ -77,12 +74,9 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
 {
 	struct gdma_destroy_mr_response resp = {};
 	struct gdma_destroy_mr_request req = {};
-	struct gdma_dev *mdev = dev->gdma_dev;
-	struct gdma_context *gc;
+	struct gdma_context *gc = mdev_to_gc(dev);
 	int err;
 
-	gc = mdev->gdma_context;
-
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
 			     sizeof(resp));
 
@@ -164,8 +158,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
 	return &mr->ibmr;
 
 err_dma_region:
-	mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
-				   dma_region_handle);
+	mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
 
 err_umem:
 	ib_umem_release(mr->umem);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 21ac9fcadf3f..0c8d6ecfbb2a 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -17,12 +17,10 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 	struct mana_cfg_rx_steer_resp resp = {};
 	mana_handle_t *req_indir_tab;
 	struct gdma_context *gc;
-	struct gdma_dev *mdev;
 	u32 req_buf_size;
 	int i, err;
 
-	gc = dev->gdma_dev->gdma_context;
-	mdev = &gc->mana;
+	gc = mdev_to_gc(dev);
 
 	req_buf_size =
 		sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
@@ -39,7 +37,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
 	req->rx_enable = 1;
 	req->update_default_rxobj = 1;
 	req->default_rxobj = default_rxobj;
-	req->hdr.dev_id = mdev->dev_id;
+	req->hdr.dev_id = gc->mana.dev_id;
 
 	/* If there are more than 1 entries in indirection table, enable RSS */
 	if (log_ind_tbl_size)
@@ -99,6 +97,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 	struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
 	struct mana_ib_dev *mdev =
 		container_of(pd->device, struct mana_ib_dev, ib_dev);
+	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
 	struct mana_ib_create_qp_rss_resp resp = {};
 	struct mana_ib_create_qp_rss ucmd = {};
@@ -109,7 +108,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 	unsigned int ind_tbl_size;
 	struct mana_context *mc;
 	struct net_device *ndev;
-	struct gdma_context *gc;
 	struct mana_ib_cq *cq;
 	struct mana_ib_wq *wq;
 	struct gdma_dev *gd;
@@ -120,7 +118,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
 	u32 port;
 	int ret;
 
-	gc = mdev->gdma_dev->gdma_context;
 	gd = &gc->mana;
 	mc = gd->driver_data;
 
@@ -307,6 +304,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 		rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
 					  ibucontext);
 	struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
+	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_ib_create_qp_resp resp = {};
 	struct mana_ib_create_qp ucmd = {};
 	struct gdma_queue *gdma_cq = NULL;
@@ -450,7 +448,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
 
 err_release_gdma_cq:
 	kfree(gdma_cq);
-	gd->gdma_context->cq_table[send_cq->id] = NULL;
+	gc->cq_table[send_cq->id] = NULL;
 
 err_destroy_wq_obj:
 	mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);