author | Yixing Liu <liuyixing1@huawei.com> | 2021-08-26 15:37:35 +0200
committer | Jason Gunthorpe <jgg@nvidia.com> | 2021-08-26 17:12:21 +0200
commit | ae2854c5d318c8415e2f033b29fcfcb81a9e9aa7 (patch)
tree | 92df4502f6a607a8e1f21c2cb6028d0c4cf4635d /drivers/infiniband
parent | RDMA/hns: Adjust the order in which irq are requested and enabled (diff)
download | linux-ae2854c5d318c8415e2f033b29fcfcb81a9e9aa7.tar.xz, linux-ae2854c5d318c8415e2f033b29fcfcb81a9e9aa7.zip
RDMA/hns: Encapsulate the qp db as a function
Encapsulate the qp db allocation into two functions: one for user mode and one for kernel mode.
Link: https://lore.kernel.org/r/1629985056-57004-7-git-send-email-liangwenpeng@huawei.com
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/hw/hns/hns_roce_qp.c | 135
1 file changed, 82 insertions, 53 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index d45beeda1d81..74c9101d5deb 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -823,75 +823,104 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
 		hns_roce_qp_has_rq(init_attr));
 }
 
+static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
+			    struct hns_roce_qp *hr_qp,
+			    struct ib_qp_init_attr *init_attr,
+			    struct ib_udata *udata,
+			    struct hns_roce_ib_create_qp *ucmd,
+			    struct hns_roce_ib_create_qp_resp *resp)
+{
+	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
+		struct hns_roce_ucontext, ibucontext);
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	int ret;
+
+	if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
+		ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr, &hr_qp->sdb);
+		if (ret) {
+			ibdev_err(ibdev,
+				  "failed to map user SQ doorbell, ret = %d.\n",
+				  ret);
+			goto err_out;
+		}
+		hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
+	}
+
+	if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
+		ret = hns_roce_db_map_user(uctx, ucmd->db_addr, &hr_qp->rdb);
+		if (ret) {
+			ibdev_err(ibdev,
+				  "failed to map user RQ doorbell, ret = %d.\n",
+				  ret);
+			goto err_sdb;
+		}
+		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+	}
+
+	return 0;
+
+err_sdb:
+	if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
+		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+err_out:
+	return ret;
+}
+
+static int alloc_kernel_qp_db(struct hns_roce_dev *hr_dev,
+			      struct hns_roce_qp *hr_qp,
+			      struct ib_qp_init_attr *init_attr)
+{
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	int ret;
+
+	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+		hr_qp->sq.db_reg = hr_dev->mem_base +
+				   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
+	else
+		hr_qp->sq.db_reg = hr_dev->reg_base + hr_dev->sdb_offset +
+				   DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+	hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
+			   DB_REG_OFFSET * hr_dev->priv_uar.index;
+
+	if (kernel_qp_has_rdb(hr_dev, init_attr)) {
+		ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
+		if (ret) {
+			ibdev_err(ibdev,
+				  "failed to alloc kernel RQ doorbell, ret = %d.\n",
+				  ret);
+			return ret;
+		}
+		*hr_qp->rdb.db_record = 0;
+		hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
+	}
+
+	return 0;
+}
+
 static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		       struct ib_qp_init_attr *init_attr,
 		       struct ib_udata *udata,
 		       struct hns_roce_ib_create_qp *ucmd,
 		       struct hns_roce_ib_create_qp_resp *resp)
 {
-	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
-		udata, struct hns_roce_ucontext, ibucontext);
-	struct ib_device *ibdev = &hr_dev->ib_dev;
 	int ret;
 
 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
 		hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
 
 	if (udata) {
-		if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
-			ret = hns_roce_db_map_user(uctx, ucmd->sdb_addr,
-						   &hr_qp->sdb);
-			if (ret) {
-				ibdev_err(ibdev,
-					  "failed to map user SQ doorbell, ret = %d.\n",
-					  ret);
-				goto err_out;
-			}
-			hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
-		}
-
-		if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
-			ret = hns_roce_db_map_user(uctx, ucmd->db_addr,
-						   &hr_qp->rdb);
-			if (ret) {
-				ibdev_err(ibdev,
-					  "failed to map user RQ doorbell, ret = %d.\n",
-					  ret);
-				goto err_sdb;
-			}
-			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
-		}
+		ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
+				       resp);
+		if (ret)
+			return ret;
 	} else {
-		if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
-			hr_qp->sq.db_reg = hr_dev->mem_base +
-					   HNS_ROCE_DWQE_SIZE * hr_qp->qpn;
-		else
-			hr_qp->sq.db_reg =
-				hr_dev->reg_base + hr_dev->sdb_offset +
-				DB_REG_OFFSET * hr_dev->priv_uar.index;
-
-		hr_qp->rq.db_reg = hr_dev->reg_base + hr_dev->odb_offset +
-				   DB_REG_OFFSET * hr_dev->priv_uar.index;
-
-		if (kernel_qp_has_rdb(hr_dev, init_attr)) {
-			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
-			if (ret) {
-				ibdev_err(ibdev,
-					  "failed to alloc kernel RQ doorbell, ret = %d.\n",
-					  ret);
-				goto err_out;
-			}
-			*hr_qp->rdb.db_record = 0;
-			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
-		}
+		ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
+		if (ret)
+			return ret;
 	}
 
 	return 0;
-err_sdb:
-	if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
-		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
-err_out:
-	return ret;
 }
 
 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
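The refactoring above preserves the original error-unwind behaviour: alloc_user_qp_db() maps the SQ doorbell first, then the RQ doorbell, and unmaps the SQ doorbell via the err_sdb label if the second mapping fails, while the slimmed-down alloc_qp_db() only dispatches to the user or kernel helper. The following standalone sketch illustrates that dispatch-plus-unwind pattern. It is not kernel code: all demo_* names and types are hypothetical stand-ins for the hns_roce structures, and it can be built on its own with "cc demo.c".

/*
 * Standalone illustration of the pattern used by this patch:
 * a top-level allocator dispatches to a user-mode or kernel-mode
 * helper, and the user helper unwinds its first mapping through a
 * goto label when the second mapping fails.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the QP's doorbell state. */
struct demo_qp {
	bool sdb_mapped;	/* SQ doorbell mapped */
	bool rdb_mapped;	/* RQ doorbell mapped */
};

/* Pretend mapping helper; "fail" lets a caller force the error path. */
static int demo_map_db(bool *mapped, bool fail)
{
	if (fail)
		return -1;
	*mapped = true;
	return 0;
}

static void demo_unmap_db(bool *mapped)
{
	*mapped = false;
}

/* Mirrors alloc_user_qp_db(): map SQ db, then RQ db, unwind SQ on failure. */
static int demo_alloc_user_qp_db(struct demo_qp *qp, bool fail_rdb)
{
	int ret;

	ret = demo_map_db(&qp->sdb_mapped, false);
	if (ret)
		goto err_out;

	ret = demo_map_db(&qp->rdb_mapped, fail_rdb);
	if (ret)
		goto err_sdb;

	return 0;

err_sdb:
	if (qp->sdb_mapped)
		demo_unmap_db(&qp->sdb_mapped);
err_out:
	return ret;
}

/* Mirrors alloc_kernel_qp_db(): no user mappings, only a record doorbell. */
static int demo_alloc_kernel_qp_db(struct demo_qp *qp)
{
	return demo_map_db(&qp->rdb_mapped, false);
}

/* Mirrors the new alloc_qp_db(): dispatch on whether user data is present. */
static int demo_alloc_qp_db(struct demo_qp *qp, bool udata, bool fail_rdb)
{
	return udata ? demo_alloc_user_qp_db(qp, fail_rdb)
		     : demo_alloc_kernel_qp_db(qp);
}

int main(void)
{
	struct demo_qp a = { 0 }, b = { 0 }, c = { 0 };
	int ret;

	printf("user path, success: %d\n", demo_alloc_qp_db(&a, true, false));

	ret = demo_alloc_qp_db(&b, true, true);
	printf("user path, RQ map fails: %d (SQ db unwound: %s)\n",
	       ret, b.sdb_mapped ? "no" : "yes");

	printf("kernel path: %d\n", demo_alloc_qp_db(&c, false, false));
	return 0;
}

Because the err_sdb/err_out labels now live inside the user helper, the top-level allocator no longer needs the "if (udata && ...)" check in its error path, which is exactly what the hunk above removes.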