| author | Xi Wang <wangxi11@huawei.com> | 2020-01-09 13:20:12 +0100 |
|---|---|---|
| committer | Jason Gunthorpe <jgg@mellanox.com> | 2020-01-10 16:17:37 +0100 |
| commit | 626903e9355bdf8d401fc0ac7e7407862c642710 (patch) | |
| tree | 5abc505c557112abb7f420ec8890ce06ea0b5bd0 /drivers/infiniband/hw/hns/hns_roce_main.c | |
| parent | RDMA/hns: Bugfix for posting a wqe with sge (diff) | |
RDMA/hns: Add support for reporting wc as software mode
When the hardware is in its resetting stage, the driver may not be able to
poll back all of the expected work completions, because the hardware will no
longer generate CQEs. This patch allows the driver to compose the expected
WCs on behalf of the hardware during the resetting stage. Once the hardware
has finished resetting, the CQ can be polled from hardware again.
Link: https://lore.kernel.org/r/1578572412-25756-1-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
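
For context, the "software mode" in the subject means that, while the device is resetting, the poll path fabricates flushed work completions for WQEs the hardware will never complete, instead of reading hardware CQEs. The fragment below is only a minimal, hypothetical sketch of that idea; it is not the hw_v2 implementation added elsewhere in this series, it assumes the driver's hns_roce_device.h context, and the function name plus the sq.wrid/sq.wqe_cnt bookkeeping fields are illustrative assumptions rather than names taken from this diff.

```c
#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch only -- not the hns_roce_hw_v2 code from this series.
 * Compose IB_WC_WR_FLUSH_ERR completions in software for send WQEs that are
 * still outstanding on a QP. sw_compose_send_wcs(), sq.wrid and sq.wqe_cnt
 * are assumed/illustrative names.
 */
static int sw_compose_send_wcs(struct hns_roce_qp *hr_qp, int num_entries,
			       struct ib_wc *wc)
{
	int npolled = 0;

	/* Posted but never completed by hardware: report each as flushed. */
	while (npolled < num_entries && hr_qp->sq.tail != hr_qp->sq.head) {
		wc[npolled].wr_id = hr_qp->sq.wrid[hr_qp->sq.tail &
						   (hr_qp->sq.wqe_cnt - 1)];
		wc[npolled].status = IB_WC_WR_FLUSH_ERR;
		wc[npolled].vendor_err = 0;
		wc[npolled].qp = &hr_qp->ibqp;
		hr_qp->sq.tail++;
		npolled++;
	}

	return npolled;
}
```

A reset-aware poll_cq() would call a helper like this for every QP still attached to the CQ (and do the same for receive queues), which is why the hunk below collects the CQs of QPs whose head and tail indices differ.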
Diffstat (limited to 'drivers/infiniband/hw/hns/hns_roce_main.c')
-rw-r--r-- drivers/infiniband/hw/hns/hns_roce_main.c | 47
1 file changed, 47 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 84e4707337a9..6e589f2ca35e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -862,6 +862,50 @@ err_uar_table_free:
 	return ret;
 }
 
+static void check_and_get_armed_cq(struct list_head *cq_list, struct ib_cq *cq)
+{
+	struct hns_roce_cq *hr_cq = to_hr_cq(cq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&hr_cq->lock, flags);
+	if (cq->comp_handler) {
+		if (!hr_cq->is_armed) {
+			hr_cq->is_armed = 1;
+			list_add_tail(&hr_cq->node, cq_list);
+		}
+	}
+	spin_unlock_irqrestore(&hr_cq->lock, flags);
+}
+
+void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev)
+{
+	struct hns_roce_qp *hr_qp;
+	struct hns_roce_cq *hr_cq;
+	struct list_head cq_list;
+	unsigned long flags_qp;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&cq_list);
+
+	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
+	list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {
+		spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);
+		if (hr_qp->sq.tail != hr_qp->sq.head)
+			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);
+		spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);
+
+		spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);
+		if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))
+			check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);
+		spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);
+	}
+
+	list_for_each_entry(hr_cq, &cq_list, node)
+		hns_roce_cq_completion(hr_dev, hr_cq->cqn);
+
+	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
+}
+
 int hns_roce_init(struct hns_roce_dev *hr_dev)
 {
 	int ret;
@@ -932,6 +976,9 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 		}
 	}
 
+	INIT_LIST_HEAD(&hr_dev->qp_list);
+	spin_lock_init(&hr_dev->qp_list_lock);
+
 	ret = hns_roce_register_device(hr_dev);
 	if (ret)
 		goto error_failed_register_device;
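
Design note: hns_roce_handle_device_err() walks every QP tracked on hr_dev->qp_list and, for any queue whose head and tail indices differ (work still outstanding), arms the associated CQ exactly once onto a local list; it then raises a completion event for each armed CQ via hns_roce_cq_completion(), so consumers call poll_cq() and receive the software-composed WCs. For that to work, QPs have to be added to and removed from the device-wide list as they come and go, and the hardware-specific code has to invoke the handler when it learns the device is resetting. Those hook points live in other files of the series; the fragment below is only a hypothetical sketch of what they could look like, and the is_reset flag is an assumption.

```c
/*
 * Hypothetical hook-up only -- the real call sites are in other files of
 * this series (QP setup and the hw_v2 reset notification path), not in
 * hns_roce_main.c.
 */

/* Track a newly created QP on the device-wide list guarded by qp_list_lock. */
static void example_track_qp(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	unsigned long flags;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

/* When the hardware reports that a reset has started, flush out completion
 * events for everything still in flight. */
static void example_reset_notify_down(struct hns_roce_dev *hr_dev)
{
	hr_dev->is_reset = true;	/* assumed flag checked by the poll path */
	hns_roce_handle_device_err(hr_dev);
}
```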