path: root/drivers/nvme
author     Christoph Hellwig <hch@lst.de>   2020-06-11 08:44:51 +0200
committer  Jens Axboe <axboe@kernel.dk>     2020-06-24 17:15:57 +0200
commit     8446546cc21819478542e9d728bce85e39898fc5 (patch)
tree       1a67381294a054c65f20963b14d13d8d9f37fde3 /drivers/nvme
parent     blk-mq: add a new blk_mq_complete_request_remote API (diff)
nvme-rdma: factor out a nvme_rdma_end_request helper
Factor a small snippet of duplicated code into a new helper in preparation for making this snippet a little bit less trivial.

Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
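For reference, here is the resulting pattern assembled from the hunks below (a reading aid, not a standalone compile unit): the duplicated drop-the-reference-and-complete sequence moves into one helper, and each completion handler calls it on success.

static void nvme_rdma_end_request(struct nvme_rdma_request *req)
{
        struct request *rq = blk_mq_rq_from_pdu(req);

        /* Only the final reference holder completes the request. */
        if (!refcount_dec_and_test(&req->ref))
                return;
        nvme_end_request(rq, req->status, req->result);
}

/* Example caller after the change (see the SEND completion hunk below): */
static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct nvme_rdma_qe *qe =
                container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
        struct nvme_rdma_request *req =
                container_of(qe, struct nvme_rdma_request, sqe);

        if (unlikely(wc->status != IB_WC_SUCCESS))
                nvme_rdma_wr_error(cq, wc, "SEND");
        else
                nvme_rdma_end_request(req);
}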
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/rdma.c  36
1 file changed, 17 insertions(+), 19 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f8f856dc0c67..9c02cf494a82 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1149,6 +1149,15 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
 	queue_work(nvme_reset_wq, &ctrl->err_work);
 }
 
+static void nvme_rdma_end_request(struct nvme_rdma_request *req)
+{
+	struct request *rq = blk_mq_rq_from_pdu(req);
+
+	if (!refcount_dec_and_test(&req->ref))
+		return;
+	nvme_end_request(rq, req->status, req->result);
+}
+
 static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
 		const char *op)
 {
@@ -1173,16 +1182,11 @@ static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct nvme_rdma_request *req =
 		container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
-	struct request *rq = blk_mq_rq_from_pdu(req);
 
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (unlikely(wc->status != IB_WC_SUCCESS))
 		nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
-		return;
-	}
-
-	if (refcount_dec_and_test(&req->ref))
-		nvme_end_request(rq, req->status, req->result);
-
+	else
+		nvme_rdma_end_request(req);
 }
 
 static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
@@ -1547,15 +1551,11 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
 		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
 	struct nvme_rdma_request *req =
 		container_of(qe, struct nvme_rdma_request, sqe);
-	struct request *rq = blk_mq_rq_from_pdu(req);
 
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (unlikely(wc->status != IB_WC_SUCCESS))
 		nvme_rdma_wr_error(cq, wc, "SEND");
-		return;
-	}
-
-	if (refcount_dec_and_test(&req->ref))
-		nvme_end_request(rq, req->status, req->result);
+	else
+		nvme_rdma_end_request(req);
 }
 
 static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
@@ -1694,11 +1694,9 @@ static void nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 			nvme_rdma_error_recovery(queue->ctrl);
 		}
 		/* the local invalidation completion will end the request */
-		return;
+	} else {
+		nvme_rdma_end_request(req);
 	}
-
-	if (refcount_dec_and_test(&req->ref))
-		nvme_end_request(rq, req->status, req->result);
 }
 
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)