author		Christoph Hellwig <hch@lst.de>	2016-11-10 16:32:34 +0100
committer	Jens Axboe <axboe@fb.com>	2016-11-10 18:06:26 +0100
commit		7bf58533a0bc257edff883619befe7e5a1e8caca (patch)
tree		0ffacc930004c83ee064ae0bb9a035b6744a04e4 /drivers/nvme
parent		nvme: introduce struct nvme_request (diff)
download	linux-7bf58533a0bc257edff883619befe7e5a1e8caca.tar.xz  linux-7bf58533a0bc257edff883619befe7e5a1e8caca.zip
nvme: don't pass the full CQE to nvme_complete_async_event
We only need the status and result fields, and passing them explicitly
makes life a lot easier for the Fibre Channel transport, which doesn't
have a full CQE for the fast path case.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/nvme')
-rw-r--r--	drivers/nvme/host/core.c	19
-rw-r--r--	drivers/nvme/host/nvme.h	4
-rw-r--r--	drivers/nvme/host/pci.c	3
-rw-r--r--	drivers/nvme/host/rdma.c	3
-rw-r--r--	drivers/nvme/target/loop.c	3
5 files changed, 21 insertions, 11 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2fd632bcd975..53584d21c805 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1895,18 +1895,25 @@ static void nvme_async_event_work(struct work_struct *work)
 	spin_unlock_irq(&ctrl->lock);
 }
 
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe)
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res)
 {
-	u16 status = le16_to_cpu(cqe->status) >> 1;
-	u32 result = le32_to_cpu(cqe->result.u32);
+	u32 result = le32_to_cpu(res->u32);
+	bool done = true;
 
-	if (status == NVME_SC_SUCCESS || status == NVME_SC_ABORT_REQ) {
+	switch (le16_to_cpu(status) >> 1) {
+	case NVME_SC_SUCCESS:
+		done = false;
+		/*FALLTHRU*/
+	case NVME_SC_ABORT_REQ:
 		++ctrl->event_limit;
 		schedule_work(&ctrl->async_event_work);
+		break;
+	default:
+		break;
 	}
 
-	if (status != NVME_SC_SUCCESS)
+	if (done)
 		return;
 
 	switch (result & 0xff07) {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 5e64957a9b96..468fc445bf35 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -275,8 +275,8 @@ void nvme_queue_scan(struct nvme_ctrl *ctrl);
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
 
 #define NVME_NR_AERS	1
-void nvme_complete_async_event(struct nvme_ctrl *ctrl,
-		struct nvme_completion *cqe);
+void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
+		union nvme_result *res);
 void nvme_queue_async_events(struct nvme_ctrl *ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index de8e0505d979..51d13d5ec7a8 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -703,7 +703,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		 */
 		if (unlikely(nvmeq->qid == 0 &&
 				cqe.command_id >= NVME_AQ_BLKMQ_DEPTH)) {
-			nvme_complete_async_event(&nvmeq->dev->ctrl, &cqe);
+			nvme_complete_async_event(&nvmeq->dev->ctrl,
+					cqe.status, &cqe.result);
 			continue;
 		}
 
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 0b8a161cf881..c4700efc0390 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1168,7 +1168,8 @@ static int __nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc, int tag)
 	 */
 	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
 			cqe->command_id >= NVME_RDMA_AQ_BLKMQ_DEPTH))
-		nvme_complete_async_event(&queue->ctrl->ctrl, cqe);
+		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
 	else
 		ret = nvme_rdma_process_nvme_rsp(queue, cqe, wc, tag);
 	ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE);
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 757e21a31128..26aa3a5afb0d 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -127,7 +127,8 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 	 */
 	if (unlikely(nvme_loop_queue_idx(iod->queue) == 0 &&
 			cqe->command_id >= NVME_LOOP_AQ_BLKMQ_DEPTH)) {
-		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe);
+		nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe->status,
+				&cqe->result);
 	} else {
 		struct request *rq = blk_mq_rq_from_pdu(iod);
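
For illustration, a minimal sketch of how a transport whose fast path only carries the status and result words (the Fibre Channel case the commit message mentions) could report an async event through the new interface. Only nvme_complete_async_event() and union nvme_result come from this patch; the surrounding function and parameter names are hypothetical.

/*
 * Hypothetical caller, for illustration only: a transport whose
 * fast-path completion carries just the 16-bit status and a 32-bit
 * result word rather than a full struct nvme_completion.  Only
 * nvme_complete_async_event() and union nvme_result are defined by
 * this patch; everything else here is a made-up example.
 */
static void example_transport_async_event(struct nvme_ctrl *ctrl,
		__le16 status, __le32 result)
{
	union nvme_result res = { .u32 = result };

	/* No CQE to hand over any more; pass the two words directly. */
	nvme_complete_async_event(ctrl, status, &res);
}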