author		Keith Busch <kbusch@kernel.org>		2020-04-28 16:21:56 +0200
committer	Jens Axboe <axboe@kernel.dk>		2020-05-10 00:18:36 +0200
commit		74943d45eef4db64b1e5c9f7ad1d018576e113c5 (patch)
tree		1a0d4c8bc6d51ede2b4d4c66f6198b847bc537de
parent		nvme: flush scan work on passthrough commands (diff)
nvme-pci: remove volatile cqes
The completion queue entry is not volatile once the phase is confirmed.
Remove the volatile keywords and check the phase using the appropriate
READ_ONCE() accessor, allowing the compiler to optimize the remaining
completion path.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
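To illustrate the pattern the patch adopts, here is a minimal, compilable
userspace sketch. The cq_entry type and the cqe_pending() helper are
simplified stand-ins invented for this example (the driver's real types
live in drivers/nvme/host/pci.c, and the sketch also omits the
le16_to_cpu() byte-swap); READ_ONCE() is approximated with the volatile
cast that forms the core of the kernel macro. Only the status word that
carries the phase bit needs that treatment; once the phase matches, the
entry is stable, so the remaining fields can be plain non-volatile memory.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Userspace stand-in for the kernel's READ_ONCE(): the volatile
	 * cast forces a single, untorn load and keeps the compiler from
	 * caching the value across iterations of a polling loop.  (Uses
	 * the GNU C typeof extension, as the kernel does.)
	 */
	#define READ_ONCE(x) (*(volatile typeof(x) *)&(x))

	struct cq_entry {	/* hypothetical stand-in for struct nvme_completion */
		uint16_t command_id;
		uint16_t status;	/* bit 0 carries the phase */
	};

	/*
	 * Mirrors the shape of the patched nvme_cqe_pending(): only the
	 * status load goes through READ_ONCE(); everything else is
	 * ordinary memory the optimizer may schedule freely.
	 */
	static inline int cqe_pending(struct cq_entry *cqe, uint16_t phase)
	{
		return (READ_ONCE(cqe->status) & 1) == phase;
	}

	int main(void)
	{
		struct cq_entry e = { .command_id = 7, .status = 1 };

		if (cqe_pending(&e, 1))
			printf("entry for command %u is valid\n",
			       (unsigned)e.command_id);
		return 0;
	}

With volatile gone from the struct, later reads such as cqe->command_id in
the completion path compile to ordinary loads the compiler can combine.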
-rw-r--r--	drivers/nvme/host/pci.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e13c370de830..e95c7465c7bd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -166,7 +166,7 @@ struct nvme_queue {
 	void *sq_cmds;
 	/* only used for poll queues: */
 	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
-	volatile struct nvme_completion *cqes;
+	struct nvme_completion *cqes;
 	dma_addr_t sq_dma_addr;
 	dma_addr_t cq_dma_addr;
 	u32 __iomem *q_db;
@@ -922,8 +922,9 @@ static void nvme_pci_complete_rq(struct request *req)
 /* We read the CQE phase first to check if the rest of the entry is valid */
 static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
 {
-	return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
-			nvmeq->cq_phase;
+	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];
+
+	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
 }
 
 static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
@@ -944,7 +945,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
 
 static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 {
-	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
+	struct nvme_completion *cqe = &nvmeq->cqes[idx];
 	struct request *req;
 
 	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
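The entry can safely lose its volatile qualifier because of the publication
protocol: the controller fully writes a CQE and only then makes the flipped
phase bit observable, so once nvme_cqe_pending() sees the expected phase,
the rest of the entry can no longer change under the reader. Below is a
self-contained C11 model of that handshake; the completion_entry type and
the produce()/pending() helpers are hypothetical names invented for this
sketch, and acquire/release atomics stand in for the device DMA and
ordering semantics the real driver relies on.

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct completion_entry {	/* hypothetical model, not the driver's type */
		uint16_t command_id;		/* plain field: stable once published */
		_Atomic uint16_t status;	/* bit 0 carries the phase */
	};

	static struct completion_entry cqe;

	/* Producer (modeling the controller): fill the entry, then
	 * publish it by storing the status word, phase bit included,
	 * with release ordering. */
	static void produce(uint16_t id, uint16_t status_with_phase)
	{
		cqe.command_id = id;
		atomic_store_explicit(&cqe.status, status_with_phase,
				      memory_order_release);
	}

	/* Consumer (modeling nvme_cqe_pending()): poll the phase bit
	 * with an acquire load; once it matches, every earlier producer
	 * write to the entry is visible, so the plain fields may be read
	 * without any volatile qualifier. */
	static int pending(uint16_t phase)
	{
		return (atomic_load_explicit(&cqe.status,
					     memory_order_acquire) & 1) == phase;
	}

	int main(void)
	{
		produce(42, 1);		/* status 1: phase bit set */
		if (pending(1))
			printf("completion for command %u\n",
			       (unsigned)cqe.command_id);
		return 0;
	}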