author		Matthew Wilcox <matthew.r.wilcox@intel.com>	2013-06-24 18:03:57 +0200
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2013-06-24 19:57:27 +0200
commit		7d8224574cbd2326a6be00f319f5f7597abec3f6 (patch)
tree		fcba1402f98bffa6a38f2a00d104e43e865804c8 /drivers/block
parent		NVMe: Remove "process_cq did something" message (diff)
NVMe: Call nvme_process_cq from submission path
Since we have the queue locked, it makes sense to check if there are any completion queue entries on the queue before we release the lock. If there are, it may save an interrupt and reduce latency for the I/Os that happened to complete. This happens fairly often for some workloads.

Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
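For readers skimming past the diff, here is a minimal, self-contained userspace sketch of the pattern the message describes: reap any completion-queue entries that have already been posted while the submission lock is still held, rather than waiting for the interrupt. It is only a model, not the driver code; every identifier in it (toy_queue, toy_process_cq, toy_make_request, CQ_DEPTH) is hypothetical and stands in for the nvme-core.c functions shown in the diff below.

#include <pthread.h>
#include <stdio.h>

#define CQ_DEPTH 16

struct toy_queue {
	pthread_mutex_t q_lock;		/* models nvmeq->q_lock            */
	int cq[CQ_DEPTH];		/* completion entries; 0 == empty  */
	int cq_head;			/* next completion slot to consume */
};

/* Reap every completion already present; returns 1 if any were found. */
static int toy_process_cq(struct toy_queue *q)
{
	int found = 0;

	while (q->cq[q->cq_head]) {
		printf("completed command %d\n", q->cq[q->cq_head]);
		q->cq[q->cq_head] = 0;
		q->cq_head = (q->cq_head + 1) % CQ_DEPTH;
		found = 1;
	}
	return found;
}

static void toy_make_request(struct toy_queue *q, int cmd)
{
	pthread_mutex_lock(&q->q_lock);
	printf("submitted command %d\n", cmd);

	/* The point of the patch: poll for completions while the lock is
	 * still held, which can save an interrupt for I/Os that have
	 * already finished. */
	toy_process_cq(q);
	pthread_mutex_unlock(&q->q_lock);
}

int main(void)
{
	struct toy_queue q = { .q_lock = PTHREAD_MUTEX_INITIALIZER };

	q.cq[0] = 41;			/* pretend an earlier I/O completed */
	toy_make_request(&q, 42);	/* submission path reaps it in-line */
	return 0;
}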
Diffstat (limited to 'drivers/block')
-rw-r--r--	drivers/block/nvme-core.c	39
1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index eb4a91f3bf41..07d527c66eb4 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -738,25 +738,6 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	return result;
 }
 
-static void nvme_make_request(struct request_queue *q, struct bio *bio)
-{
-	struct nvme_ns *ns = q->queuedata;
-	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
-	int result = -EBUSY;
-
-	spin_lock_irq(&nvmeq->q_lock);
-	if (bio_list_empty(&nvmeq->sq_cong))
-		result = nvme_submit_bio_queue(nvmeq, ns, bio);
-	if (unlikely(result)) {
-		if (bio_list_empty(&nvmeq->sq_cong))
-			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-		bio_list_add(&nvmeq->sq_cong, bio);
-	}
-
-	spin_unlock_irq(&nvmeq->q_lock);
-	put_nvmeq(nvmeq);
-}
-
 static int nvme_process_cq(struct nvme_queue *nvmeq)
 {
 	u16 head, phase;
@@ -797,6 +778,26 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
 	return 1;
 }
 
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct nvme_ns *ns = q->queuedata;
+	struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+	int result = -EBUSY;
+
+	spin_lock_irq(&nvmeq->q_lock);
+	if (bio_list_empty(&nvmeq->sq_cong))
+		result = nvme_submit_bio_queue(nvmeq, ns, bio);
+	if (unlikely(result)) {
+		if (bio_list_empty(&nvmeq->sq_cong))
+			add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+		bio_list_add(&nvmeq->sq_cong, bio);
+	}
+
+	nvme_process_cq(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
+	put_nvmeq(nvmeq);
+}
+
 static irqreturn_t nvme_irq(int irq, void *data)
 {
 	irqreturn_t result;