author     Jens Axboe <jens.axboe@oracle.com>  2008-10-17 13:58:29 +0200
committer  Jens Axboe <jens.axboe@oracle.com>  2008-12-29 08:28:45 +0100
commit     a31a97381cdf7dceb03b797a8faf9bc8a01c65d1 (patch)
tree       268f86dc23b8338396faf5b891af60a7fb23929f
parent     block: fix empty barrier on write-through w/ ordered tag (diff)
download   linux-a31a97381cdf7dceb03b797a8faf9bc8a01c65d1.tar.xz
           linux-a31a97381cdf7dceb03b797a8faf9bc8a01c65d1.zip
block: don't use plugging on SSD devices
We just want to hand the first bits of IO to the device as fast as possible. Gains a few percent on the IOPS rate.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--  block/blk-core.c  7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index b1fd4f5f07d3..a824e49c0d0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -268,8 +268,7 @@ void __generic_unplug_device(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
-
-	if (!blk_remove_plug(q))
+	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
 		return;
 
 	q->request_fn(q);
@@ -1241,11 +1240,11 @@ get_rq:
 	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
 	    bio_flagged(bio, BIO_CPU_AFFINE))
 		req->cpu = blk_cpu_to_group(smp_processor_id());
-	if (elv_queue_empty(q))
+	if (!blk_queue_nonrot(q) && elv_queue_empty(q))
 		blk_plug_device(q);
 	add_request(q, req);
 out:
-	if (sync)
+	if (sync || blk_queue_nonrot(q))
 		__generic_unplug_device(q);
 	spin_unlock_irq(q->queue_lock);
 	return 0;
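
Queue plugging holds back the first request sent to an empty queue so that follow-up I/O can be merged before the disk starts seeking; on a non-rotational (SSD) device there is no seek penalty, so this patch dispatches the request immediately instead. Below is a minimal, self-contained userspace sketch of that decision, for illustration only: the names (request_queue_sim, queue_submit, queue_run) are hypothetical stand-ins, not the kernel code touched above.

/*
 * Minimal userspace sketch of the plugging decision this patch changes.
 * NOT the kernel API; the real logic lives in block/blk-core.c.
 */
#include <stdbool.h>
#include <stdio.h>

struct request_queue_sim {
	bool nonrot;   /* non-rotational device (SSD), cf. QUEUE_FLAG_NONROT */
	bool plugged;  /* requests are being held back, waiting for merges */
	int  queued;   /* requests sitting in the queue */
};

/* Hand queued requests to the (simulated) driver. */
static void queue_run(struct request_queue_sim *q)
{
	printf("dispatching %d request(s)%s\n", q->queued,
	       q->nonrot ? " (SSD: no plug delay)" : "");
	q->queued = 0;
}

/* Roughly what __make_request() does after building the request. */
static void queue_submit(struct request_queue_sim *q, bool sync)
{
	/* Rotational disks plug an empty queue to give merges a chance. */
	if (!q->nonrot && q->queued == 0)
		q->plugged = true;

	q->queued++;

	/* Sync I/O, and now any I/O to an SSD, is pushed out immediately. */
	if (sync || q->nonrot) {
		q->plugged = false;
		queue_run(q);
	}
}

int main(void)
{
	struct request_queue_sim hdd = { .nonrot = false };
	struct request_queue_sim ssd = { .nonrot = true };

	queue_submit(&hdd, false);	/* plugged: stays queued for merging */
	queue_submit(&ssd, false);	/* dispatched right away */
	return 0;
}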