author | Dan Williams <dan.j.williams@intel.com> | 2011-07-23 20:44:25 +0200
committer | Jens Axboe <jaxboe@fusionio.com> | 2011-07-23 20:44:25 +0200
commit | 5757a6d76cdf6dda2a492c09b985c015e86779b1 (patch)
tree | 6356a6353639eb473dd917a1b2062f9e7e20de22 /block/blk-sysfs.c
parent | backing-dev: use synchronize_rcu_expedited instead of synchronize_rcu (diff)
block: strict rq_affinity
Some systems benefit from completions always being steered to the strict
requester CPU rather than the looser "per-socket" steering that
blk_cpu_to_group() attempts by default. This is because the first
CPU in the group mask ends up completely overloaded with work,
while the others (including the original submitter) have power
to spare.
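For context, blk_cpu_to_group() collapses a CPU number to the first CPU of its core/thread group, which is why completions concentrate on that one CPU. A minimal sketch of that grouping, modeled on the block/blk.h helper of this kernel era and shown here for illustration only:

/* Sketch of the grouped steering this patch lets users bypass: every CPU
 * in a core group maps to the group's first CPU, so completions pile up
 * there unless strict per-requester affinity is forced.
 */
static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	/* group by CPUs sharing a last-level cache / package */
	return cpumask_first(cpu_coregroup_mask(cpu));
#elif defined(CONFIG_SCHED_SMT)
	/* group by hardware threads of the same core */
	return cpumask_first(topology_thread_cpumask(cpu));
#else
	/* no grouping: the CPU maps to itself */
	return cpu;
#endif
}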
Allow the strict mode to be set by writing '2' to the sysfs control
file. This is identical to the scheme used for the nomerges file,
where '2' is a more aggressive setting than just being turned on.
echo 2 > /sys/block/<bdev>/queue/rq_affinity
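Reading the file back reports the current mode: since the show path returns set << force, the value is 0 with completion affinity off, 1 for the default grouped steering, and 2 when strict per-CPU completion is forced.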
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Roland Dreier <roland@purestorage.com>
Tested-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r-- | block/blk-sysfs.c | 13 |
1 file changed, 9 insertions, 4 deletions
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index d935bd859c87..0ee17b5e7fb6 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -244,8 +244,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
 {
 	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
 
-	return queue_var_show(set, page);
+	return queue_var_show(set << force, page);
 }
 
 static ssize_t
@@ -257,10 +258,14 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
 	ret = queue_var_store(&val, page, count);
 
 	spin_lock_irq(q->queue_lock);
-	if (val)
+	if (val) {
 		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+		if (val == 2)
+			queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+	} else {
+		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+	}
 	spin_unlock_irq(q->queue_lock);
 #endif
 	return ret;
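This diffstat is limited to block/blk-sysfs.c, so the consumer of QUEUE_FLAG_SAME_FORCE is not shown here; in the full commit it lives on the completion path in block/blk-softirq.c. A minimal sketch of how a completion path could honor the two flags (the helper name and structure below are illustrative, not the exact patch):

/* Illustrative-only sketch: pick the CPU that should run the completion.
 * 'cpu' is the CPU the interrupt landed on, 'req->cpu' the submitter.
 */
static int choose_completion_cpu(struct request_queue *q,
				 struct request *req, int cpu)
{
	int ccpu;

	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1) {
		/* rq_affinity enabled: steer back toward the submitter */
		ccpu = req->cpu;
		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
			/* rq_affinity == 1: only require the same group, so a
			 * completion already in the right group stays put */
			ccpu = blk_cpu_to_group(ccpu);
			cpu = blk_cpu_to_group(cpu);
		}
		/* rq_affinity == 2: keep the exact submitting CPU (strict) */
	} else {
		/* affinity off: complete on the local CPU */
		ccpu = cpu;
	}

	return ccpu;	/* differs from 'cpu' => raise the softirq remotely */
}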