author    Pavel Begunkov <asml.silence@gmail.com>  2019-10-07 23:16:51 +0200
committer Jens Axboe <axboe@kernel.dk>  2019-10-08 05:19:10 +0200
commit    8148f0b5647a831c5d94b59da240c8e76dbacae9 (patch)
tree      2f9511fed5b78056a63ea1c8104e27f79f69823a
parent    blk-mq: Embed counters into struct mq_inflight (diff)
blk-stat: Optimise blk_stat_add()
blk_stat_add() calls {get,put}_cpu_ptr() in a loop, which entails the
overhead of disabling/enabling preemption on every iteration. The loop
is under RCU (i.e. short) anyway, so do get_cpu() once in advance.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
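For illustration, the pattern can be sketched in plain userspace C. This is an
analogy, not the kernel code: stat_slot, NR_CALLBACKS, MAX_CPUS and stat_add
are hypothetical names, and sched_getcpu() merely stands in for the this-CPU
lookup that get_cpu_ptr() performs; userspace has no preemption disabling,
which get_cpu() additionally provides in the kernel.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

#define NR_CALLBACKS 4
#define MAX_CPUS     256

struct stat_slot {
	unsigned long nr_samples;
	unsigned long long sum;
};

/* One row of per-CPU slots per callback, analogous to cb->cpu_stat. */
static struct stat_slot cpu_stat[NR_CALLBACKS][MAX_CPUS];

static void stat_add(unsigned long long value)
{
	/*
	 * Resolve the CPU id once, before the loop, rather than once per
	 * iteration. In the kernel, get_cpu() also disables preemption so
	 * the task cannot migrate while it updates its CPU's slots; this
	 * userspace sketch does not model that part.
	 */
	int cpu = sched_getcpu();
	int i;

	for (i = 0; i < NR_CALLBACKS; i++) {
		struct stat_slot *slot = &cpu_stat[i][cpu];

		slot->nr_samples++;
		slot->sum += value;
	}
}

int main(void)
{
	stat_add(100);
	stat_add(250);
	printf("samples on cpu %d: %lu\n", sched_getcpu(),
	       cpu_stat[0][sched_getcpu()].nr_samples);
	return 0;
}

The point mirrors the patch: the CPU is looked up once before the short loop
and every iteration indexes the same per-CPU row, instead of paying the
lookup (and, in the kernel, the preempt_disable()/preempt_enable() pair) on
each pass.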
 block/blk-stat.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 940f15d600f8..7da302ff88d0 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -53,7 +53,7 @@ void blk_stat_add(struct request *rq, u64 now)
 	struct request_queue *q = rq->q;
 	struct blk_stat_callback *cb;
 	struct blk_rq_stat *stat;
-	int bucket;
+	int bucket, cpu;
 	u64 value;
 
 	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
@@ -61,6 +61,7 @@ void blk_stat_add(struct request *rq, u64 now)
 	blk_throtl_stat_add(rq, value);
 
 	rcu_read_lock();
+	cpu = get_cpu();
 	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
 		if (!blk_stat_is_active(cb))
 			continue;
@@ -69,10 +70,10 @@ void blk_stat_add(struct request *rq, u64 now)
 		if (bucket < 0)
 			continue;
 
-		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
+		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
 		blk_rq_stat_add(stat, value);
-		put_cpu_ptr(cb->cpu_stat);
 	}
+	put_cpu();
 	rcu_read_unlock();
 }