author     Josef Bacik <jbacik@fb.com>     2018-07-03 17:14:57 +0200
committer  Jens Axboe <axboe@kernel.dk>    2018-07-09 17:07:54 +0200
commit     2ecbf456352d0699f51b4c6d70ea5bf29766579c
tree       3f7dcf7c71cda4d74d1a8371741cb498e43c02ff /block/blk-stat.c
parent     memcontrol: schedule throttling if we are congested
blk-stat: export helpers for modifying blk_rq_stat
We need to use blk_rq_stat in the blkcg qos code, so export some of
these helpers for use outside of blk-stat.c.
Signed-off-by: Josef Bacik <jbacik@fb.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
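For context, a minimal sketch of how an outside consumer such as the blkcg
qos code might drive the newly exported helpers. It assumes the matching
declarations are added to block/blk-stat.h (the diffstat below is filtered
to block/blk-stat.c, so any header change is not shown); struct latency_stat
and the two functions are hypothetical names for illustration, not part of
this commit:

/*
 * Hypothetical consumer of the now-public helpers.  Only the
 * blk_rq_stat_* calls come from this commit; everything else is
 * illustrative.
 */
#include "blk-stat.h"

struct latency_stat {
	struct blk_rq_stat __percpu *cpu_stat;	/* per-cpu accumulation */
	struct blk_rq_stat total;		/* flushed aggregate */
};

/* Record one request's service time on the local CPU. */
static void latency_stat_record(struct latency_stat *ls, u64 time_ns)
{
	blk_rq_stat_add(get_cpu_ptr(ls->cpu_stat), time_ns);
	put_cpu_ptr(ls->cpu_stat);
}

/* Fold the per-cpu buffers into ->total and reset them. */
static void latency_stat_flush(struct latency_stat *ls)
{
	int cpu;

	blk_rq_stat_init(&ls->total);
	for_each_online_cpu(cpu) {
		struct blk_rq_stat *s = per_cpu_ptr(ls->cpu_stat, cpu);

		blk_rq_stat_sum(&ls->total, s);
		blk_rq_stat_init(s);
	}
}

This is the same get_cpu_ptr()/put_cpu_ptr() accumulate-then-flush pattern
that blk-stat.c itself uses in blk_stat_add() and blk_stat_timer_fn() below.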
Diffstat (limited to 'block/blk-stat.c')
 block/blk-stat.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 175c143ac5b9..7587b1c3caaf 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -17,7 +17,7 @@ struct blk_queue_stats {
 	bool enable_accounting;
 };
 
-static void blk_stat_init(struct blk_rq_stat *stat)
+void blk_rq_stat_init(struct blk_rq_stat *stat)
 {
 	stat->min = -1ULL;
 	stat->max = stat->nr_samples = stat->mean = 0;
@@ -25,7 +25,7 @@ static void blk_stat_init(struct blk_rq_stat *stat)
 }
 
 /* src is a per-cpu stat, mean isn't initialized */
-static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
+void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 {
 	if (!src->nr_samples)
 		return;
@@ -39,7 +39,7 @@ static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
 	dst->nr_samples += src->nr_samples;
 }
 
-static void __blk_stat_add(struct blk_rq_stat *stat, u64 value)
+void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
 {
 	stat->min = min(stat->min, value);
 	stat->max = max(stat->max, value);
@@ -69,7 +69,7 @@ void blk_stat_add(struct request *rq, u64 now)
 			continue;
 
 		stat = &get_cpu_ptr(cb->cpu_stat)[bucket];
-		__blk_stat_add(stat, value);
+		blk_rq_stat_add(stat, value);
 		put_cpu_ptr(cb->cpu_stat);
 	}
 	rcu_read_unlock();
@@ -82,15 +82,15 @@ static void blk_stat_timer_fn(struct timer_list *t)
 	int cpu;
 
 	for (bucket = 0; bucket < cb->buckets; bucket++)
-		blk_stat_init(&cb->stat[bucket]);
+		blk_rq_stat_init(&cb->stat[bucket]);
 
 	for_each_online_cpu(cpu) {
 		struct blk_rq_stat *cpu_stat;
 
 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
 		for (bucket = 0; bucket < cb->buckets; bucket++) {
-			blk_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
-			blk_stat_init(&cpu_stat[bucket]);
+			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
+			blk_rq_stat_init(&cpu_stat[bucket]);
 		}
 	}
 
@@ -143,7 +143,7 @@ void blk_stat_add_callback(struct request_queue *q,
 
 		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
 		for (bucket = 0; bucket < cb->buckets; bucket++)
-			blk_stat_init(&cpu_stat[bucket]);
+			blk_rq_stat_init(&cpu_stat[bucket]);
 	}
 
 	spin_lock(&q->stats->lock);
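To make the helpers' division of labor concrete, here is a rough userspace
translation (simplified: uint64_t stands in for u64 and plain division for
div_u64; a sketch, not the kernel code itself). It shows why the comment
above says "src is a per-cpu stat, mean isn't initialized": blk_rq_stat_add()
only accumulates a raw sum in ->batch, and the mean is first computed when
blk_rq_stat_sum() folds a per-cpu stat into the aggregate.

#include <stdint.h>
#include <stdio.h>

struct blk_rq_stat {
	uint64_t mean, min, max, batch;
	uint32_t nr_samples;
};

static void blk_rq_stat_init(struct blk_rq_stat *s)
{
	s->min = UINT64_MAX;	/* the kernel writes this as -1ULL */
	s->max = s->mean = s->batch = 0;
	s->nr_samples = 0;
}

static void blk_rq_stat_add(struct blk_rq_stat *s, uint64_t v)
{
	s->min = v < s->min ? v : s->min;
	s->max = v > s->max ? v : s->max;
	s->batch += v;		/* raw sum; mean is derived at fold time */
	s->nr_samples++;
}

static void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;
	dst->min = src->min < dst->min ? src->min : dst->min;
	dst->max = src->max > dst->max ? src->max : dst->max;
	/* weighted running mean over all samples folded so far */
	dst->mean = (src->batch + dst->mean * dst->nr_samples) /
		    (dst->nr_samples + src->nr_samples);
	dst->nr_samples += src->nr_samples;
}

int main(void)
{
	struct blk_rq_stat cpu0, cpu1, total;

	blk_rq_stat_init(&cpu0);
	blk_rq_stat_init(&cpu1);
	blk_rq_stat_init(&total);

	blk_rq_stat_add(&cpu0, 100);	/* e.g. latencies in usecs */
	blk_rq_stat_add(&cpu0, 300);
	blk_rq_stat_add(&cpu1, 200);

	blk_rq_stat_sum(&total, &cpu0);	/* mean = 400 / 2 = 200 */
	blk_rq_stat_sum(&total, &cpu1);	/* mean = (200 + 200*2) / 3 = 200 */

	printf("n=%u min=%llu max=%llu mean=%llu\n", total.nr_samples,
	       (unsigned long long)total.min, (unsigned long long)total.max,
	       (unsigned long long)total.mean);
	return 0;
}

Built with cc demo.c, this prints n=3 min=100 max=300 mean=200, i.e. the
min, max, and mean over the three samples folded in from both "CPUs".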