diff options
author | Jens Axboe <axboe@fb.com> | 2016-12-09 21:08:35 +0100 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-12-09 21:08:35 +0100 |
commit | 7cd54aa8438947602cf68eda1db327822b9b8e6b (patch) | |
tree | 42c0d2935f29a7644425146bd2ea08f436d70e8e | |
parent | blk-flush: run the queue when inserting blk-mq flush (diff) | |
download | linux-7cd54aa8438947602cf68eda1db327822b9b8e6b.tar.xz linux-7cd54aa8438947602cf68eda1db327822b9b8e6b.zip |
blk-stat: fix a few cases of missing batch flushing
Every time we need to read ->nr_samples, we should have flushed
the batch first. The non-mq read path also needs to flush the
batch.
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r-- | block/blk-stat.c | 8 |
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index 4d0118568727..9b43efb8933f 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -64,6 +64,9 @@ static void blk_mq_stat_get(struct request_queue *q, struct blk_rq_stat *dst)

 	queue_for_each_hw_ctx(q, hctx, i) {
 		hctx_for_each_ctx(hctx, ctx, j) {
+			blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
+			blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+
 			if (!ctx->stat[BLK_STAT_READ].nr_samples &&
 			    !ctx->stat[BLK_STAT_WRITE].nr_samples)
 				continue;
@@ -111,6 +114,8 @@ void blk_queue_stat_get(struct request_queue *q, struct blk_rq_stat *dst)
 	if (q->mq_ops)
 		blk_mq_stat_get(q, dst);
 	else {
+		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_READ]);
+		blk_stat_flush_batch(&q->rq_stats[BLK_STAT_WRITE]);
 		memcpy(&dst[BLK_STAT_READ], &q->rq_stats[BLK_STAT_READ],
 				sizeof(struct blk_rq_stat));
 		memcpy(&dst[BLK_STAT_WRITE], &q->rq_stats[BLK_STAT_WRITE],
@@ -128,6 +133,9 @@ void blk_hctx_stat_get(struct blk_mq_hw_ctx *hctx, struct blk_rq_stat *dst)
 	uint64_t newest = 0;

 	hctx_for_each_ctx(hctx, ctx, i) {
+		blk_stat_flush_batch(&ctx->stat[BLK_STAT_READ]);
+		blk_stat_flush_batch(&ctx->stat[BLK_STAT_WRITE]);
+
 		if (!ctx->stat[BLK_STAT_READ].nr_samples &&
 		    !ctx->stat[BLK_STAT_WRITE].nr_samples)
 			continue;