author | Jan Kara <jack@suse.cz> | 2017-02-02 15:56:50 +0100
---|---|---
committer | Jens Axboe <axboe@fb.com> | 2017-02-02 16:20:48 +0100
commit | dc3b17cc8bf21307c7e076e7c778d5db756f7871 |
tree | 63a1e6c9b6c70579e9cdabf60147484c0b2f0add /block/blk-core.c |
parent | block: Unhash block device inodes on gendisk destruction |
block: Use pointer to backing_dev_info from request_queue
We will want to have struct backing_dev_info allocated separately from
struct request_queue. As the first step, add a pointer to backing_dev_info
to request_queue and convert all users that touch it. No functional
changes in this patch.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
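The conversion pattern used here (keep the storage embedded for now, but route every access through a new pointer, so the storage can later be allocated separately without touching the callers again) can be sketched in plain C. The sketch below is an editorial illustration rather than kernel code; the struct and function names are simplified stand-ins for struct request_queue, struct backing_dev_info, and the transitional _backing_dev_info field the patch introduces.

```c
/*
 * Editorial sketch, not kernel code: converting an embedded struct member
 * into a pointer while temporarily keeping embedded storage, so all users
 * can switch to pointer dereferences in one patch with no functional change.
 */
#include <stdio.h>

struct bdi {                     /* stand-in for struct backing_dev_info */
	unsigned long ra_pages;
	const char *name;
};

struct queue {                   /* stand-in for struct request_queue */
	struct bdi *backing;     /* new: every user goes through this pointer */
	struct bdi _backing;     /* transitional embedded storage, analogous
	                            to the _backing_dev_info field */
};

static void queue_init(struct queue *q)
{
	/*
	 * The pointer initially refers to the embedded copy, so behaviour is
	 * unchanged; a later patch can swap in separately allocated storage
	 * here without revisiting any other user.
	 */
	q->backing = &q->_backing;
	q->backing->ra_pages = 32;   /* was: q->backing.ra_pages = 32; */
	q->backing->name = "block";  /* was: q->backing.name = "block"; */
}

int main(void)
{
	struct queue q;

	queue_init(&q);
	printf("%s: ra_pages=%lu\n", q.backing->name, q.backing->ra_pages);
	return 0;
}
```

Once every user dereferences the pointer, the series can make backing_dev_info point at separately allocated storage (the stated goal) without touching any of the call sites converted below.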
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 27 |
1 file changed, 14 insertions, 13 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 3266daaa343f..dcac0352c14c 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -75,7 +75,7 @@ static void blk_clear_congested(struct request_list *rl, int sync)
 	 * flip its congestion state for events on other blkcgs.
 	 */
 	if (rl == &rl->q->root_rl)
-		clear_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		clear_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
@@ -86,7 +86,7 @@ static void blk_set_congested(struct request_list *rl, int sync)
 #else
 	/* see blk_clear_congested() */
 	if (rl == &rl->q->root_rl)
-		set_wb_congested(rl->q->backing_dev_info.wb.congested, sync);
+		set_wb_congested(rl->q->backing_dev_info->wb.congested, sync);
 #endif
 }
 
@@ -117,7 +117,7 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 
-	return &q->backing_dev_info;
+	return q->backing_dev_info;
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
@@ -575,7 +575,7 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_flush_integrity();
 
 	/* @q won't process any more request, flush async actions */
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
 	if (q->mq_ops)
@@ -587,7 +587,7 @@ void blk_cleanup_queue(struct request_queue *q)
 		q->queue_lock = &q->__queue_lock;
 	spin_unlock_irq(lock);
 
-	bdi_unregister(&q->backing_dev_info);
+	bdi_unregister(q->backing_dev_info);
 
 	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
@@ -728,17 +728,18 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	if (!q->bio_split)
 		goto fail_id;
 
-	q->backing_dev_info.ra_pages =
+	q->backing_dev_info = &q->_backing_dev_info;
+	q->backing_dev_info->ra_pages =
 			(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
-	q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
-	q->backing_dev_info.name = "block";
+	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
+	q->backing_dev_info->name = "block";
 	q->node = node_id;
 
-	err = bdi_init(&q->backing_dev_info);
+	err = bdi_init(q->backing_dev_info);
 	if (err)
 		goto fail_split;
 
-	setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
+	setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
 		    laptop_mode_timer_fn, (unsigned long) q);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->queue_head);
@@ -788,7 +789,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 fail_ref:
 	percpu_ref_exit(&q->q_usage_counter);
 fail_bdi:
-	bdi_destroy(&q->backing_dev_info);
+	bdi_destroy(q->backing_dev_info);
 fail_split:
 	bioset_free(q->bio_split);
 fail_id:
@@ -1182,7 +1183,7 @@ fail_elvpriv:
 	 * disturb iosched and blkcg but weird is bettern than dead.
 	 */
 	printk_ratelimited(KERN_WARNING "%s: dev %s: request aux data allocation failed, iosched may be disturbed\n",
-			   __func__, dev_name(q->backing_dev_info.dev));
+			   __func__, dev_name(q->backing_dev_info->dev));
 
 	rq->rq_flags &= ~RQF_ELVPRIV;
 	rq->elv.icq = NULL;
@@ -2659,7 +2660,7 @@ void blk_finish_request(struct request *req, int error)
 	BUG_ON(blk_queued_rq(req));
 
 	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
-		laptop_io_completion(&req->q->backing_dev_info);
+		laptop_io_completion(req->q->backing_dev_info);
 
 	blk_delete_timer(req);