author    | Mike Snitzer <snitzer@redhat.com> | 2020-09-29 22:27:21 +0200
committer | Mike Snitzer <snitzer@redhat.com> | 2020-09-29 22:31:35 +0200
commit    | 1471308fb5ec4335f9ae9fc65f65048dbe7c336e (patch)
tree      | 69aef90f47105e1c730e5277f352d3d5446a1174 /drivers/md
parent    | dm crypt: document encrypted keyring key option (diff)
parent    | block-mq: fix comments in blk_mq_queue_tag_busy_iter (diff)
Merge remote-tracking branch 'jens/for-5.10/block' into dm-5.10
DM depends on these block 5.10 commits:
22ada802ede8 block: use lcm_not_zero() when stacking chunk_sectors
07d098e6bbad block: allow 'chunk_sectors' to be non-power-of-2
021a24460dc2 block: add QUEUE_FLAG_NOWAIT
6abc49468eea dm: add support for REQ_NOWAIT and enable it for linear target
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
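
The REQ_NOWAIT plumbing this merge depends on is wired up in the dm-linear.c, dm-table.c and dm.c hunks below: a target type opts in with DM_TARGET_NOWAIT, the table only advertises QUEUE_FLAG_NOWAIT when every target and every underlying queue supports it, and a REQ_NOWAIT bio that arrives while I/O is blocked is failed with bio_wouldblock_error() instead of being queued. A condensed restatement of the dm_table_supports_nowait() check added in the dm-table.c hunk (the for-loop form here is only for readability, not the code as merged):

static bool dm_table_supports_nowait(struct dm_table *t)
{
	unsigned int i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/* every target must set DM_TARGET_NOWAIT ... */
		if (!dm_target_supports_nowait(ti->type))
			return false;
		/* ... and every underlying queue must have QUEUE_FLAG_NOWAIT */
		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
			return false;
	}
	return true;
}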
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/bcache/request.c | 10
-rw-r--r-- | drivers/md/bcache/super.c   |  5
-rw-r--r-- | drivers/md/dm-linear.c      |  5
-rw-r--r-- | drivers/md/dm-raid.c        |  2
-rw-r--r-- | drivers/md/dm-table.c       | 43
-rw-r--r-- | drivers/md/dm.c             | 19
-rw-r--r-- | drivers/md/md-cluster.c     |  6
-rw-r--r-- | drivers/md/md-linear.c      |  2
-rw-r--r-- | drivers/md/md.c             | 29
-rw-r--r-- | drivers/md/md.h             |  4
-rw-r--r-- | drivers/md/raid0.c          | 16
-rw-r--r-- | drivers/md/raid10.c         | 46
-rw-r--r-- | drivers/md/raid5.c          | 31
13 files changed, 102 insertions, 116 deletions
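
Several of the hunks below convert bcache and md from the gendisk-based disk_start_io_acct()/disk_end_io_acct() pair to the partition-aware part_start_io_acct()/part_end_io_acct() pair, which takes the bio itself so the sector count and operation no longer have to be passed separately. A minimal sketch of the pattern, condensed from the bcache hunks (struct io_acct_state and the two wrappers are illustrative names, not part of this diff):

struct io_acct_state {
	struct hd_struct *part;		/* illustrative; filled in by part_start_io_acct() */
	unsigned long start_time;
};

static void start_account(struct gendisk *disk, struct io_acct_state *st,
			   struct bio *bio)
{
	st->start_time = part_start_io_acct(disk, &st->part, bio);
}

static void end_account(struct io_acct_state *st, struct bio *bio)
{
	part_end_io_acct(st->part, bio, st->start_time);
}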
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index c7cadaafa947..7f54ae223644 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -475,6 +475,7 @@ struct search {
 	unsigned int		read_dirty_data:1;
 	unsigned int		cache_missed:1;
 
+	struct hd_struct	*part;
 	unsigned long		start_time;
 
 	struct btree_op		op;
@@ -669,7 +670,7 @@ static void bio_complete(struct search *s)
 {
 	if (s->orig_bio) {
 		/* Count on bcache device */
-		disk_end_io_acct(s->d->disk, bio_op(s->orig_bio), s->start_time);
+		part_end_io_acct(s->part, s->orig_bio, s->start_time);
 
 		trace_bcache_request_end(s->d, s->orig_bio);
 		s->orig_bio->bi_status = s->iop.status;
@@ -731,7 +732,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->write		= op_is_write(bio_op(bio));
 	s->read_dirty_data	= 0;
 	/* Count on the bcache device */
-	s->start_time		= disk_start_io_acct(d->disk, bio_sectors(bio), bio_op(bio));
+	s->start_time		= part_start_io_acct(d->disk, &s->part, bio);
 	s->iop.c		= d->c;
 	s->iop.bio		= NULL;
 	s->iop.inode		= d->id;
@@ -1072,6 +1073,7 @@ struct detached_dev_io_private {
 	unsigned long		start_time;
 	bio_end_io_t		*bi_end_io;
 	void			*bi_private;
+	struct hd_struct	*part;
 };
 
 static void detached_dev_end_io(struct bio *bio)
@@ -1083,7 +1085,7 @@ static void detached_dev_end_io(struct bio *bio)
 	bio->bi_private = ddip->bi_private;
 
 	/* Count on the bcache device */
-	disk_end_io_acct(ddip->d->disk, bio_op(bio), ddip->start_time);
+	part_end_io_acct(ddip->part, bio, ddip->start_time);
 
 	if (bio->bi_status) {
 		struct cached_dev *dc = container_of(ddip->d,
@@ -1109,7 +1111,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
 	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
 	ddip->d = d;
 	/* Count on the bcache device */
-	ddip->start_time = disk_start_io_acct(d->disk, bio_sectors(bio), bio_op(bio));
+	ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio);
 	ddip->bi_end_io = bio->bi_end_io;
 	ddip->bi_private = bio->bi_private;
 	bio->bi_end_io = detached_dev_end_io;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1bbdc410ee3c..6bfa77167362 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1427,9 +1427,8 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
 	if (ret)
 		return ret;
 
-	dc->disk.disk->queue->backing_dev_info->ra_pages =
-		max(dc->disk.disk->queue->backing_dev_info->ra_pages,
-		    q->backing_dev_info->ra_pages);
+	blk_queue_io_opt(dc->disk.disk->queue,
+		max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
 
 	atomic_set(&dc->io_errors, 0);
 	dc->io_disable = false;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index e1db43446327..00774b5d7668 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -228,10 +228,11 @@ static struct target_type linear_target = {
 	.name   = "linear",
 	.version = {1, 4, 0},
 #ifdef CONFIG_BLK_DEV_ZONED
-	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT |
+		    DM_TARGET_ZONED_HM,
 	.report_zones = linear_report_zones,
 #else
-	.features = DM_TARGET_PASSES_INTEGRITY,
+	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
 #endif
 	.module = THIS_MODULE,
 	.ctr = linear_ctr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8d2b835d7a10..56b723d012ac 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -701,7 +701,7 @@ static void rs_set_capacity(struct raid_set *rs)
 	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));
 
 	set_capacity(gendisk, rs->md.array_sectors);
-	revalidate_disk(gendisk);
+	revalidate_disk_size(gendisk, true);
 }
 
 /*
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 229f461e7def..c3be7cb2570c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -907,7 +907,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 	struct request_queue *q = bdev_get_queue(bdev);
 
 	/* request-based cannot stack on partitions! */
-	if (bdev != bdev->bd_contains)
+	if (bdev_is_partition(bdev))
 		return false;
 
 	return queue_is_mq(q);
@@ -1752,6 +1752,33 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
 	return true;
 }
 
+static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
+				     sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_nowait(q);
+}
+
+static bool dm_table_supports_nowait(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (!dm_target_supports_nowait(ti->type))
+			return false;
+
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
+			return false;
+	}
+
+	return true;
+}
+
 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 				      sector_t start, sector_t len, void *data)
 {
@@ -1819,7 +1846,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+	return q && blk_queue_stable_writes(q);
 }
 
 /*
@@ -1854,6 +1881,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
+	if (dm_table_supports_nowait(t))
+		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
+	else
+		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
+
 	if (!dm_table_supports_discards(t)) {
 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		/* Must also clear discard limits... */
@@ -1904,9 +1936,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * because they do their own checksumming.
 	 */
 	if (dm_table_requires_stable_pages(t))
-		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 	else
-		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
 
 	/*
 	 * Determine whether or not this queue's I/O timings contribute
@@ -1929,8 +1961,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 #endif
 
-	/* Allow reads to exceed readahead limits */
-	q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
+	blk_queue_update_readahead(q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6ed05ca65a0f..977a962fa0bb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1786,7 +1786,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
 	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
 		dm_put_live_table(md, srcu_idx);
 
-		if (!(bio->bi_opf & REQ_RAHEAD))
+		if (bio->bi_opf & REQ_NOWAIT)
+			bio_wouldblock_error(bio);
+		else if (!(bio->bi_opf & REQ_RAHEAD))
 			queue_io(md, bio);
 		else
 			bio_io_error(bio);
@@ -2082,18 +2084,6 @@ static void event_callback(void *context)
 }
 
 /*
- * Protected by md->suspend_lock obtained by dm_swap_table().
- */
-static void __set_size(struct mapped_device *md, sector_t size)
-{
-	lockdep_assert_held(&md->suspend_lock);
-
-	set_capacity(md->disk, size);
-
-	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-}
-
-/*
  * Returns old map, which caller must destroy.
  */
 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
@@ -2115,7 +2105,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 	if (size != dm_get_size(md))
 		memset(&md->geometry, 0, sizeof(md->geometry));
 
-	__set_size(md, size);
+	set_capacity(md->disk, size);
+	bd_set_nr_sectors(md->bdev, size);
 
 	dm_table_event_callback(t, event_callback, md);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index d50737ec4039..0580b51a156a 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -582,7 +582,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
 		break;
 	case CHANGE_CAPACITY:
 		set_capacity(mddev->gendisk, mddev->array_sectors);
-		revalidate_disk(mddev->gendisk);
+		revalidate_disk_size(mddev->gendisk, true);
 		break;
 	case RESYNCING:
 		set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);
@@ -1296,12 +1296,12 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
 			pr_err("%s:%d: failed to send CHANGE_CAPACITY msg\n",
 			       __func__, __LINE__);
 		set_capacity(mddev->gendisk, mddev->array_sectors);
-		revalidate_disk(mddev->gendisk);
+		revalidate_disk_size(mddev->gendisk, true);
 	} else {
 		/* revert to previous sectors */
 		ret = mddev->pers->resize(mddev, old_dev_sectors);
 		if (!ret)
-			revalidate_disk(mddev->gendisk);
+			revalidate_disk_size(mddev->gendisk, true);
 		ret = __sendmsg(cinfo, &cmsg);
 		if (ret)
 			pr_err("%s:%d: failed to send METADATA_UPDATED msg\n",
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index c2ae9125c4c3..5ab22069b5be 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -202,7 +202,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
 	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
 	set_capacity(mddev->gendisk, mddev->array_sectors);
 	mddev_resume(mddev);
-	revalidate_disk(mddev->gendisk);
+	revalidate_disk_size(mddev->gendisk, true);
 	kfree_rcu(oldconf, rcu);
 	return 0;
 }
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 607278207023..de8419b7ae98 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -464,6 +464,7 @@ struct md_io {
 	bio_end_io_t *orig_bi_end_io;
 	void *orig_bi_private;
 	unsigned long start_time;
+	struct hd_struct *part;
 };
 
 static void md_end_io(struct bio *bio)
@@ -471,7 +472,7 @@ static void md_end_io(struct bio *bio)
 	struct md_io *md_io = bio->bi_private;
 	struct mddev *mddev = md_io->mddev;
 
-	disk_end_io_acct(mddev->gendisk, bio_op(bio), md_io->start_time);
+	part_end_io_acct(md_io->part, bio, md_io->start_time);
 
 	bio->bi_end_io = md_io->orig_bi_end_io;
 	bio->bi_private = md_io->orig_bi_private;
@@ -517,9 +518,8 @@ static blk_qc_t md_submit_bio(struct bio *bio)
 		bio->bi_end_io = md_end_io;
 		bio->bi_private = md_io;
 
-		md_io->start_time = disk_start_io_acct(mddev->gendisk,
-						       bio_sectors(bio),
-						       bio_op(bio));
+		md_io->start_time = part_start_io_acct(mddev->gendisk,
+						       &md_io->part, bio);
 	}
 
 	/* bio could be mergeable after passing to underlayer */
@@ -2322,8 +2322,7 @@ static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
 			    test_bit(Journal, &rdev2->flags) ||
 			    rdev2->raid_disk == -1)
 				continue;
-			if (rdev->bdev->bd_contains ==
-			    rdev2->bdev->bd_contains) {
+			if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) {
 				rcu_read_unlock();
 				return 1;
 			}
@@ -5358,7 +5357,7 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
 		mddev->array_sectors = sectors;
 		if (mddev->pers) {
 			set_capacity(mddev->gendisk, mddev->array_sectors);
-			revalidate_disk(mddev->gendisk);
+			revalidate_disk_size(mddev->gendisk, true);
 		}
 	}
 	mddev_unlock(mddev);
@@ -5944,8 +5943,8 @@ int md_run(struct mddev *mddev)
 	rdev_for_each(rdev, mddev)
 		rdev_for_each(rdev2, mddev) {
 			if (rdev < rdev2 &&
-			    rdev->bdev->bd_contains ==
-			    rdev2->bdev->bd_contains) {
+			    rdev->bdev->bd_disk ==
+			    rdev2->bdev->bd_disk) {
 				pr_warn("%s: WARNING: %s appears to be on the same physical disk as %s.\n",
 					mdname(mddev),
 					bdevname(rdev->bdev,b),
@@ -6109,7 +6108,7 @@ int do_md_run(struct mddev *mddev)
 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
 	set_capacity(mddev->gendisk, mddev->array_sectors);
-	revalidate_disk(mddev->gendisk);
+	revalidate_disk_size(mddev->gendisk, true);
 	clear_bit(MD_NOT_READY, &mddev->flags);
 	mddev->changed = 1;
 	kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
@@ -6427,7 +6426,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 		set_capacity(disk, 0);
 		mutex_unlock(&mddev->open_mutex);
 		mddev->changed = 1;
-		revalidate_disk(disk);
+		revalidate_disk_size(disk, true);
 
 		if (mddev->ro)
 			mddev->ro = 0;
@@ -7259,7 +7258,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
 			md_cluster_ops->update_size(mddev, old_dev_sectors);
 		else if (mddev->queue) {
 			set_capacity(mddev->gendisk, mddev->array_sectors);
-			revalidate_disk(mddev->gendisk);
+			revalidate_disk_size(mddev->gendisk, true);
 		}
 	}
 	return rv;
@@ -7848,7 +7847,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
 	atomic_inc(&mddev->openers);
 	mutex_unlock(&mddev->open_mutex);
 
-	check_disk_change(bdev);
+	bdev_check_media_change(bdev);
 out:
 	if (err)
 		mddev_put(mddev);
@@ -8445,7 +8444,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
 	idle = 1;
 	rcu_read_lock();
 	rdev_for_each_rcu(rdev, mddev) {
-		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
+		struct gendisk *disk = rdev->bdev->bd_disk;
 		curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
 			      atomic_read(&disk->sync_io);
 		/* sync IO will cause sync_io to increase before the disk_stats
@@ -9018,7 +9017,7 @@ void md_do_sync(struct md_thread *thread)
 		mddev_unlock(mddev);
 		if (!mddev_is_clustered(mddev)) {
 			set_capacity(mddev->gendisk, mddev->array_sectors);
-			revalidate_disk(mddev->gendisk);
+			revalidate_disk_size(mddev->gendisk, true);
 		}
 	}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index d9c4e6b7e939..2175a5ac4f7c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -397,7 +397,7 @@ struct mddev {
 	 * These locks are separate due to conflicting interactions
 	 * with bdev->bd_mutex.
 	 * Lock ordering is:
-	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
+	 *  reconfig_mutex -> bd_mutex
 	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
 	 */
 	struct mutex			open_mutex;
@@ -551,7 +551,7 @@ extern void mddev_unlock(struct mddev *mddev);
 
 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
 {
-	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
+	atomic_add(nr_sectors, &bdev->bd_disk->sync_io);
 }
 
 static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index f54a449f97aa..aa2d72791768 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -410,22 +410,6 @@ static int raid0_run(struct mddev *mddev)
 		 mdname(mddev),
 		 (unsigned long long)mddev->array_sectors);
 
-	if (mddev->queue) {
-		/* calculate the max read-ahead size.
-		 * For read-ahead of large files to be effective, we need to
-		 * readahead at least twice a whole stripe. i.e. number of devices
-		 * multiplied by chunk size times 2.
-		 * If an individual device has an ra_pages greater than the
-		 * chunk size, then we will not drive that device as hard as it
-		 * wants.  We consider this a configuration error: a larger
-		 * chunksize should be used in that case.
-		 */
-		int stripe = mddev->raid_disks *
-			(mddev->chunk_sectors << 9) / PAGE_SIZE;
-		if (mddev->queue->backing_dev_info->ra_pages < 2* stripe)
-			mddev->queue->backing_dev_info->ra_pages = 2* stripe;
-	}
-
 	dump_zones(mddev);
 
 	ret = md_integrity_register(mddev);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index e8fa32733917..5d1bdee313ec 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3703,10 +3703,20 @@ static struct r10conf *setup_conf(struct mddev *mddev)
 	return ERR_PTR(err);
 }
 
+static void raid10_set_io_opt(struct r10conf *conf)
+{
+	int raid_disks = conf->geo.raid_disks;
+
+	if (!(conf->geo.raid_disks % conf->geo.near_copies))
+		raid_disks /= conf->geo.near_copies;
+	blk_queue_io_opt(conf->mddev->queue, (conf->mddev->chunk_sectors << 9) *
+			 raid_disks);
+}
+
 static int raid10_run(struct mddev *mddev)
 {
 	struct r10conf *conf;
-	int i, disk_idx, chunk_size;
+	int i, disk_idx;
 	struct raid10_info *disk;
 	struct md_rdev *rdev;
 	sector_t size;
@@ -3742,18 +3752,13 @@ static int raid10_run(struct mddev *mddev)
 	mddev->thread = conf->thread;
 	conf->thread = NULL;
 
-	chunk_size = mddev->chunk_sectors << 9;
 	if (mddev->queue) {
 		blk_queue_max_discard_sectors(mddev->queue,
 					      mddev->chunk_sectors);
 		blk_queue_max_write_same_sectors(mddev->queue, 0);
 		blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
-		blk_queue_io_min(mddev->queue, chunk_size);
-		if (conf->geo.raid_disks % conf->geo.near_copies)
-			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
-		else
-			blk_queue_io_opt(mddev->queue, chunk_size *
-					 (conf->geo.raid_disks / conf->geo.near_copies));
+		blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
+		raid10_set_io_opt(conf);
 	}
 
 	rdev_for_each(rdev, mddev) {
@@ -3868,19 +3873,6 @@ static int raid10_run(struct mddev *mddev)
 	mddev->resync_max_sectors = size;
 	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
 
-	if (mddev->queue) {
-		int stripe = conf->geo.raid_disks *
-			((mddev->chunk_sectors << 9) / PAGE_SIZE);
-
-		/* Calculate max read-ahead size.
-		 * We need to readahead at least twice a whole stripe....
-		 * maybe...
-		 */
-		stripe /= conf->geo.near_copies;
-		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
-	}
-
 	if (md_integrity_register(mddev))
 		goto out_free_conf;
@@ -4718,16 +4710,8 @@ static void end_reshape(struct r10conf *conf)
 	conf->reshape_safe = MaxSector;
 	spin_unlock_irq(&conf->device_lock);
 
-	/* read-ahead size must cover two whole stripes, which is
-	 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
-	 */
-	if (conf->mddev->queue) {
-		int stripe = conf->geo.raid_disks *
-			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
-		stripe /= conf->geo.near_copies;
-		if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
-			conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
-	}
+	if (conf->mddev->queue)
+		raid10_set_io_opt(conf);
 	conf->fullsync = 0;
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 225380efd1e2..d589d26c86ea 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6638,14 +6638,14 @@ raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len)
 	if (!conf)
 		err = -ENODEV;
 	else if (new != conf->skip_copy) {
+		struct request_queue *q = mddev->queue;
+
 		mddev_suspend(mddev);
 		conf->skip_copy = new;
 		if (new)
-			mddev->queue->backing_dev_info->capabilities |=
-				BDI_CAP_STABLE_WRITES;
+			blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 		else
-			mddev->queue->backing_dev_info->capabilities &=
-				~BDI_CAP_STABLE_WRITES;
+			blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
 		mddev_resume(mddev);
 	}
 	mddev_unlock(mddev);
@@ -7232,6 +7232,12 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
 	return 0;
 }
 
+static void raid5_set_io_opt(struct r5conf *conf)
+{
+	blk_queue_io_opt(conf->mddev->queue, (conf->chunk_sectors << 9) *
+			 (conf->raid_disks - conf->max_degraded));
+}
+
 static int raid5_run(struct mddev *mddev)
 {
 	struct r5conf *conf;
@@ -7516,13 +7522,10 @@ static int raid5_run(struct mddev *mddev)
 		int data_disks = conf->previous_raid_disks - conf->max_degraded;
 		int stripe = data_disks *
 			((mddev->chunk_sectors << 9) / PAGE_SIZE);
-		if (mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
-			mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
 
 		chunk_size = mddev->chunk_sectors << 9;
 		blk_queue_io_min(mddev->queue, chunk_size);
-		blk_queue_io_opt(mddev->queue, chunk_size *
-				 (conf->raid_disks - conf->max_degraded));
+		raid5_set_io_opt(conf);
 		mddev->queue->limits.raid_partial_stripes_expensive = 1;
 		/*
 		 * We can only discard a whole stripe. It doesn't make sense to
@@ -8106,16 +8109,8 @@ static void end_reshape(struct r5conf *conf)
 		spin_unlock_irq(&conf->device_lock);
 		wake_up(&conf->wait_for_overlap);
 
-		/* read-ahead size must cover two whole stripes, which is
-		 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices
-		 */
-		if (conf->mddev->queue) {
-			int data_disks = conf->raid_disks - conf->max_degraded;
-			int stripe = data_disks * ((conf->chunk_sectors << 9)
-						   / PAGE_SIZE);
-			if (conf->mddev->queue->backing_dev_info->ra_pages < 2 * stripe)
-				conf->mddev->queue->backing_dev_info->ra_pages = 2 * stripe;
-		}
+		if (conf->mddev->queue)
+			raid5_set_io_opt(conf);
 	}
 }
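
The raid0/raid10/raid5 hunks above drop the manual backing_dev_info->ra_pages sizing and instead export the stripe width through blk_queue_io_opt(); with the 5.10 block changes this merge pulls in, the core can size read-ahead from io_opt (dm refreshes it explicitly via blk_queue_update_readahead() in the dm-table.c hunk). A rough sketch of the resulting pattern for a striped stacking driver (set_stripe_io_hints() and its parameters are illustrative, not from this diff):

static void set_stripe_io_hints(struct request_queue *q,
				unsigned int data_disks,
				unsigned int chunk_sectors)
{
	unsigned int chunk_bytes = chunk_sectors << 9;

	blk_queue_io_min(q, chunk_bytes);		/* one chunk */
	blk_queue_io_opt(q, chunk_bytes * data_disks);	/* one full stripe */
	blk_queue_update_readahead(q);			/* let ra_pages follow io_opt */
}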