Diffstat (limited to 'drivers/md')
31 files changed, 440 insertions, 420 deletions
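The change repeated across most of these drivers is the discard-capability probe: rather than fetching the request_queue and testing its discard flag, callers now ask the block device directly, and blkdev_issue_discard() loses its trailing flags argument. A minimal sketch of the before/after pattern, using hypothetical helper names and shown for illustration only (it is not part of the patch):

	/* Illustrative sketch only; helper names are made up for this note. */
	#include <linux/blkdev.h>

	static bool sketch_supports_discard(struct block_device *bdev)
	{
		/* Previously: blk_queue_discard(bdev_get_queue(bdev)) */
		return bdev_max_discard_sectors(bdev) != 0;
	}

	static int sketch_discard(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects)
	{
		/* blkdev_issue_discard() no longer takes a flags argument. */
		return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL);
	}

The per-driver hunks below apply this same substitution, alongside the related bio_kmalloc()/bio_init() calling-convention updates.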
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 097577ae3c47..ce13c272c387 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -336,7 +336,7 @@ static int bch_allocator_thread(void *arg) mutex_unlock(&ca->set->bucket_lock); blkdev_issue_discard(ca->bdev, bucket_to_sector(ca->set, bucket), - ca->sb.bucket_size, GFP_KERNEL, 0); + ca->sb.bucket_size, GFP_KERNEL); mutex_lock(&ca->set->bucket_lock); } diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index 6230dfdd9286..7510d1c983a5 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -107,15 +107,16 @@ void bch_btree_verify(struct btree *b) void bch_data_verify(struct cached_dev *dc, struct bio *bio) { + unsigned int nr_segs = bio_segments(bio); struct bio *check; struct bio_vec bv, cbv; struct bvec_iter iter, citer = { 0 }; - check = bio_kmalloc(GFP_NOIO, bio_segments(bio)); + check = bio_kmalloc(nr_segs, GFP_NOIO); if (!check) return; - bio_set_dev(check, bio->bi_bdev); - check->bi_opf = REQ_OP_READ; + bio_init(check, bio->bi_bdev, check->bi_inline_vecs, nr_segs, + REQ_OP_READ); check->bi_iter.bi_sector = bio->bi_iter.bi_sector; check->bi_iter.bi_size = bio->bi_iter.bi_size; @@ -146,7 +147,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) bio_free_pages(check); out_put: - bio_put(check); + bio_uninit(check); + kfree(check); } #endif diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 7c2ca52ca3e4..df5347ea450b 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -771,12 +771,12 @@ static void journal_write_unlocked(struct closure *cl) bio_reset(bio, ca->bdev, REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PREFLUSH | REQ_FUA); - bch_bio_map(bio, w->data); bio->bi_iter.bi_sector = PTR_OFFSET(k, i); bio->bi_iter.bi_size = sectors << 9; bio->bi_end_io = journal_write_endio; bio->bi_private = w; + bch_bio_map(bio, w->data); trace_bcache_journal_write(bio, w->data->keys); bio_list_add(&list, bio); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index fdd0194f84dd..9c5dde73da88 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -685,7 +685,7 @@ static void do_bio_hook(struct search *s, { struct bio *bio = &s->bio.bio; - bio_init_clone(bio->bi_bdev, bio, orig_bio, GFP_NOIO); + bio_init_clone(orig_bio->bi_bdev, bio, orig_bio, GFP_NOIO); /* * bi_end_io can be set separately somewhere else, e.g. 
the * variants in, @@ -1005,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) bio_get(s->iop.bio); if (bio_op(bio) == REQ_OP_DISCARD && - !blk_queue_discard(bdev_get_queue(dc->bdev))) + !bdev_max_discard_sectors(dc->bdev)) goto insert_data; /* I/O request sent to backing device */ @@ -1115,7 +1115,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, bio->bi_private = ddip; if ((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(dc->bdev))) + !bdev_max_discard_sectors(dc->bdev)) bio->bi_end_io(bio); else submit_bio_noacct(bio); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index bf3de149d3c9..2f49e31142f6 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -973,7 +973,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size, blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); - blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); blk_queue_write_cache(q, true, true); @@ -2350,7 +2349,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk, ca->bdev->bd_holder = ca; ca->sb_disk = sb_disk; - if (blk_queue_discard(bdev_get_queue(bdev))) + if (bdev_max_discard_sectors((bdev))) ca->discard = CACHE_DISCARD(&ca->sb); ret = cache_alloc(ca); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index d1029d71ff3b..c6f677059214 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -1151,7 +1151,7 @@ STORE(__bch_cache) if (attr == &sysfs_discard) { bool v = strtoul_or_return(buf); - if (blk_queue_discard(bdev_get_queue(ca->bdev))) + if (bdev_max_discard_sectors(ca->bdev)) ca->discard = v; if (v != CACHE_DISCARD(&ca->sb)) { diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index e9cbc70d5a0e..5ffa1dcf84cf 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -611,7 +611,8 @@ static void bio_complete(struct bio *bio) { struct dm_buffer *b = bio->bi_private; blk_status_t status = bio->bi_status; - bio_put(bio); + bio_uninit(bio); + kfree(bio); b->end_io(b, status); } @@ -626,16 +627,14 @@ static void use_bio(struct dm_buffer *b, int rw, sector_t sector, if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT)) vec_size += 2; - bio = bio_kmalloc(GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN, vec_size); + bio = bio_kmalloc(vec_size, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN); if (!bio) { dmio: use_dmio(b, rw, sector, n_sectors, offset); return; } - + bio_init(bio, b->c->bdev, bio->bi_inline_vecs, vec_size, rw); bio->bi_iter.bi_sector = sector; - bio_set_dev(bio, b->c->bdev); - bio_set_op_attrs(bio, rw, 0); bio->bi_end_io = bio_complete; bio->bi_private = b; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 780a61bc6cc0..28c5de8eca4a 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -3329,13 +3329,6 @@ static int cache_iterate_devices(struct dm_target *ti, return r; } -static bool origin_dev_supports_discard(struct block_device *origin_bdev) -{ - struct request_queue *q = bdev_get_queue(origin_bdev); - - return blk_queue_discard(q); -} - /* * If discard_passdown was enabled verify that the origin device * supports discards. Disable discard_passdown if not. 
@@ -3349,7 +3342,7 @@ static void disable_passdown_if_not_supported(struct cache *cache) if (!cache->features.discard_passdown) return; - if (!origin_dev_supports_discard(origin_bdev)) + if (!bdev_max_discard_sectors(origin_bdev)) reason = "discard unsupported"; else if (origin_limits->max_discard_sectors < cache->sectors_per_block) diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 128316a73d01..811b0a5379d0 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -2016,13 +2016,6 @@ static void clone_resume(struct dm_target *ti) do_waker(&clone->waker.work); } -static bool bdev_supports_discards(struct block_device *bdev) -{ - struct request_queue *q = bdev_get_queue(bdev); - - return (q && blk_queue_discard(q)); -} - /* * If discard_passdown was enabled verify that the destination device supports * discards. Disable discard_passdown if not. @@ -2036,7 +2029,7 @@ static void disable_passdown_if_not_supported(struct clone *clone) if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) return; - if (!bdev_supports_discards(dest_dev)) + if (!bdev_max_discard_sectors(dest_dev)) reason = "discard unsupported"; else if (dest_limits->max_discard_sectors < clone->region_size) reason = "max discard sectors smaller than a region"; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index ad2d5faa2ebb..36ae30b73a6e 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -4399,6 +4399,7 @@ try_smaller_buffer: } if (ic->internal_hash) { + size_t recalc_tags_size; ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); if (!ic->recalc_wq ) { ti->error = "Cannot allocate workqueue"; @@ -4412,8 +4413,10 @@ try_smaller_buffer: r = -ENOMEM; goto bad; } - ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, - ic->tag_size, GFP_KERNEL); + recalc_tags_size = (RECALC_SECTORS >> ic->sb->log2_sectors_per_block) * ic->tag_size; + if (crypto_shash_digestsize(ic->internal_hash) > ic->tag_size) + recalc_tags_size += crypto_shash_digestsize(ic->internal_hash) - ic->tag_size; + ic->recalc_tags = kvmalloc(recalc_tags_size, GFP_KERNEL); if (!ic->recalc_tags) { ti->error = "Cannot allocate tags for recalculating"; r = -ENOMEM; diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 5762366333a2..e4b95eaeec8c 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -311,7 +311,7 @@ static void do_region(int op, int op_flags, unsigned region, * Reject unsupported discard and write same requests. 
*/ if (op == REQ_OP_DISCARD) - special_cmd_max_sectors = q->limits.max_discard_sectors; + special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev); else if (op == REQ_OP_WRITE_ZEROES) special_cmd_max_sectors = q->limits.max_write_zeroes_sectors; if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) && diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index c9d036d6bb2e..e194226c89e5 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -866,9 +866,8 @@ static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv, static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct log_writes_c *lc = ti->private; - struct request_queue *q = bdev_get_queue(lc->dev->bdev); - if (!q || !blk_queue_discard(q)) { + if (!bdev_max_discard_sectors(lc->dev->bdev)) { lc->device_supports_discard = false; limits->discard_granularity = lc->sectorsize; limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT); diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c index 875bca30a0dd..82f2a06153dc 100644 --- a/drivers/md/dm-ps-historical-service-time.c +++ b/drivers/md/dm-ps-historical-service-time.c @@ -27,7 +27,6 @@ #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/module.h> -#include <linux/sched/clock.h> #define DM_MSG_PREFIX "multipath historical-service-time" @@ -433,7 +432,7 @@ static struct dm_path *hst_select_path(struct path_selector *ps, { struct selector *s = ps->context; struct path_info *pi = NULL, *best = NULL; - u64 time_now = sched_clock(); + u64 time_now = ktime_get_ns(); struct dm_path *ret = NULL; unsigned long flags; @@ -474,7 +473,7 @@ static int hst_start_io(struct path_selector *ps, struct dm_path *path, static u64 path_service_time(struct path_info *pi, u64 start_time) { - u64 sched_now = ktime_get_ns(); + u64 now = ktime_get_ns(); /* if a previous disk request has finished after this IO was * sent to the hardware, pretend the submission happened @@ -483,11 +482,11 @@ static u64 path_service_time(struct path_info *pi, u64 start_time) if (time_after64(pi->last_finish, start_time)) start_time = pi->last_finish; - pi->last_finish = sched_now; - if (time_before64(sched_now, start_time)) + pi->last_finish = now; + if (time_before64(now, start_time)) return 0; - return sched_now - start_time; + return now - start_time; } static int hst_end_io(struct path_selector *ps, struct dm_path *path, diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 2b26435a6946..9526ccbedafb 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2963,13 +2963,8 @@ static void configure_discard_support(struct raid_set *rs) raid456 = rs_is_raid456(rs); for (i = 0; i < rs->raid_disks; i++) { - struct request_queue *q; - - if (!rs->dev[i].rdev.bdev) - continue; - - q = bdev_get_queue(rs->dev[i].rdev.bdev); - if (!q || !blk_queue_discard(q)) + if (!rs->dev[i].rdev.bdev || + !bdev_max_discard_sectors(rs->dev[i].rdev.bdev)) return; if (raid456) { diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 03541cfc2317..e7d42f6335a2 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1820,9 +1820,7 @@ static int device_dax_write_cache_enabled(struct dm_target *ti, static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - - return !blk_queue_nonrot(q); + return !bdev_nonrot(dev->bdev); } static int 
device_is_not_random(struct dm_target *ti, struct dm_dev *dev, @@ -1890,9 +1888,7 @@ static bool dm_table_supports_nowait(struct dm_table *t) static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - - return !blk_queue_discard(q); + return !bdev_max_discard_sectors(dev->bdev); } static bool dm_table_supports_discards(struct dm_table *t) @@ -1924,9 +1920,7 @@ static int device_not_secure_erase_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - - return !blk_queue_secure_erase(q); + return !bdev_max_secure_erase_sectors(dev->bdev); } static bool dm_table_supports_secure_erase(struct dm_table *t) @@ -1952,9 +1946,7 @@ static int device_requires_stable_pages(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { - struct request_queue *q = bdev_get_queue(dev->bdev); - - return blk_queue_stable_writes(q); + return bdev_stable_writes(dev->bdev); } int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, @@ -1974,18 +1966,15 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q); if (!dm_table_supports_discards(t)) { - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); - /* Must also clear discard limits... */ q->limits.max_discard_sectors = 0; q->limits.max_hw_discard_sectors = 0; q->limits.discard_granularity = 0; q->limits.discard_alignment = 0; q->limits.discard_misaligned = 0; - } else - blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); + } - if (dm_table_supports_secure_erase(t)) - blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); + if (!dm_table_supports_secure_erase(t)) + q->limits.max_secure_erase_sectors = 0; if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { wc = true; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 4d25d0e27031..84c083f76673 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -398,8 +398,8 @@ static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t da sector_t s = block_to_sectors(tc->pool, data_b); sector_t len = block_to_sectors(tc->pool, data_e - data_b); - return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, - GFP_NOWAIT, 0, &op->bio); + return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOWAIT, + &op->bio); } static void end_discard(struct discard_op *op, int r) @@ -2802,13 +2802,6 @@ static void requeue_bios(struct pool *pool) /*---------------------------------------------------------------- * Binding of control targets to a pool object *--------------------------------------------------------------*/ -static bool data_dev_supports_discard(struct pool_c *pt) -{ - struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); - - return blk_queue_discard(q); -} - static bool is_factor(sector_t block_size, uint32_t n) { return !sector_div(block_size, n); @@ -2828,7 +2821,7 @@ static void disable_passdown_if_not_supported(struct pool_c *pt) if (!pt->adjusted_pf.discard_passdown) return; - if (!data_dev_supports_discard(pt)) + if (!bdev_max_discard_sectors(pt->data_dev->bdev)) reason = "discard unsupported"; else if (data_limits->max_discard_sectors < pool->sectors_per_block) @@ -4057,8 +4050,6 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) /* * Must explicitly disallow stacking discard limits otherwise the * block layer will stack them if pool's 
data device has support. - * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the - * user to see that, so make sure to set all discard limits to 0. */ limits->discard_granularity = 0; return; diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index c1ca9be4b79e..57daa86c19cf 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -360,16 +360,20 @@ static int dm_update_zone_wp_offset(struct mapped_device *md, unsigned int zno, return 0; } +struct orig_bio_details { + unsigned int op; + unsigned int nr_sectors; +}; + /* * First phase of BIO mapping for targets with zone append emulation: * check all BIO that change a zone writer pointer and change zone * append operations into regular write operations. */ static bool dm_zone_map_bio_begin(struct mapped_device *md, - struct bio *orig_bio, struct bio *clone) + unsigned int zno, struct bio *clone) { sector_t zsectors = blk_queue_zone_sectors(md->queue); - unsigned int zno = bio_zone_no(orig_bio); unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]); /* @@ -384,7 +388,7 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md, WRITE_ONCE(md->zwp_offset[zno], zwp_offset); } - switch (bio_op(orig_bio)) { + switch (bio_op(clone)) { case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_FINISH: return true; @@ -401,9 +405,8 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md, * target zone. */ clone->bi_opf = REQ_OP_WRITE | REQ_NOMERGE | - (orig_bio->bi_opf & (~REQ_OP_MASK)); - clone->bi_iter.bi_sector = - orig_bio->bi_iter.bi_sector + zwp_offset; + (clone->bi_opf & (~REQ_OP_MASK)); + clone->bi_iter.bi_sector += zwp_offset; break; default: DMWARN_LIMIT("Invalid BIO operation"); @@ -423,11 +426,10 @@ static bool dm_zone_map_bio_begin(struct mapped_device *md, * data written to a zone. Note that at this point, the remapped clone BIO * may already have completed, so we do not touch it. */ -static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, - struct bio *orig_bio, +static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, unsigned int zno, + struct orig_bio_details *orig_bio_details, unsigned int nr_sectors) { - unsigned int zno = bio_zone_no(orig_bio); unsigned int zwp_offset = READ_ONCE(md->zwp_offset[zno]); /* The clone BIO may already have been completed and failed */ @@ -435,7 +437,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, return BLK_STS_IOERR; /* Update the zone wp offset */ - switch (bio_op(orig_bio)) { + switch (orig_bio_details->op) { case REQ_OP_ZONE_RESET: WRITE_ONCE(md->zwp_offset[zno], 0); return BLK_STS_OK; @@ -452,7 +454,7 @@ static blk_status_t dm_zone_map_bio_end(struct mapped_device *md, * Check that the target did not truncate the write operation * emulating a zone append. */ - if (nr_sectors != bio_sectors(orig_bio)) { + if (nr_sectors != orig_bio_details->nr_sectors) { DMWARN_LIMIT("Truncated write for zone append"); return BLK_STS_IOERR; } @@ -488,7 +490,7 @@ static inline void dm_zone_unlock(struct request_queue *q, bio_clear_flag(clone, BIO_ZONE_WRITE_LOCKED); } -static bool dm_need_zone_wp_tracking(struct bio *orig_bio) +static bool dm_need_zone_wp_tracking(struct bio *bio) { /* * Special processing is not needed for operations that do not need the @@ -496,15 +498,15 @@ static bool dm_need_zone_wp_tracking(struct bio *orig_bio) * zones and all operations that do not modify directly a sequential * zone write pointer. 
*/ - if (op_is_flush(orig_bio->bi_opf) && !bio_sectors(orig_bio)) + if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) return false; - switch (bio_op(orig_bio)) { + switch (bio_op(bio)) { case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE: case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_FINISH: case REQ_OP_ZONE_APPEND: - return bio_zone_is_seq(orig_bio); + return bio_zone_is_seq(bio); default: return false; } @@ -519,8 +521,8 @@ int dm_zone_map_bio(struct dm_target_io *tio) struct dm_target *ti = tio->ti; struct mapped_device *md = io->md; struct request_queue *q = md->queue; - struct bio *orig_bio = io->orig_bio; struct bio *clone = &tio->clone; + struct orig_bio_details orig_bio_details; unsigned int zno; blk_status_t sts; int r; @@ -529,18 +531,21 @@ int dm_zone_map_bio(struct dm_target_io *tio) * IOs that do not change a zone write pointer do not need * any additional special processing. */ - if (!dm_need_zone_wp_tracking(orig_bio)) + if (!dm_need_zone_wp_tracking(clone)) return ti->type->map(ti, clone); /* Lock the target zone */ - zno = bio_zone_no(orig_bio); + zno = bio_zone_no(clone); dm_zone_lock(q, zno, clone); + orig_bio_details.nr_sectors = bio_sectors(clone); + orig_bio_details.op = bio_op(clone); + /* * Check that the bio and the target zone write pointer offset are * both valid, and if the bio is a zone append, remap it to a write. */ - if (!dm_zone_map_bio_begin(md, orig_bio, clone)) { + if (!dm_zone_map_bio_begin(md, zno, clone)) { dm_zone_unlock(q, zno, clone); return DM_MAPIO_KILL; } @@ -560,7 +565,8 @@ int dm_zone_map_bio(struct dm_target_io *tio) * The target submitted the clone BIO. The target zone will * be unlocked on completion of the clone. */ - sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr); + sts = dm_zone_map_bio_end(md, zno, &orig_bio_details, + *tio->len_ptr); break; case DM_MAPIO_REMAPPED: /* @@ -568,7 +574,8 @@ int dm_zone_map_bio(struct dm_target_io *tio) * unlock the target zone here as the clone will not be * submitted. 
*/ - sts = dm_zone_map_bio_end(md, orig_bio, *tio->len_ptr); + sts = dm_zone_map_bio_end(md, zno, &orig_bio_details, + *tio->len_ptr); if (sts != BLK_STS_OK) dm_zone_unlock(q, zno, clone); break; diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c index cac295cc8840..0ec5d8b9b1a4 100644 --- a/drivers/md/dm-zoned-target.c +++ b/drivers/md/dm-zoned-target.c @@ -1001,7 +1001,7 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_min(limits, DMZ_BLOCK_SIZE); blk_limits_io_opt(limits, DMZ_BLOCK_SIZE); - limits->discard_alignment = DMZ_BLOCK_SIZE; + limits->discard_alignment = 0; limits->discard_granularity = DMZ_BLOCK_SIZE; limits->max_discard_sectors = chunk_sectors; limits->max_hw_discard_sectors = chunk_sectors; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 3c5fad7c4ee6..39081338ca61 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -955,7 +955,6 @@ void disable_discard(struct mapped_device *md) /* device doesn't really support DISCARD, disable it */ limits->max_discard_sectors = 0; - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); } void disable_write_zeroes(struct mapped_device *md) @@ -982,7 +981,7 @@ static void clone_endio(struct bio *bio) if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_DISCARD && - !q->limits.max_discard_sectors) + !bdev_max_discard_sectors(bio->bi_bdev)) disable_discard(md); else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && !q->limits.max_write_zeroes_sectors) @@ -1323,8 +1322,7 @@ static void __map_bio(struct bio *clone) } static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, - struct dm_target *ti, unsigned num_bios, - unsigned *len) + struct dm_target *ti, unsigned num_bios) { struct bio *bio; int try; @@ -1335,7 +1333,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci, if (try) mutex_lock(&ci->io->md->table_devices_lock); for (bio_nr = 0; bio_nr < num_bios; bio_nr++) { - bio = alloc_tio(ci, ti, bio_nr, len, + bio = alloc_tio(ci, ti, bio_nr, NULL, try ? GFP_NOIO : GFP_NOWAIT); if (!bio) break; @@ -1363,11 +1361,11 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti, break; case 1: clone = alloc_tio(ci, ti, 0, len, GFP_NOIO); - dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); __map_bio(clone); break; default: - alloc_multiple_bios(&blist, ci, ti, num_bios, len); + /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */ + alloc_multiple_bios(&blist, ci, ti, num_bios); while ((clone = bio_list_pop(&blist))) { dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO); __map_bio(clone); @@ -1392,6 +1390,7 @@ static void __send_empty_flush(struct clone_info *ci) ci->bio = &flush_bio; ci->sector_count = 0; + ci->io->tio.clone.bi_iter.bi_size = 0; while ((ti = dm_table_get_target(ci->map, target_nr++))) __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); @@ -1407,14 +1406,10 @@ static void __send_changing_extent_only(struct clone_info *ci, struct dm_target len = min_t(sector_t, ci->sector_count, max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); - /* - * dm_accept_partial_bio cannot be used with duplicate bios, - * so update clone_info cursor before __send_duplicate_bios(). 
- */ + __send_duplicate_bios(ci, ti, num_bios, &len); + ci->sector += len; ci->sector_count -= len; - - __send_duplicate_bios(ci, ti, num_bios, &len); } static bool is_abnormal_io(struct bio *bio) diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index bfd6026d7809..d87f674ab762 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -639,14 +639,6 @@ re_read: daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); sectors_reserved = le32_to_cpu(sb->sectors_reserved); - /* Setup nodes/clustername only if bitmap version is - * cluster-compatible - */ - if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) { - nodes = le32_to_cpu(sb->nodes); - strlcpy(bitmap->mddev->bitmap_info.cluster_name, - sb->cluster_name, 64); - } /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) @@ -668,6 +660,16 @@ re_read: goto out; } + /* + * Setup nodes/clustername only if bitmap version is + * cluster-compatible + */ + if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) { + nodes = le32_to_cpu(sb->nodes); + strscpy(bitmap->mddev->bitmap_info.cluster_name, + sb->cluster_name, 64); + } + /* keep the array size field of the bitmap superblock up to date */ sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors); @@ -695,14 +697,13 @@ re_read: if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN) set_bit(BITMAP_HOSTENDIAN, &bitmap->flags); bitmap->events_cleared = le64_to_cpu(sb->events_cleared); - strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); err = 0; out: kunmap_atomic(sb); - /* Assigning chunksize is required for "re_read" */ - bitmap->mddev->bitmap_info.chunksize = chunksize; if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { + /* Assigning chunksize is required for "re_read" */ + bitmap->mddev->bitmap_info.chunksize = chunksize; err = md_setup_cluster(bitmap->mddev, nodes); if (err) { pr_warn("%s: Could not setup cluster service (%d)\n", @@ -713,18 +714,18 @@ out: goto re_read; } - out_no_sb: - if (test_bit(BITMAP_STALE, &bitmap->flags)) - bitmap->events_cleared = bitmap->mddev->events; - bitmap->mddev->bitmap_info.chunksize = chunksize; - bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; - bitmap->mddev->bitmap_info.max_write_behind = write_behind; - bitmap->mddev->bitmap_info.nodes = nodes; - if (bitmap->mddev->bitmap_info.space == 0 || - bitmap->mddev->bitmap_info.space > sectors_reserved) - bitmap->mddev->bitmap_info.space = sectors_reserved; - if (err) { + if (err == 0) { + if (test_bit(BITMAP_STALE, &bitmap->flags)) + bitmap->events_cleared = bitmap->mddev->events; + bitmap->mddev->bitmap_info.chunksize = chunksize; + bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep; + bitmap->mddev->bitmap_info.max_write_behind = write_behind; + bitmap->mddev->bitmap_info.nodes = nodes; + if (bitmap->mddev->bitmap_info.space == 0 || + bitmap->mddev->bitmap_info.space > sectors_reserved) + bitmap->mddev->bitmap_info.space = sectors_reserved; + } else { md_bitmap_print_sb(bitmap); if (bitmap->cluster_slot < 0) md_cluster_stop(bitmap->mddev); diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 1c8a06b77c85..37cbcce3cc66 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -201,7 +201,7 @@ static struct dlm_lock_resource *lockres_init(struct mddev *mddev, pr_err("md-cluster: Unable to allocate resource name for resource %s\n", name); goto out_err; } - strlcpy(res->name, name, namelen + 1); + strscpy(res->name, name, namelen + 
1); if (with_lvb) { res->lksb.sb_lvbptr = kzalloc(LVB_SIZE, GFP_KERNEL); if (!res->lksb.sb_lvbptr) { diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 0f55b079371b..138a3b25c5c8 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -64,7 +64,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) struct linear_conf *conf; struct md_rdev *rdev; int i, cnt; - bool discard_supported = false; conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL); if (!conf) @@ -96,9 +95,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) conf->array_sectors += rdev->sectors; cnt++; - - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; } if (cnt != raid_disks) { pr_warn("md/linear:%s: not enough drives present. Aborting!\n", @@ -106,11 +102,6 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) goto out; } - if (!discard_supported) - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); - else - blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); - /* * Here we calculate the device offsets. */ @@ -252,7 +243,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) start_sector + data_offset; if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) { + !bdev_max_discard_sectors(bio->bi_bdev))) { /* Just ignore it */ bio_endio(bio); } else { diff --git a/drivers/md/md.c b/drivers/md/md.c index 309b3af906ad..707e802d0082 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2627,14 +2627,16 @@ static void sync_sbs(struct mddev *mddev, int nospares) static bool does_sb_need_changing(struct mddev *mddev) { - struct md_rdev *rdev; + struct md_rdev *rdev = NULL, *iter; struct mdp_superblock_1 *sb; int role; /* Find a good rdev */ - rdev_for_each(rdev, mddev) - if ((rdev->raid_disk >= 0) && !test_bit(Faulty, &rdev->flags)) + rdev_for_each(iter, mddev) + if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) { + rdev = iter; break; + } /* No good device found. */ if (!rdev) @@ -2645,11 +2647,11 @@ static bool does_sb_need_changing(struct mddev *mddev) rdev_for_each(rdev, mddev) { role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); /* Device activated? */ - if (role == 0xffff && rdev->raid_disk >=0 && + if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags)) return true; /* Device turned faulty? */ - if (test_bit(Faulty, &rdev->flags) && (role < 0xfffd)) + if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX)) return true; } @@ -2984,10 +2986,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) if (cmd_match(buf, "faulty") && rdev->mddev->pers) { md_error(rdev->mddev, rdev); - if (test_bit(Faulty, &rdev->flags)) - err = 0; - else + + if (test_bit(MD_BROKEN, &rdev->mddev->flags)) err = -EBUSY; + else + err = 0; } else if (cmd_match(buf, "remove")) { if (rdev->mddev->pers) { clear_bit(Blocked, &rdev->flags); @@ -4028,7 +4031,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) oldpriv = mddev->private; mddev->pers = pers; mddev->private = priv; - strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); + strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); mddev->level = mddev->new_level; mddev->layout = mddev->new_layout; mddev->chunk_sectors = mddev->new_chunk_sectors; @@ -4353,10 +4356,9 @@ __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, * like active, but no writes have been seen for a while (100msec). 
* * broken - * RAID0/LINEAR-only: same as clean, but array is missing a member. - * It's useful because RAID0/LINEAR mounted-arrays aren't stopped - * when a member is gone, so this state will at least alert the - * user that something is wrong. +* Array is failed. It's useful because mounted-arrays aren't stopped +* when array is failed, so this state will at least alert the user that +* something is wrong. */ enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, write_pending, active_idle, broken, bad_word}; @@ -5763,7 +5765,7 @@ static int add_named_array(const char *val, const struct kernel_param *kp) len--; if (len >= DISK_NAME_LEN) return -E2BIG; - strlcpy(buf, val, len+1); + strscpy(buf, val, len+1); if (strncmp(buf, "md_", 3) == 0) return md_alloc(0, buf); if (strncmp(buf, "md", 2) == 0 && @@ -5896,7 +5898,7 @@ int md_run(struct mddev *mddev) mddev->level = pers->level; mddev->new_level = pers->level; } - strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); + strscpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); if (mddev->reshape_position != MaxSector && pers->start_reshape == NULL) { @@ -5991,8 +5993,7 @@ int md_run(struct mddev *mddev) bool nonrot = true; rdev_for_each(rdev, mddev) { - if (rdev->raid_disk >= 0 && - !blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { + if (rdev->raid_disk >= 0 && !bdev_nonrot(rdev->bdev)) { nonrot = false; break; } @@ -7444,7 +7445,7 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev) err = -ENODEV; else { md_error(mddev, rdev); - if (!test_bit(Faulty, &rdev->flags)) + if (test_bit(MD_BROKEN, &mddev->flags)) err = -EBUSY; } rcu_read_unlock(); @@ -7985,13 +7986,16 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev) if (!mddev->pers || !mddev->pers->error_handler) return; - mddev->pers->error_handler(mddev,rdev); - if (mddev->degraded) + mddev->pers->error_handler(mddev, rdev); + + if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); sysfs_notify_dirent_safe(rdev->sysfs_state); set_bit(MD_RECOVERY_INTR, &mddev->recovery); - set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - md_wakeup_thread(mddev->thread); + if (!test_bit(MD_BROKEN, &mddev->flags)) { + set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); + md_wakeup_thread(mddev->thread); + } if (mddev->event_work.func) queue_work(md_misc_wq, &mddev->event_work); md_new_event(); @@ -8585,7 +8589,7 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, { struct bio *discard_bio = NULL; - if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 0, + if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, &discard_bio) || !discard_bio) return; @@ -9671,7 +9675,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); if (test_bit(Candidate, &rdev2->flags)) { - if (role == 0xfffe) { + if (role == MD_DISK_ROLE_FAULTY) { pr_info("md: Removing Candidate device %s because add failed\n", bdevname(rdev2->bdev,b)); md_kick_rdev_from_array(rdev2); continue; @@ -9684,7 +9688,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) /* * got activated except reshape is happening. */ - if (rdev2->raid_disk == -1 && role != 0xffff && + if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { rdev2->saved_raid_disk = role; @@ -9701,7 +9705,8 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) * as faulty. 
The recovery is performed by the * one who initiated the error. */ - if ((role == 0xfffe) || (role == 0xfffd)) { + if (role == MD_DISK_ROLE_FAULTY || + role == MD_DISK_ROLE_JOURNAL) { md_error(mddev, rdev2); clear_bit(Blocked, &rdev2->flags); } @@ -9791,16 +9796,18 @@ static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) void md_reload_sb(struct mddev *mddev, int nr) { - struct md_rdev *rdev; + struct md_rdev *rdev = NULL, *iter; int err; /* Find the rdev */ - rdev_for_each_rcu(rdev, mddev) { - if (rdev->desc_nr == nr) + rdev_for_each_rcu(iter, mddev) { + if (iter->desc_nr == nr) { + rdev = iter; break; + } } - if (!rdev || rdev->desc_nr != nr) { + if (!rdev) { pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr); return; } diff --git a/drivers/md/md.h b/drivers/md/md.h index 6ac283864533..cf2cbb17acbd 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -234,34 +234,42 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, int is_new); struct md_cluster_info; -/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */ +/** + * enum mddev_flags - md device flags. + * @MD_ARRAY_FIRST_USE: First use of array, needs initialization. + * @MD_CLOSING: If set, we are closing the array, do not open it then. + * @MD_JOURNAL_CLEAN: A raid with journal is already clean. + * @MD_HAS_JOURNAL: The raid array has journal feature set. + * @MD_CLUSTER_RESYNC_LOCKED: cluster raid only, which means node, already took + * resync lock, need to release the lock. + * @MD_FAILFAST_SUPPORTED: Using MD_FAILFAST on metadata writes is supported as + * calls to md_error() will never cause the array to + * become failed. + * @MD_HAS_PPL: The raid array has PPL feature set. + * @MD_HAS_MULTIPLE_PPLS: The raid array has multiple PPLs feature set. + * @MD_ALLOW_SB_UPDATE: md_check_recovery is allowed to update the metadata + * without taking reconfig_mutex. + * @MD_UPDATING_SB: md_check_recovery is updating the metadata without + * explicitly holding reconfig_mutex. + * @MD_NOT_READY: do_md_run() is active, so 'array_state', ust not report that + * array is ready yet. + * @MD_BROKEN: This is used to stop writes and mark array as failed. + * + * change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added + */ enum mddev_flags { - MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */ - MD_CLOSING, /* If set, we are closing the array, do not open - * it then */ - MD_JOURNAL_CLEAN, /* A raid with journal is already clean */ - MD_HAS_JOURNAL, /* The raid array has journal feature set */ - MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node - * already took resync lock, need to - * release the lock */ - MD_FAILFAST_SUPPORTED, /* Using MD_FAILFAST on metadata writes is - * supported as calls to md_error() will - * never cause the array to become failed. - */ - MD_HAS_PPL, /* The raid array has PPL feature set */ - MD_HAS_MULTIPLE_PPLS, /* The raid array has multiple PPLs feature set */ - MD_ALLOW_SB_UPDATE, /* md_check_recovery is allowed to update - * the metadata without taking reconfig_mutex. - */ - MD_UPDATING_SB, /* md_check_recovery is updating the metadata - * without explicitly holding reconfig_mutex. - */ - MD_NOT_READY, /* do_md_run() is active, so 'array_state' - * must not report that array is ready yet - */ - MD_BROKEN, /* This is used in RAID-0/LINEAR only, to stop - * I/O in case an array member is gone/failed. 
- */ + MD_ARRAY_FIRST_USE, + MD_CLOSING, + MD_JOURNAL_CLEAN, + MD_HAS_JOURNAL, + MD_CLUSTER_RESYNC_LOCKED, + MD_FAILFAST_SUPPORTED, + MD_HAS_PPL, + MD_HAS_MULTIPLE_PPLS, + MD_ALLOW_SB_UPDATE, + MD_UPDATING_SB, + MD_NOT_READY, + MD_BROKEN, }; enum mddev_sb_flags { diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index b21e101183f4..e11701e394ca 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -128,21 +128,6 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) pr_debug("md/raid0:%s: FINAL %d zones\n", mdname(mddev), conf->nr_strip_zones); - if (conf->nr_strip_zones == 1) { - conf->layout = RAID0_ORIG_LAYOUT; - } else if (mddev->layout == RAID0_ORIG_LAYOUT || - mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { - conf->layout = mddev->layout; - } else if (default_layout == RAID0_ORIG_LAYOUT || - default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { - conf->layout = default_layout; - } else { - pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n", - mdname(mddev)); - pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n"); - err = -ENOTSUPP; - goto abort; - } /* * now since we have the hard sector sizes, we can make sure * chunk size is a multiple of that sector size @@ -273,6 +258,22 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf) (unsigned long long)smallest->sectors); } + if (conf->nr_strip_zones == 1 || conf->strip_zone[1].nb_dev == 1) { + conf->layout = RAID0_ORIG_LAYOUT; + } else if (mddev->layout == RAID0_ORIG_LAYOUT || + mddev->layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = mddev->layout; + } else if (default_layout == RAID0_ORIG_LAYOUT || + default_layout == RAID0_ALT_MULTIZONE_LAYOUT) { + conf->layout = default_layout; + } else { + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n", + mdname(mddev)); + pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n"); + err = -EOPNOTSUPP; + goto abort; + } + pr_debug("md/raid0:%s: done.\n", mdname(mddev)); *private_conf = conf; @@ -399,7 +400,6 @@ static int raid0_run(struct mddev *mddev) conf = mddev->private; if (mddev->queue) { struct md_rdev *rdev; - bool discard_supported = false; blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); blk_queue_max_write_zeroes_sectors(mddev->queue, mddev->chunk_sectors); @@ -412,13 +412,7 @@ static int raid0_run(struct mddev *mddev) rdev_for_each(rdev, mddev) { disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; } - if (!discard_supported) - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); - else - blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); } /* calculate array device size */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 99d5464a51f8..99d5af1362d7 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -165,9 +165,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) * Allocate bios : 1 for reading, n-1 for writing */ for (j = pi->raid_disks ; j-- ; ) { - bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); + bio = bio_kmalloc(RESYNC_PAGES, gfp_flags); if (!bio) goto out_free_bio; + bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0); r1_bio->bios[j] = bio; } /* @@ -206,8 +207,10 @@ out_free_pages: resync_free_pages(&rps[j]); out_free_bio: - while (++j < pi->raid_disks) - bio_put(r1_bio->bios[j]); + while (++j < pi->raid_disks) { + bio_uninit(r1_bio->bios[j]); + kfree(r1_bio->bios[j]); + } 
kfree(rps); out_free_r1bio: @@ -225,7 +228,8 @@ static void r1buf_pool_free(void *__r1_bio, void *data) for (i = pi->raid_disks; i--; ) { rp = get_resync_pages(r1bio->bios[i]); resync_free_pages(rp); - bio_put(r1bio->bios[i]); + bio_uninit(r1bio->bios[i]); + kfree(r1bio->bios[i]); } /* resync pages array stored in the 1st bio's .bi_private */ @@ -704,7 +708,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect /* At least two disks to choose from so failfast is OK */ set_bit(R1BIO_FailFast, &r1_bio->state); - nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); + nonrot = bdev_nonrot(rdev->bdev); has_nonrot_disk |= nonrot; pending = atomic_read(&rdev->nr_pending); dist = abs(this_sector - conf->mirrors[disk].head_position); @@ -802,7 +806,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) + !bdev_max_discard_sectors(bio->bi_bdev))) /* Just ignore it */ bio_endio(bio); else @@ -1637,30 +1641,39 @@ static void raid1_status(struct seq_file *seq, struct mddev *mddev) seq_printf(seq, "]"); } +/** + * raid1_error() - RAID1 error handler. + * @mddev: affected md device. + * @rdev: member device to fail. + * + * The routine acknowledges &rdev failure and determines new @mddev state. + * If it failed, then: + * - &MD_BROKEN flag is set in &mddev->flags. + * - recovery is disabled. + * Otherwise, it must be degraded: + * - recovery is interrupted. + * - &mddev->degraded is bumped. + * + * @rdev is marked as &Faulty excluding case when array is failed and + * &mddev->fail_last_dev is off. + */ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r1conf *conf = mddev->private; unsigned long flags; - /* - * If it is not operational, then we have already marked it as dead - * else if it is the last working disks with "fail_last_dev == false", - * ignore the error, let the next level up know. - * else mark the drive as failed - */ spin_lock_irqsave(&conf->device_lock, flags); - if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev - && (conf->raid_disks - mddev->degraded) == 1) { - /* - * Don't fail the drive, act as though we were just a - * normal single drive. - * However don't try a recovery from this drive as - * it is very likely to fail. 
- */ - conf->recovery_disabled = mddev->recovery_disabled; - spin_unlock_irqrestore(&conf->device_lock, flags); - return; + + if (test_bit(In_sync, &rdev->flags) && + (conf->raid_disks - mddev->degraded) == 1) { + set_bit(MD_BROKEN, &mddev->flags); + + if (!mddev->fail_last_dev) { + conf->recovery_disabled = mddev->recovery_disabled; + spin_unlock_irqrestore(&conf->device_lock, flags); + return; + } } set_bit(Blocked, &rdev->flags); if (test_and_clear_bit(In_sync, &rdev->flags)) @@ -1826,8 +1839,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) break; } } - if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) - blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); print_conf(conf); return err; } @@ -3106,7 +3117,6 @@ static int raid1_run(struct mddev *mddev) int i; struct md_rdev *rdev; int ret; - bool discard_supported = false; if (mddev->level != 1) { pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n", @@ -3141,8 +3151,6 @@ static int raid1_run(struct mddev *mddev) continue; disk_stack_limits(mddev->gendisk, rdev->bdev, rdev->data_offset << 9); - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; } mddev->degraded = 0; @@ -3179,15 +3187,6 @@ static int raid1_run(struct mddev *mddev) md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); - if (mddev->queue) { - if (discard_supported) - blk_queue_flag_set(QUEUE_FLAG_DISCARD, - mddev->queue); - else - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, - mddev->queue); - } - ret = md_integrity_register(mddev); if (ret) { md_unregister_thread(&mddev->thread); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index dfe7d62d3fbd..dfa576cdf11c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -145,15 +145,17 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) * Allocate bios. 
*/ for (j = nalloc ; j-- ; ) { - bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); + bio = bio_kmalloc(RESYNC_PAGES, gfp_flags); if (!bio) goto out_free_bio; + bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0); r10_bio->devs[j].bio = bio; if (!conf->have_replacement) continue; - bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); + bio = bio_kmalloc(RESYNC_PAGES, gfp_flags); if (!bio) goto out_free_bio; + bio_init(bio, NULL, bio->bi_inline_vecs, RESYNC_PAGES, 0); r10_bio->devs[j].repl_bio = bio; } /* @@ -197,9 +199,11 @@ out_free_pages: out_free_bio: for ( ; j < nalloc; j++) { if (r10_bio->devs[j].bio) - bio_put(r10_bio->devs[j].bio); + bio_uninit(r10_bio->devs[j].bio); + kfree(r10_bio->devs[j].bio); if (r10_bio->devs[j].repl_bio) - bio_put(r10_bio->devs[j].repl_bio); + bio_uninit(r10_bio->devs[j].repl_bio); + kfree(r10_bio->devs[j].repl_bio); } kfree(rps); out_free_r10bio: @@ -220,12 +224,15 @@ static void r10buf_pool_free(void *__r10_bio, void *data) if (bio) { rp = get_resync_pages(bio); resync_free_pages(rp); - bio_put(bio); + bio_uninit(bio); + kfree(bio); } bio = r10bio->devs[j].repl_bio; - if (bio) - bio_put(bio); + if (bio) { + bio_uninit(bio); + kfree(bio); + } } /* resync pages array stored in the 1st bio's .bi_private */ @@ -796,7 +803,7 @@ static struct md_rdev *read_balance(struct r10conf *conf, if (!do_balance) break; - nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); + nonrot = bdev_nonrot(rdev->bdev); has_nonrot_disk |= nonrot; pending = atomic_read(&rdev->nr_pending); if (min_pending > pending && nonrot) { @@ -888,7 +895,7 @@ static void flush_pending_writes(struct r10conf *conf) if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) + !bdev_max_discard_sectors(bio->bi_bdev))) /* Just ignore it */ bio_endio(bio); else @@ -1083,7 +1090,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) + !bdev_max_discard_sectors(bio->bi_bdev))) /* Just ignore it */ bio_endio(bio); else @@ -1963,32 +1970,40 @@ static int enough(struct r10conf *conf, int ignore) _enough(conf, 1, ignore); } +/** + * raid10_error() - RAID10 error handler. + * @mddev: affected md device. + * @rdev: member device to fail. + * + * The routine acknowledges &rdev failure and determines new @mddev state. + * If it failed, then: + * - &MD_BROKEN flag is set in &mddev->flags. + * Otherwise, it must be degraded: + * - recovery is interrupted. + * - &mddev->degraded is bumped. + + * @rdev is marked as &Faulty excluding case when array is failed and + * &mddev->fail_last_dev is off. + */ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r10conf *conf = mddev->private; unsigned long flags; - /* - * If it is not operational, then we have already marked it as dead - * else if it is the last working disks with "fail_last_dev == false", - * ignore the error, let the next level up know. - * else mark the drive as failed - */ spin_lock_irqsave(&conf->device_lock, flags); - if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev - && !enough(conf, rdev->raid_disk)) { - /* - * Don't fail the drive, just return an IO error. 
- */ - spin_unlock_irqrestore(&conf->device_lock, flags); - return; + + if (test_bit(In_sync, &rdev->flags) && !enough(conf, rdev->raid_disk)) { + set_bit(MD_BROKEN, &mddev->flags); + + if (!mddev->fail_last_dev) { + spin_unlock_irqrestore(&conf->device_lock, flags); + return; + } } if (test_and_clear_bit(In_sync, &rdev->flags)) mddev->degraded++; - /* - * If recovery is running, make sure it aborts. - */ + set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); set_bit(Faulty, &rdev->flags); @@ -2144,8 +2159,6 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) rcu_assign_pointer(p->rdev, rdev); break; } - if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) - blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); print_conf(conf); return err; @@ -4069,7 +4082,6 @@ static int raid10_run(struct mddev *mddev) sector_t size; sector_t min_offset_diff = 0; int first = 1; - bool discard_supported = false; if (mddev_init_writes_pending(mddev) < 0) return -ENOMEM; @@ -4140,20 +4152,9 @@ static int raid10_run(struct mddev *mddev) rdev->data_offset << 9); disk->head_position = 0; - - if (blk_queue_discard(bdev_get_queue(rdev->bdev))) - discard_supported = true; first = 0; } - if (mddev->queue) { - if (discard_supported) - blk_queue_flag_set(QUEUE_FLAG_DISCARD, - mddev->queue); - else - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, - mddev->queue); - } /* need to check that every block has at least one working mirror */ if (!enough(conf, -1)) { pr_err("md/raid10:%s: not enough operational mirrors.\n", diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index a7d50ff9020a..094a4042589e 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1318,7 +1318,7 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log, r5l_write_super(log, end); - if (!blk_queue_discard(bdev_get_queue(bdev))) + if (!bdev_max_discard_sectors(bdev)) return; mddev = log->rdev->mddev; @@ -1344,14 +1344,14 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log, if (log->last_checkpoint < end) { blkdev_issue_discard(bdev, log->last_checkpoint + log->rdev->data_offset, - end - log->last_checkpoint, GFP_NOIO, 0); + end - log->last_checkpoint, GFP_NOIO); } else { blkdev_issue_discard(bdev, log->last_checkpoint + log->rdev->data_offset, log->device_size - log->last_checkpoint, - GFP_NOIO, 0); + GFP_NOIO); blkdev_issue_discard(bdev, log->rdev->data_offset, end, - GFP_NOIO, 0); + GFP_NOIO); } } diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index d3962d92df18..55d065a87b89 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -883,7 +883,9 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, (unsigned long long)r_sector, dd_idx, (unsigned long long)sector); - rdev = conf->disks[dd_idx].rdev; + /* Array has not started so rcu dereference is safe */ + rdev = rcu_dereference_protected( + conf->disks[dd_idx].rdev, 1); if (!rdev || (!test_bit(In_sync, &rdev->flags) && sector >= rdev->recovery_offset)) { pr_debug("%s:%*s data member disk %d missing\n", @@ -934,7 +936,10 @@ static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, parity_sector = raid5_compute_sector(conf, r_sector_first + i, 0, &disk, &sh); BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk)); - parity_rdev = conf->disks[sh.pd_idx].rdev; + + /* Array has not started so rcu dereference is safe */ + parity_rdev = rcu_dereference_protected( + conf->disks[sh.pd_idx].rdev, 1); 
BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev); pr_debug("%s:%*s write parity at sector %llu, disk %s\n", @@ -1404,7 +1409,9 @@ int ppl_init_log(struct r5conf *conf) for (i = 0; i < ppl_conf->count; i++) { struct ppl_log *log = &ppl_conf->child_logs[i]; - struct md_rdev *rdev = conf->disks[i].rdev; + /* Array has not started so rcu dereference is safe */ + struct md_rdev *rdev = + rcu_dereference_protected(conf->disks[i].rdev, 1); mutex_init(&log->io_mutex); spin_lock_init(&log->io_list_lock); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 351d341a1ffa..39038fa8b1c8 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -79,18 +79,21 @@ static inline int stripe_hash_locks_hash(struct r5conf *conf, sector_t sect) } static inline void lock_device_hash_lock(struct r5conf *conf, int hash) + __acquires(&conf->device_lock) { spin_lock_irq(conf->hash_locks + hash); spin_lock(&conf->device_lock); } static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) + __releases(&conf->device_lock) { spin_unlock(&conf->device_lock); spin_unlock_irq(conf->hash_locks + hash); } static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) + __acquires(&conf->device_lock) { int i; spin_lock_irq(conf->hash_locks); @@ -100,6 +103,7 @@ static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) } static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) + __releases(&conf->device_lock) { int i; spin_unlock(&conf->device_lock); @@ -164,6 +168,7 @@ static bool stripe_is_lowprio(struct stripe_head *sh) } static void raid5_wakeup_stripe_thread(struct stripe_head *sh) + __must_hold(&sh->raid_conf->device_lock) { struct r5conf *conf = sh->raid_conf; struct r5worker_group *group; @@ -211,6 +216,7 @@ static void raid5_wakeup_stripe_thread(struct stripe_head *sh) static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, struct list_head *temp_inactive_list) + __must_hold(&conf->device_lock) { int i; int injournal = 0; /* number of date pages with R5_InJournal */ @@ -296,6 +302,7 @@ static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, struct list_head *temp_inactive_list) + __must_hold(&conf->device_lock) { if (atomic_dec_and_test(&sh->count)) do_release_stripe(conf, sh, temp_inactive_list); @@ -350,9 +357,9 @@ static void release_inactive_stripe_list(struct r5conf *conf, } } -/* should hold conf->device_lock already */ static int release_stripe_list(struct r5conf *conf, struct list_head *temp_inactive_list) + __must_hold(&conf->device_lock) { struct stripe_head *sh, *t; int count = 0; @@ -629,6 +636,10 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, * This is because some failed devices may only affect one * of the two sections, and some non-in_sync devices may * be insync in the section most affected by failed devices. + * + * Most calls to this function hold &conf->device_lock. Calls + * in raid5_run() do not require the lock as no other threads + * have been started yet. 
*/ int raid5_calc_degraded(struct r5conf *conf) { @@ -686,17 +697,17 @@ int raid5_calc_degraded(struct r5conf *conf) return degraded; } -static int has_failed(struct r5conf *conf) +static bool has_failed(struct r5conf *conf) { - int degraded; + int degraded = conf->mddev->degraded; - if (conf->mddev->reshape_position == MaxSector) - return conf->mddev->degraded > conf->max_degraded; + if (test_bit(MD_BROKEN, &conf->mddev->flags)) + return true; - degraded = raid5_calc_degraded(conf); - if (degraded > conf->max_degraded) - return 1; - return 0; + if (conf->mddev->reshape_position != MaxSector) + degraded = raid5_calc_degraded(conf); + + return degraded > conf->max_degraded; } struct stripe_head * @@ -2648,6 +2659,28 @@ static void shrink_stripes(struct r5conf *conf) conf->slab_cache = NULL; } +/* + * This helper wraps rcu_dereference_protected() and can be used when + * it is known that the nr_pending of the rdev is elevated. + */ +static struct md_rdev *rdev_pend_deref(struct md_rdev __rcu *rdev) +{ + return rcu_dereference_protected(rdev, + atomic_read(&rcu_access_pointer(rdev)->nr_pending)); +} + +/* + * This helper wraps rcu_dereference_protected() and should be used + * when it is known that the mddev_lock() is held. This is safe + * seeing raid5_remove_disk() has the same lock held. + */ +static struct md_rdev *rdev_mdlock_deref(struct mddev *mddev, + struct md_rdev __rcu *rdev) +{ + return rcu_dereference_protected(rdev, + lockdep_is_held(&mddev->reconfig_mutex)); +} + static void raid5_end_read_request(struct bio * bi) { struct stripe_head *sh = bi->bi_private; @@ -2674,9 +2707,9 @@ static void raid5_end_read_request(struct bio * bi) * In that case it moved down to 'rdev'. * rdev is not removed until all requests are finished. */ - rdev = conf->disks[i].replacement; + rdev = rdev_pend_deref(conf->disks[i].replacement); if (!rdev) - rdev = conf->disks[i].rdev; + rdev = rdev_pend_deref(conf->disks[i].rdev); if (use_new_offset(conf, sh)) s = sh->sector + rdev->new_data_offset; @@ -2790,11 +2823,11 @@ static void raid5_end_write_request(struct bio *bi) for (i = 0 ; i < disks; i++) { if (bi == &sh->dev[i].req) { - rdev = conf->disks[i].rdev; + rdev = rdev_pend_deref(conf->disks[i].rdev); break; } if (bi == &sh->dev[i].rreq) { - rdev = conf->disks[i].replacement; + rdev = rdev_pend_deref(conf->disks[i].replacement); if (rdev) replacement = 1; else @@ -2802,7 +2835,7 @@ static void raid5_end_write_request(struct bio *bi) * replaced it. rdev is not removed * until all requests are finished. 
*/ - rdev = conf->disks[i].rdev; + rdev = rdev_pend_deref(conf->disks[i].rdev); break; } } @@ -2863,34 +2896,31 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) unsigned long flags; pr_debug("raid456: error called\n"); + pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n", + mdname(mddev), bdevname(rdev->bdev, b)); + spin_lock_irqsave(&conf->device_lock, flags); + set_bit(Faulty, &rdev->flags); + clear_bit(In_sync, &rdev->flags); + mddev->degraded = raid5_calc_degraded(conf); - if (test_bit(In_sync, &rdev->flags) && - mddev->degraded == conf->max_degraded) { - /* - * Don't allow to achieve failed state - * Don't try to recover this device - */ + if (has_failed(conf)) { + set_bit(MD_BROKEN, &conf->mddev->flags); conf->recovery_disabled = mddev->recovery_disabled; - spin_unlock_irqrestore(&conf->device_lock, flags); - return; + + pr_crit("md/raid:%s: Cannot continue operation (%d/%d failed).\n", + mdname(mddev), mddev->degraded, conf->raid_disks); + } else { + pr_crit("md/raid:%s: Operation continuing on %d devices.\n", + mdname(mddev), conf->raid_disks - mddev->degraded); } - set_bit(Faulty, &rdev->flags); - clear_bit(In_sync, &rdev->flags); - mddev->degraded = raid5_calc_degraded(conf); spin_unlock_irqrestore(&conf->device_lock, flags); set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); set_mask_bits(&mddev->sb_flags, 0, BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); - pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" - "md/raid:%s: Operation continuing on %d devices.\n", - mdname(mddev), - bdevname(rdev->bdev, b), - mdname(mddev), - conf->raid_disks - mddev->degraded); r5c_update_on_rdev_error(mddev, rdev); } @@ -5213,23 +5243,23 @@ finish: struct r5dev *dev = &sh->dev[i]; if (test_and_clear_bit(R5_WriteError, &dev->flags)) { /* We own a safe reference to the rdev */ - rdev = conf->disks[i].rdev; + rdev = rdev_pend_deref(conf->disks[i].rdev); if (!rdev_set_badblocks(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0)) md_error(conf->mddev, rdev); rdev_dec_pending(rdev, conf->mddev); } if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { - rdev = conf->disks[i].rdev; + rdev = rdev_pend_deref(conf->disks[i].rdev); rdev_clear_badblocks(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0); rdev_dec_pending(rdev, conf->mddev); } if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { - rdev = conf->disks[i].replacement; + rdev = rdev_pend_deref(conf->disks[i].replacement); if (!rdev) /* rdev have been moved down */ - rdev = conf->disks[i].rdev; + rdev = rdev_pend_deref(conf->disks[i].rdev); rdev_clear_badblocks(rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0); rdev_dec_pending(rdev, conf->mddev); @@ -5256,6 +5286,7 @@ finish: } static void raid5_activate_delayed(struct r5conf *conf) + __must_hold(&conf->device_lock) { if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { while (!list_empty(&conf->delayed_list)) { @@ -5273,9 +5304,9 @@ static void raid5_activate_delayed(struct r5conf *conf) } static void activate_bit_delay(struct r5conf *conf, - struct list_head *temp_inactive_list) + struct list_head *temp_inactive_list) + __must_hold(&conf->device_lock) { - /* device_lock is held */ struct list_head head; list_add(&head, &conf->bitmap_list); list_del_init(&conf->bitmap_list); @@ -5500,6 +5531,7 @@ static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) * handle_list. 
*/ static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) + __must_hold(&conf->device_lock) { struct stripe_head *sh, *tmp; struct list_head *handle_list = NULL; @@ -6288,7 +6320,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n */ rcu_read_lock(); for (i = 0; i < conf->raid_disks; i++) { - struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); + struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); if (rdev == NULL || test_bit(Faulty, &rdev->flags)) still_degraded = 1; @@ -6371,8 +6403,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio, static int handle_active_stripes(struct r5conf *conf, int group, struct r5worker *worker, struct list_head *temp_inactive_list) - __releases(&conf->device_lock) - __acquires(&conf->device_lock) + __must_hold(&conf->device_lock) { struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; int i, batch_size = 0, hash; @@ -7166,7 +7197,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) int i; int group_cnt; struct r5worker_group *new_group; - int ret; + int ret = -ENOMEM; if (mddev->new_level != 5 && mddev->new_level != 4 @@ -7225,6 +7256,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) spin_lock_init(&conf->device_lock); seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock); mutex_init(&conf->cache_size_mutex); + init_waitqueue_head(&conf->wait_for_quiescent); init_waitqueue_head(&conf->wait_for_stripe); init_waitqueue_head(&conf->wait_for_overlap); @@ -7242,7 +7274,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) rdev_for_each(rdev, mddev) { if (test_bit(Journal, &rdev->flags)) continue; - if (blk_queue_nonrot(bdev_get_queue(rdev->bdev))) { + if (bdev_nonrot(rdev->bdev)) { conf->batch_bio_dispatch = false; break; } @@ -7302,11 +7334,13 @@ static struct r5conf *setup_conf(struct mddev *mddev) conf->level = mddev->new_level; conf->chunk_sectors = mddev->new_chunk_sectors; - if (raid5_alloc_percpu(conf) != 0) + ret = raid5_alloc_percpu(conf); + if (ret) goto abort; pr_debug("raid456: run(%s) called.\n", mdname(mddev)); + ret = -EIO; rdev_for_each(rdev, mddev) { raid_disk = rdev->raid_disk; if (raid_disk >= max_disks @@ -7317,11 +7351,11 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (test_bit(Replacement, &rdev->flags)) { if (disk->replacement) goto abort; - disk->replacement = rdev; + RCU_INIT_POINTER(disk->replacement, rdev); } else { if (disk->rdev) goto abort; - disk->rdev = rdev; + RCU_INIT_POINTER(disk->rdev, rdev); } if (test_bit(In_sync, &rdev->flags)) { @@ -7370,6 +7404,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (grow_stripes(conf, conf->min_nr_stripes)) { pr_warn("md/raid:%s: couldn't allocate %dkB for buffers\n", mdname(mddev), memory); + ret = -ENOMEM; goto abort; } else pr_debug("md/raid:%s: allocated %dkB\n", mdname(mddev), memory); @@ -7383,7 +7418,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) conf->shrinker.count_objects = raid5_cache_count; conf->shrinker.batch = 128; conf->shrinker.flags = 0; - if (register_shrinker(&conf->shrinker)) { + ret = register_shrinker(&conf->shrinker); + if (ret) { pr_warn("md/raid:%s: couldn't register shrinker.\n", mdname(mddev)); goto abort; @@ -7394,17 +7430,16 @@ static struct r5conf *setup_conf(struct mddev *mddev) if (!conf->thread) { pr_warn("md/raid:%s: couldn't allocate thread.\n", mdname(mddev)); + ret = -ENOMEM; goto abort; } return conf; abort: - if (conf) { + if (conf) free_conf(conf); - return ERR_PTR(-EIO); - } else - return 
ERR_PTR(-ENOMEM); + return ERR_PTR(ret); } static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) @@ -7621,17 +7656,18 @@ static int raid5_run(struct mddev *mddev) for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; i++) { - rdev = conf->disks[i].rdev; + rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); if (!rdev && conf->disks[i].replacement) { /* The replacement is all we have yet */ - rdev = conf->disks[i].replacement; + rdev = rdev_mdlock_deref(mddev, + conf->disks[i].replacement); conf->disks[i].replacement = NULL; clear_bit(Replacement, &rdev->flags); - conf->disks[i].rdev = rdev; + rcu_assign_pointer(conf->disks[i].rdev, rdev); } if (!rdev) continue; - if (conf->disks[i].replacement && + if (rcu_access_pointer(conf->disks[i].replacement) && conf->reshape_progress != MaxSector) { /* replacements and reshape simply do not mix. */ pr_warn("md: cannot handle concurrent replacement and reshape.\n"); @@ -7749,7 +7785,6 @@ static int raid5_run(struct mddev *mddev) */ stripe = stripe * PAGE_SIZE; stripe = roundup_pow_of_two(stripe); - mddev->queue->limits.discard_alignment = stripe; mddev->queue->limits.discard_granularity = stripe; blk_queue_max_write_zeroes_sectors(mddev->queue, 0); @@ -7776,14 +7811,10 @@ static int raid5_run(struct mddev *mddev) * A better idea might be to turn DISCARD into WRITE_ZEROES * requests, as that is required to be safe. */ - if (devices_handle_discard_safely && - mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && - mddev->queue->limits.discard_granularity >= stripe) - blk_queue_flag_set(QUEUE_FLAG_DISCARD, - mddev->queue); - else - blk_queue_flag_clear(QUEUE_FLAG_DISCARD, - mddev->queue); + if (!devices_handle_discard_safely || + mddev->queue->limits.max_discard_sectors < (stripe >> 9) || + mddev->queue->limits.discard_granularity < stripe) + blk_queue_max_discard_sectors(mddev->queue, 0); blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); } @@ -7832,8 +7863,8 @@ static void raid5_status(struct seq_file *seq, struct mddev *mddev) static void print_raid5_conf (struct r5conf *conf) { + struct md_rdev *rdev; int i; - struct disk_info *tmp; pr_debug("RAID conf printout:\n"); if (!conf) { @@ -7844,50 +7875,54 @@ static void print_raid5_conf (struct r5conf *conf) conf->raid_disks, conf->raid_disks - conf->mddev->degraded); + rcu_read_lock(); for (i = 0; i < conf->raid_disks; i++) { char b[BDEVNAME_SIZE]; - tmp = conf->disks + i; - if (tmp->rdev) + rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev) pr_debug(" disk %d, o:%d, dev:%s\n", - i, !test_bit(Faulty, &tmp->rdev->flags), - bdevname(tmp->rdev->bdev, b)); + i, !test_bit(Faulty, &rdev->flags), + bdevname(rdev->bdev, b)); } + rcu_read_unlock(); } static int raid5_spare_active(struct mddev *mddev) { int i; struct r5conf *conf = mddev->private; - struct disk_info *tmp; + struct md_rdev *rdev, *replacement; int count = 0; unsigned long flags; for (i = 0; i < conf->raid_disks; i++) { - tmp = conf->disks + i; - if (tmp->replacement - && tmp->replacement->recovery_offset == MaxSector - && !test_bit(Faulty, &tmp->replacement->flags) - && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { + rdev = rdev_mdlock_deref(mddev, conf->disks[i].rdev); + replacement = rdev_mdlock_deref(mddev, + conf->disks[i].replacement); + if (replacement + && replacement->recovery_offset == MaxSector + && !test_bit(Faulty, &replacement->flags) + && !test_and_set_bit(In_sync, &replacement->flags)) { /* Replacement has just become active. 
*/ - if (!tmp->rdev - || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) + if (!rdev + || !test_and_clear_bit(In_sync, &rdev->flags)) count++; - if (tmp->rdev) { + if (rdev) { /* Replaced device not technically faulty, * but we need to be sure it gets removed * and never re-added. */ - set_bit(Faulty, &tmp->rdev->flags); + set_bit(Faulty, &rdev->flags); sysfs_notify_dirent_safe( - tmp->rdev->sysfs_state); + rdev->sysfs_state); } - sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); - } else if (tmp->rdev - && tmp->rdev->recovery_offset == MaxSector - && !test_bit(Faulty, &tmp->rdev->flags) - && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { + sysfs_notify_dirent_safe(replacement->sysfs_state); + } else if (rdev + && rdev->recovery_offset == MaxSector + && !test_bit(Faulty, &rdev->flags) + && !test_and_set_bit(In_sync, &rdev->flags)) { count++; - sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); + sysfs_notify_dirent_safe(rdev->sysfs_state); } } spin_lock_irqsave(&conf->device_lock, flags); @@ -7902,8 +7937,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct r5conf *conf = mddev->private; int err = 0; int number = rdev->raid_disk; - struct md_rdev **rdevp; + struct md_rdev __rcu **rdevp; struct disk_info *p = conf->disks + number; + struct md_rdev *tmp; print_raid5_conf(conf); if (test_bit(Journal, &rdev->flags) && conf->log) { @@ -7921,9 +7957,9 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) log_exit(conf); return 0; } - if (rdev == p->rdev) + if (rdev == rcu_access_pointer(p->rdev)) rdevp = &p->rdev; - else if (rdev == p->replacement) + else if (rdev == rcu_access_pointer(p->replacement)) rdevp = &p->replacement; else return 0; @@ -7943,18 +7979,20 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) if (!test_bit(Faulty, &rdev->flags) && mddev->recovery_disabled != conf->recovery_disabled && !has_failed(conf) && - (!p->replacement || p->replacement == rdev) && + (!rcu_access_pointer(p->replacement) || + rcu_access_pointer(p->replacement) == rdev) && number < conf->raid_disks) { err = -EBUSY; goto abort; } *rdevp = NULL; if (!test_bit(RemoveSynchronized, &rdev->flags)) { + lockdep_assert_held(&mddev->reconfig_mutex); synchronize_rcu(); if (atomic_read(&rdev->nr_pending)) { /* lost the race, try later */ err = -EBUSY; - *rdevp = rdev; + rcu_assign_pointer(*rdevp, rdev); } } if (!err) { @@ -7962,17 +8000,19 @@ static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) if (err) goto abort; } - if (p->replacement) { + + tmp = rcu_access_pointer(p->replacement); + if (tmp) { /* We must have just cleared 'rdev' */ - p->rdev = p->replacement; - clear_bit(Replacement, &p->replacement->flags); + rcu_assign_pointer(p->rdev, tmp); + clear_bit(Replacement, &tmp->flags); smp_mb(); /* Make sure other CPUs may see both as identical * but will never see neither - if they are careful */ - p->replacement = NULL; + rcu_assign_pointer(p->replacement, NULL); if (!err) - err = log_modify(conf, p->rdev, true); + err = log_modify(conf, tmp, true); } clear_bit(WantReplacement, &rdev->flags); @@ -7988,6 +8028,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) int ret, err = -EEXIST; int disk; struct disk_info *p; + struct md_rdev *tmp; int first = 0; int last = conf->raid_disks - 1; @@ -8045,7 +8086,8 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) } for (disk = first; disk <= last; disk++) { p = conf->disks + disk; - if (test_bit(WantReplacement, &p->rdev->flags) && 
+ tmp = rdev_mdlock_deref(mddev, p->rdev); + if (test_bit(WantReplacement, &tmp->flags) && p->replacement == NULL) { clear_bit(In_sync, &rdev->flags); set_bit(Replacement, &rdev->flags); @@ -8336,6 +8378,7 @@ static void end_reshape(struct r5conf *conf) static void raid5_finish_reshape(struct mddev *mddev) { struct r5conf *conf = mddev->private; + struct md_rdev *rdev; if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { @@ -8347,10 +8390,12 @@ static void raid5_finish_reshape(struct mddev *mddev) for (d = conf->raid_disks ; d < conf->raid_disks - mddev->delta_disks; d++) { - struct md_rdev *rdev = conf->disks[d].rdev; + rdev = rdev_mdlock_deref(mddev, + conf->disks[d].rdev); if (rdev) clear_bit(In_sync, &rdev->flags); - rdev = conf->disks[d].replacement; + rdev = rdev_mdlock_deref(mddev, + conf->disks[d].replacement); if (rdev) clear_bit(In_sync, &rdev->flags); } diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 9e8486a9e445..638d29863503 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -473,7 +473,8 @@ enum { */ struct disk_info { - struct md_rdev *rdev, *replacement; + struct md_rdev __rcu *rdev; + struct md_rdev __rcu *replacement; struct page *extra_page; /* extra page to use in prexor */ }; @@ -560,6 +561,16 @@ struct r5pending_data { struct bio_list bios; }; +struct raid5_percpu { + struct page *spare_page; /* Used when checking P/Q in raid6 */ + void *scribble; /* space for constructing buffer + * lists and performing address + * conversions + */ + int scribble_obj_size; + local_lock_t lock; +}; + struct r5conf { struct hlist_head *stripe_hashtbl; /* only protect corresponding hash list and inactive_list */ @@ -635,15 +646,7 @@ struct r5conf { */ int recovery_disabled; /* per cpu variables */ - struct raid5_percpu { - struct page *spare_page; /* Used when checking P/Q in raid6 */ - void *scribble; /* space for constructing buffer - * lists and performing address - * conversions - */ - int scribble_obj_size; - local_lock_t lock; - } __percpu *percpu; int scribble_disks; int scribble_sectors; struct hlist_node node;
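
The raid5.c hunks above replace "should hold conf->device_lock already"-style comments with sparse context annotations (__must_hold, __acquires, __releases), so that sparse's context checking can flag callers that enter or leave a function with the wrong lock balance. Below is a minimal sketch of the same idiom outside the md code; the lock name and the three helper functions are invented for illustration, while the macro bodies follow the kernel's definitions under __CHECKER__.

/* Sketch of sparse lock-context annotations; hypothetical_lock and the
 * counter_* helpers are made up, the macro definitions mirror the kernel's. */
#ifdef __CHECKER__
# define __must_hold(x)	__attribute__((context(x, 1, 1)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
#else
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
#endif

#include <pthread.h>

static pthread_mutex_t hypothetical_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

/* Caller must already hold hypothetical_lock, just as do_release_stripe()
 * in the patch must hold conf->device_lock. */
static void counter_inc_locked(void) __must_hold(&hypothetical_lock)
{
	counter++;
}

static void counter_lock(void) __acquires(&hypothetical_lock)
{
	pthread_mutex_lock(&hypothetical_lock);
}

static void counter_unlock(void) __releases(&hypothetical_lock)
{
	pthread_mutex_unlock(&hypothetical_lock);
}

int main(void)
{
	counter_lock();
	counter_inc_locked();	/* balanced: sparse sees the context held here */
	counter_unlock();
	return 0;
}

Calling counter_inc_locked() without counter_lock() would be reported by sparse, which is the same class of mistake the annotations on release_stripe_list() and handle_active_stripes() are meant to catch.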
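The disk_info rdev and replacement pointers become __rcu annotated, and the two wrappers rdev_pend_deref() and rdev_mdlock_deref() encode the reason why a given dereference is safe: either an elevated nr_pending count or the reconfig mutex. The following is a hedged, self-contained sketch of that pattern; demo_dev, demo_slot, demo_reconfig_mutex and the demo_* functions are invented, only the RCU primitives are the kernel's real API.

/* Sketch of "rcu_dereference_protected with a documented reason";
 * all demo_* names are placeholders. */
#include <linux/rcupdate.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/atomic.h>

struct demo_dev {
	atomic_t nr_pending;
};

struct demo_slot {
	struct demo_dev __rcu *dev;
};

static DEFINE_MUTEX(demo_reconfig_mutex);

/* Safe while the caller holds an elevated nr_pending reference:
 * the writer waits for nr_pending to drop before freeing the device. */
static struct demo_dev *demo_pend_deref(struct demo_dev __rcu *dev)
{
	return rcu_dereference_protected(dev,
			atomic_read(&rcu_access_pointer(dev)->nr_pending));
}

/* Safe while demo_reconfig_mutex is held: the only writer takes it too. */
static struct demo_dev *demo_mdlock_deref(struct demo_dev __rcu *dev)
{
	return rcu_dereference_protected(dev,
			lockdep_is_held(&demo_reconfig_mutex));
}

/* Writer side: publish and retract under the mutex. */
static void demo_attach(struct demo_slot *slot, struct demo_dev *dev)
{
	mutex_lock(&demo_reconfig_mutex);
	rcu_assign_pointer(slot->dev, dev);
	mutex_unlock(&demo_reconfig_mutex);
}

static void demo_detach(struct demo_slot *slot)
{
	mutex_lock(&demo_reconfig_mutex);
	rcu_assign_pointer(slot->dev, NULL);
	mutex_unlock(&demo_reconfig_mutex);
	synchronize_rcu();	/* readers under rcu_read_lock() are now done */
}

This is the same split the patch makes: raid5_end_read_request() and the stripe-handling paths use the nr_pending-based wrapper, while raid5_run(), raid5_spare_active() and raid5_finish_reshape() use the mddev-lock-based one.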
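setup_conf() now carries the precise errno in a single ret variable and returns ERR_PTR(ret) from one abort label instead of collapsing every failure into -EIO or -ENOMEM. A stripped-down sketch of that ERR_PTR/IS_ERR/PTR_ERR idiom follows; demo_conf and the allocation and validation steps are stand-ins, the err.h helpers are the real API.

/* Sketch of propagating a specific errno through ERR_PTR from a
 * constructor-style function; demo_* names are invented. */
#include <linux/err.h>
#include <linux/slab.h>

struct demo_conf {
	void *table;
};

static struct demo_conf *demo_setup_conf(size_t table_size)
{
	struct demo_conf *conf;
	int ret = -ENOMEM;		/* default for early allocation failures */

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(ret);

	conf->table = kzalloc(table_size, GFP_KERNEL);
	if (!conf->table)
		goto abort;

	if (table_size < 64) {		/* stand-in for a validation failure */
		ret = -EINVAL;		/* later failures choose their own errno */
		goto abort;
	}

	return conf;

abort:
	kfree(conf->table);		/* kfree(NULL) is a no-op */
	kfree(conf);
	return ERR_PTR(ret);		/* caller: if (IS_ERR(conf)) return PTR_ERR(conf); */
}

The benefit mirrored from the patch is that callers of the constructor see the real reason for the failure (for example -ENOMEM from register_shrinker() or grow_stripes()) rather than a blanket -EIO.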
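The raid5.h hunk moves struct raid5_percpu out of struct r5conf so the __percpu pointer refers to a named type; each CPU keeps its own scribble buffer and serializes access with a local_lock_t. A hedged sketch of that per-CPU-scratch-with-local-lock shape is below; the demo_* names and buffer size are invented, the percpu and local_lock helpers are the kernel's.

/* Sketch of a named per-CPU scratch structure guarded by a local_lock;
 * error handling on partial allocation failure is trimmed for brevity. */
#include <linux/percpu.h>
#include <linux/local_lock.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/string.h>

#define DEMO_SCRIBBLE_SIZE 4096

struct demo_percpu {
	void *scribble;		/* per-CPU scratch buffer */
	local_lock_t lock;	/* serializes users on this CPU */
};

struct demo_conf {
	struct demo_percpu __percpu *percpu;
};

static int demo_alloc_percpu(struct demo_conf *conf)
{
	int cpu;

	conf->percpu = alloc_percpu(struct demo_percpu);
	if (!conf->percpu)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct demo_percpu *p = per_cpu_ptr(conf->percpu, cpu);

		local_lock_init(&p->lock);
		p->scribble = kzalloc(DEMO_SCRIBBLE_SIZE, GFP_KERNEL);
		if (!p->scribble) {
			free_percpu(conf->percpu);	/* leaks earlier buffers; trimmed */
			return -ENOMEM;
		}
	}
	return 0;
}

static void demo_use_scribble(struct demo_conf *conf)
{
	struct demo_percpu *p;

	local_lock(&conf->percpu->lock);	/* pins us to this CPU's buffer */
	p = this_cpu_ptr(conf->percpu);
	memset(p->scribble, 0, DEMO_SCRIBBLE_SIZE);
	local_unlock(&conf->percpu->lock);
}

Hoisting the struct definition out of r5conf does not change behaviour; it mainly lets other code (and forward declarations) name struct raid5_percpu directly.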
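In the raid5_run() hunk, discard support is no longer toggled through a queue flag; the driver advertises discard via the queue limits and suppresses it by forcing max_discard_sectors to 0 when pass-down would be unsafe. A hedged sketch of gating discard this way in a hypothetical stacking driver, using only block-layer helpers that appear in this series:

/* Sketch: advertise or suppress discard purely through queue limits.
 * demo_stacked_dev and the safety condition are invented. */
#include <linux/blkdev.h>

struct demo_stacked_dev {
	struct block_device *lower_bdev;
	struct request_queue *queue;
	unsigned int stripe_sectors;
};

static void demo_configure_discard(struct demo_stacked_dev *d, bool passdown_safe)
{
	/* With QUEUE_FLAG_DISCARD gone, the lower device supports discard
	 * iff its max_discard_sectors limit is non-zero. */
	if (!passdown_safe ||
	    bdev_max_discard_sectors(d->lower_bdev) < d->stripe_sectors) {
		blk_queue_max_discard_sectors(d->queue, 0);
		return;
	}

	blk_queue_max_discard_sectors(d->queue, d->stripe_sectors);
	d->queue->limits.discard_granularity = d->stripe_sectors << 9; /* bytes */
}

This matches the shape of the raid5 change: instead of clearing a flag when devices_handle_discard_safely or the granularity check fails, the limit itself is zeroed, and readers such as bdev_max_discard_sectors() see discard as unsupported.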