| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-06 20:50:19 +0200 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-04-06 20:50:19 +0200 |
| commit | 83c7c18b169bdac3dabab763d16549c1e4688a8b (patch) | |
| tree | 438488db09b9776ff80738e5bb0e807319099556 /drivers/md/dm.c | |
| parent | Merge branch 'work.misc' of git://git.kernel.org/pub/scm/linux/kernel/git/vir... (diff) | |
| parent | dm: remove fmode_t argument from .prepare_ioctl hook (diff) | |
Merge tag 'for-4.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
Pull device mapper updates from Mike Snitzer:
- DM core passthrough ioctl fix to retain a reference to the DM table, and
that table's block devices, while issuing the ioctl to one of those
block devices (a trimmed sketch of the new pattern follows this list).
- DM core passthrough ioctl fix to _not_ override the fmode_t used to
issue the ioctl. Overriding it with the fmode_t that the block device
was originally opened with during DM table load is a liability.
- Add DM core support for secure erase forwarding and update the DM
linear and DM striped targets to support it (see the constructor
fragment after this list).
- A DM core 4.16 stable fix to allow abnormal IO (e.g. discard, write
same, write zeroes) for targets that make use of the non-splitting IO
variant (as is done for multipath or thinp when layered directly on
NVMe).
- Allow DM targets to return a payload in response to a DM message that
they are sent. This is useful for DM targets that would like to
provide statistics data in response to DM messages (see the example
message hook after this list).
- Update DM bufio to support non-power-of-2 block sizes. Numerous other
related changes prepare the DM bufio code for this support.
- Fix DM crypt to use a bounded amount of memory across the entire
system. This is to avoid OOM that can otherwise occur in response to
certain pathological IO workloads (e.g. discarding a large DM crypt
device).
- Add a 'check_at_most_once' feature to the DM verity target to allow
verity to be used on mobile devices that have very limited resources.
- Fix the DM integrity target to fail early if a keyed algorithm (e.g.
HMAC) is to be used but the key isn't set.
- Add non-power-of-2 support to the DM unstripe target.
- Eliminate the use of a Variable Length Array in the DM stripe target.
- Update the DM log-writes target to record metadata (REQ_META flag).
- DM raid fixes for its nosync status and some variable range issues.
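
As a rough sketch of the ioctl changes in the first two bullets above, the pass-through path now pins the live DM table via SRCU instead of taking an extra blkdev_get() reference, and it forwards the caller's fmode_t unmodified. The function below is a trimmed, illustrative version of dm_blk_ioctl() built from the drivers/md/dm.c diff at the bottom of this page; the CAP_SYS_RAWIO check for ioctls issued against a subset of the parent device is omitted, and the example_ name is not in the tree.

```c
/*
 * Illustrative only: a trimmed-down dm_blk_ioctl() showing the new
 * dm_prepare_ioctl()/dm_unprepare_ioctl() pairing from the diff below.
 * The privilege check for partial-device ioctls is left out.
 */
static int example_dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r, srcu_idx;

	/* Pins the live DM table (and its block devices) via SRCU. */
	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
	if (r < 0)
		goto out;

	/*
	 * bdev now points at the single target's underlying device;
	 * the caller's fmode_t is passed through unmodified.
	 */
	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	/* Drops the table reference taken by dm_prepare_ioctl(). */
	dm_unprepare_ioctl(md, srcu_idx);
	return r;
}
```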
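
The secure erase support is opt-in per target: a bio-based target advertises how many REQ_OP_SECURE_ERASE bios it wants per mapping through the new ti->num_secure_erase_bios field visible in the diff below. The constructor fragment here is hypothetical; only the two bio-count fields come from this pull.

```c
/*
 * Hypothetical target constructor fragment. Only the
 * ti->num_secure_erase_bios and ti->num_discard_bios fields come from
 * the DM code; the target itself is made up for illustration.
 */
static int example_target_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* Forward secure erase requests to the underlying device. */
	ti->num_secure_erase_bios = 1;

	/* Discard forwarding is advertised the same way as before. */
	ti->num_discard_bios = 1;

	return 0;
}
```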
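
For the message-payload change, a target's .message hook can now write a reply into a result buffer that is handed back to userspace. The result/maxlen parameters are not part of the dm.c diff shown on this page, so treat the exact prototype and return convention below as assumptions; the target structure and its counters are invented.

```c
/*
 * Assumed .message prototype with a result buffer; the target state
 * and counters are invented for illustration.
 */
static int example_target_message(struct dm_target *ti, unsigned int argc,
				  char **argv, char *result, unsigned int maxlen)
{
	struct example_target *et = ti->private;	/* hypothetical per-target state */

	if (argc == 1 && !strcasecmp(argv[0], "stats")) {
		/* Whatever is written here is returned with the ioctl reply. */
		scnprintf(result, maxlen, "reads %llu writes %llu",
			  (unsigned long long)et->reads,
			  (unsigned long long)et->writes);
		return 0;
	}

	return -EINVAL;
}
```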
* tag 'for-4.17/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm: (28 commits)
dm: remove fmode_t argument from .prepare_ioctl hook
dm: hold DM table for duration of ioctl rather than use blkdev_get
dm raid: fix parse_raid_params() variable range issue
dm verity: make verity_for_io_block static
dm verity: add 'check_at_most_once' option to only validate hashes once
dm bufio: don't embed a bio in the dm_buffer structure
dm bufio: support non-power-of-two block sizes
dm bufio: use slab cache for dm_buffer structure allocations
dm bufio: reorder fields in dm_buffer structure
dm bufio: relax alignment constraint on slab cache
dm bufio: remove code that merges slab caches
dm bufio: get rid of slab cache name allocations
dm bufio: move dm-bufio.h to include/linux/
dm bufio: delete outdated comment
dm: add support for secure erase forwarding
dm: backfill abnormal IO support to non-splitting IO submission
dm raid: fix nosync status
dm mpath: use DM_MAPIO_SUBMITTED instead of magic number 0 in process_queued_bios()
dm stripe: get rid of a Variable Length Array (VLA)
dm log writes: record metadata flag for better flags record
...
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r-- | drivers/md/dm.c | 145 |
1 file changed, 80 insertions, 65 deletions
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ded74e1eb0d1..5a81c47be4e4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,67 +458,56 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return dm_get_geometry(md, geo);
 }
 
-static char *_dm_claim_ptr = "I belong to device-mapper";
-
-static int dm_get_bdev_for_ioctl(struct mapped_device *md,
-				 struct block_device **bdev,
-				 fmode_t *mode)
+static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
+			    struct block_device **bdev)
+	__acquires(md->io_barrier)
 {
 	struct dm_target *tgt;
 	struct dm_table *map;
-	int srcu_idx, r, r2;
+	int r;
 
 retry:
 	r = -ENOTTY;
-	map = dm_get_live_table(md, &srcu_idx);
+	map = dm_get_live_table(md, srcu_idx);
 	if (!map || !dm_table_get_size(map))
-		goto out;
+		return r;
 
 	/* We only support devices that have a single target */
 	if (dm_table_get_num_targets(map) != 1)
-		goto out;
+		return r;
 
 	tgt = dm_table_get_target(map, 0);
 	if (!tgt->type->prepare_ioctl)
-		goto out;
-
-	if (dm_suspended_md(md)) {
-		r = -EAGAIN;
-		goto out;
-	}
-
-	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
-	if (r < 0)
-		goto out;
-
-	bdgrab(*bdev);
-	r2 = blkdev_get(*bdev, *mode, _dm_claim_ptr);
-	if (r2 < 0) {
-		r = r2;
-		goto out;
-	}
+		return r;
 
-	dm_put_live_table(md, srcu_idx);
-	return r;
+	if (dm_suspended_md(md))
+		return -EAGAIN;
 
-out:
-	dm_put_live_table(md, srcu_idx);
+	r = tgt->type->prepare_ioctl(tgt, bdev);
 	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
+		dm_put_live_table(md, *srcu_idx);
 		msleep(10);
 		goto retry;
 	}
+
 	return r;
 }
 
+static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
+	__releases(md->io_barrier)
+{
+	dm_put_live_table(md, srcu_idx);
+}
+
 static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 			unsigned int cmd, unsigned long arg)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
-	int r;
+	int r, srcu_idx;
 
-	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
 	if (r < 0)
-		return r;
+		goto out;
 
 	if (r > 0) {
 		/*
@@ -536,7 +525,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
 
 	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
 out:
-	blkdev_put(bdev, mode);
+	dm_unprepare_ioctl(md, srcu_idx);
 	return r;
 }
 
@@ -710,6 +699,8 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
 	rcu_read_unlock();
 }
 
+static char *_dm_claim_ptr = "I belong to device-mapper";
+
 /*
  * Open a table device so we can use it as a map destination.
  */
@@ -1414,6 +1405,11 @@ static unsigned get_num_discard_bios(struct dm_target *ti)
 	return ti->num_discard_bios;
 }
 
+static unsigned get_num_secure_erase_bios(struct dm_target *ti)
+{
+	return ti->num_secure_erase_bios;
+}
+
 static unsigned get_num_write_same_bios(struct dm_target *ti)
 {
 	return ti->num_write_same_bios;
@@ -1467,6 +1463,11 @@ static int __send_discard(struct clone_info *ci, struct dm_target *ti)
 					   is_split_required_for_discard);
 }
 
+static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
+{
+	return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios, NULL);
+}
+
 static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
 {
 	return __send_changing_extent_only(ci, ti, get_num_write_same_bios, NULL);
@@ -1477,6 +1478,25 @@ static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
 	return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
 }
 
+static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
+				  int *result)
+{
+	struct bio *bio = ci->bio;
+
+	if (bio_op(bio) == REQ_OP_DISCARD)
+		*result = __send_discard(ci, ti);
+	else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
+		*result = __send_secure_erase(ci, ti);
+	else if (bio_op(bio) == REQ_OP_WRITE_SAME)
+		*result = __send_write_same(ci, ti);
+	else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
+		*result = __send_write_zeroes(ci, ti);
+	else
+		return false;
+
+	return true;
+}
+
 /*
  * Select the correct strategy for processing a non-flush bio.
  */
@@ -1491,12 +1511,8 @@ static int __split_and_process_non_flush(struct clone_info *ci)
 	if (!dm_target_is_valid(ti))
 		return -EIO;
 
-	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
-		return __send_discard(ci, ti);
-	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
-		return __send_write_same(ci, ti);
-	else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
-		return __send_write_zeroes(ci, ti);
+	if (unlikely(__process_abnormal_io(ci, ti, &r)))
+		return r;
 
 	if (bio_op(bio) == REQ_OP_ZONE_REPORT)
 		len = ci->sector_count;
@@ -1617,9 +1633,12 @@ static blk_qc_t __process_bio(struct mapped_device *md,
 			goto out;
 		}
 
-		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
 		ci.bio = bio;
 		ci.sector_count = bio_sectors(bio);
+		if (unlikely(__process_abnormal_io(&ci, ti, &error)))
+			goto out;
+
+		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
 		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
 	}
 out:
@@ -3015,20 +3034,19 @@ static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	const struct pr_ops *ops;
-	fmode_t mode;
-	int r;
+	int r, srcu_idx;
 
-	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
 	if (r < 0)
-		return r;
+		goto out;
 
 	ops = bdev->bd_disk->fops->pr_ops;
 	if (ops && ops->pr_reserve)
 		r = ops->pr_reserve(bdev, key, type, flags);
 	else
 		r = -EOPNOTSUPP;
-
-	blkdev_put(bdev, mode);
+out:
+	dm_unprepare_ioctl(md, srcu_idx);
 	return r;
 }
 
@@ -3036,20 +3054,19 @@ static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	const struct pr_ops *ops;
-	fmode_t mode;
-	int r;
+	int r, srcu_idx;
 
-	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
 	if (r < 0)
-		return r;
+		goto out;
 
 	ops = bdev->bd_disk->fops->pr_ops;
 	if (ops && ops->pr_release)
 		r = ops->pr_release(bdev, key, type);
 	else
 		r = -EOPNOTSUPP;
-
-	blkdev_put(bdev, mode);
+out:
+	dm_unprepare_ioctl(md, srcu_idx);
 	return r;
 }
 
@@ -3058,20 +3075,19 @@ static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	const struct pr_ops *ops;
-	fmode_t mode;
-	int r;
+	int r, srcu_idx;
 
-	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
 	if (r < 0)
-		return r;
+		goto out;
 
 	ops = bdev->bd_disk->fops->pr_ops;
 	if (ops && ops->pr_preempt)
 		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
 	else
 		r = -EOPNOTSUPP;
-
-	blkdev_put(bdev, mode);
+out:
+	dm_unprepare_ioctl(md, srcu_idx);
 	return r;
 }
 
@@ -3079,20 +3095,19 @@ static int dm_pr_clear(struct block_device *bdev, u64 key)
 {
 	struct mapped_device *md = bdev->bd_disk->private_data;
 	const struct pr_ops *ops;
-	fmode_t mode;
-	int r;
+	int r, srcu_idx;
 
-	r = dm_get_bdev_for_ioctl(md, &bdev, &mode);
+	r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
 	if (r < 0)
-		return r;
+		goto out;
 
 	ops = bdev->bd_disk->fops->pr_ops;
 	if (ops && ops->pr_clear)
 		r = ops->pr_clear(bdev, key);
 	else
 		r = -EOPNOTSUPP;
-
-	blkdev_put(bdev, mode);
+out:
+	dm_unprepare_ioctl(md, srcu_idx);
 	return r;
 }