author     Mike Snitzer <snitzer@kernel.org>  2022-04-16 02:08:23 +0200
committer  Mike Snitzer <snitzer@kernel.org>  2022-05-05 23:31:36 +0200
commit     9d20653fe84ebd772c3af71808e6a727603e0b71 (patch)
tree       df8b5113bb8c33bb07caba8cd45ef868f8b542e5
parent     dm: put all polled dm_io instances into a single list (diff)
dm: simplify bio-based IO accounting further
Now that IO splitting is recorded prior to, or during, ->map, IO
accounting can happen immediately rather than being deferred until
after bio splitting in dm_split_and_process_bio().

Remove the DM_IO_START_ACCT flag and also remove dm_io's map_task
member because there is no longer any need to wait for splitting to
occur before accounting.

Also move the dm_io struct's 'flags' member to consolidate struct
holes.

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
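For readers unfamiliar with "struct holes": moving the 2-byte 'flags'
member up next to the 2-byte 'magic' member lets it occupy space the
compiler would otherwise leave as alignment padding. The stand-alone C
sketch below (hypothetical member names, not the real dm_io layout)
shows the effect; on a typical LP64 build it prints 32 vs 24 bytes.

#include <stdio.h>

/* Hypothetical members, chosen only to show the padding effect. */
struct with_holes {
	unsigned short magic;	/* 2 bytes + 6 bytes of padding */
	void *lock;
	unsigned short flags;	/* 2 bytes + 6 more bytes of padding */
	void *md;
};

struct consolidated {
	unsigned short magic;	/* 2 bytes ... */
	unsigned short flags;	/* ... flags now sits in the former hole */
	void *lock;
	void *md;
};

int main(void)
{
	/* Typically prints "with holes: 32, consolidated: 24" on LP64. */
	printf("with holes: %zu, consolidated: %zu\n",
	       sizeof(struct with_holes), sizeof(struct consolidated));
	return 0;
}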
-rw-r--r--  drivers/md/dm-core.h |  6
-rw-r--r--  drivers/md/dm.c      | 34
2 files changed, 6 insertions, 34 deletions
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index f3cfc7affd12..d21648a923ea 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -255,15 +255,12 @@ static inline bool dm_tio_is_normal(struct dm_target_io *tio)
 #define DM_IO_MAGIC 19577
 struct dm_io {
 	unsigned short magic;
-
+	blk_short_t flags;
 	spinlock_t lock;
 	unsigned long start_time;
 	void *data;
 	struct dm_io *next;
-	struct task_struct *map_task;
 	struct dm_stats_aux stats_aux;
-
-	blk_short_t flags;
 	blk_status_t status;
 	atomic_t io_count;
 	struct mapped_device *md;
@@ -281,7 +278,6 @@ struct dm_io {
  * dm_io flags
  */
 enum {
-	DM_IO_START_ACCT,
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT
 };
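The values remaining in this enum are bit positions within dm_io's
'flags' word, manipulated through the dm_io_set_flag()/dm_io_flagged()
helpers used in the dm.c hunks below. A minimal user-space sketch of
that pattern, assuming the helpers simply shift and mask (an
illustration only, not a copy of the dm-core.h code):

#include <stdbool.h>
#include <stdio.h>

/* Same bit positions as the dm_io flag enum above. */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT
};

/* Stand-in for dm_io; only the flags word matters here. */
struct demo_io {
	unsigned short flags;
};

static void demo_io_set_flag(struct demo_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

static bool demo_io_flagged(const struct demo_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

int main(void)
{
	struct demo_io io = { .flags = 0 };

	demo_io_set_flag(&io, DM_IO_ACCOUNTED);
	printf("accounted=%d was_split=%d\n",
	       demo_io_flagged(&io, DM_IO_ACCOUNTED),
	       demo_io_flagged(&io, DM_IO_WAS_SPLIT));
	return 0;
}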
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index cd084a74c65b..50e081f68792 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -596,7 +596,6 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 	this_cpu_inc(*md->pending_io);
 	io->orig_bio = bio;
 	io->md = md;
-	io->map_task = current;
 	spin_lock_init(&io->lock);
 	io->start_time = jiffies;
 	io->flags = 0;
@@ -1241,13 +1240,6 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 }
 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
 
-static inline void __dm_submit_bio_remap(struct bio *clone,
-					 dev_t dev, sector_t old_sector)
-{
-	trace_block_bio_remap(clone, dev, old_sector);
-	submit_bio_noacct(clone);
-}
-
 /*
  * @clone: clone bio that DM core passed to target's .map function
  * @tgt_clone: clone of @clone bio that target needs submitted
@@ -1262,8 +1254,6 @@ void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
 	struct dm_target_io *tio = clone_to_tio(clone);
 	struct dm_io *io = tio->io;
 
-	WARN_ON_ONCE(!tio->ti->accounts_remapped_io);
-
 	/* establish bio that will get submitted */
 	if (!tgt_clone)
 		tgt_clone = clone;
@@ -1272,15 +1262,11 @@ void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
 	 * Account io->origin_bio to DM dev on behalf of target
 	 * that took ownership of IO with DM_MAPIO_SUBMITTED.
 	 */
-	if (io->map_task == current) {
-		/* Still in target's map function */
-		dm_io_set_flag(io, DM_IO_START_ACCT);
-	} else {
-		dm_start_io_acct(io, clone);
-	}
+	dm_start_io_acct(io, clone);
 
-	__dm_submit_bio_remap(tgt_clone, disk_devt(io->md->disk),
+	trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
 			      tio->old_sector);
+	submit_bio_noacct(tgt_clone);
 }
 EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
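With dm_start_io_acct() now called unconditionally here, a target that
sets ti->accounts_remapped_io gets identical accounting whether it
calls dm_submit_bio_remap() from inside its ->map method or later from
a worker thread. A hedged sketch of such a caller follows;
my_target_map(), struct my_target and its 'dev' member are invented
for illustration and are not taken from this patch.

#include <linux/bio.h>
#include <linux/device-mapper.h>

/* Hypothetical per-target state; a real target would fill this in its
 * .ctr and also set ti->accounts_remapped_io = true there. */
struct my_target {
	struct dm_dev *dev;
};

static int my_target_map(struct dm_target *ti, struct bio *bio)
{
	struct my_target *mt = ti->private;

	/* Remap the clone DM core handed us to the underlying device. */
	bio_set_dev(bio, mt->dev->bdev);
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	/*
	 * Hand the clone back to DM core: dm_submit_bio_remap() starts IO
	 * accounting immediately, traces the remap and submits the bio.
	 */
	dm_submit_bio_remap(bio, NULL);
	return DM_MAPIO_SUBMITTED;
}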
@@ -1340,16 +1326,10 @@ static void __map_bio(struct bio *clone)
 	case DM_MAPIO_SUBMITTED:
 		/* target has assumed ownership of this io */
 		if (!ti->accounts_remapped_io)
-			dm_io_set_flag(io, DM_IO_START_ACCT);
+			dm_start_io_acct(io, clone);
 		break;
 	case DM_MAPIO_REMAPPED:
-		/*
-		 * the bio has been remapped so dispatch it, but defer
-		 * dm_start_io_acct() until after possible bio_split().
-		 */
-		__dm_submit_bio_remap(clone, disk_devt(md->disk),
-				      tio->old_sector);
-		dm_io_set_flag(io, DM_IO_START_ACCT);
+		dm_submit_bio_remap(clone, NULL);
 		break;
 	case DM_MAPIO_KILL:
 	case DM_MAPIO_REQUEUE:
@@ -1667,7 +1647,6 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 	}
 
 	error = __split_and_process_bio(&ci);
-	io->map_task = NULL;
 	if (error || !ci.sector_count)
 		goto out;
 
@@ -1679,9 +1658,6 @@ static void dm_split_and_process_bio(struct mapped_device *md,
 	bio_inc_remaining(bio);
 	submit_bio_noacct(bio);
 out:
-	if (dm_io_flagged(io, DM_IO_START_ACCT))
-		dm_start_io_acct(io, NULL);
-
 	/*
 	 * Drop the extra reference count for non-POLLED bio, and hold one
 	 * reference for POLLED bio, which will be released in dm_poll_bio