Diffstat (limited to 'drivers/md')
 drivers/md/dm-core.h  | 1 +
 drivers/md/dm-crypt.c | 6 +++---
 drivers/md/dm-delay.c | 2 +-
 drivers/md/dm-thin.c  | 4 ++--
 drivers/md/dm.c       | 8 ++++----
 5 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 8cc03c0c262e..8d3d11887343 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -237,6 +237,7 @@ struct dm_io {
unsigned long start_time;
void *data;
struct hlist_node node;
+ struct task_struct *map_task;
spinlock_t endio_lock;
struct dm_stats_aux stats_aux;
/* last member of dm_target_io is 'struct bio' */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index ec746cc4b1f8..a1730cc8ae27 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1857,7 +1857,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
return 1;
}
- dm_submit_bio_remap(io->base_bio, clone, (gfp != CRYPT_MAP_READ_GFP));
+ dm_submit_bio_remap(io->base_bio, clone);
return 0;
}
@@ -1883,7 +1883,7 @@ static void kcryptd_io_write(struct dm_crypt_io *io)
{
struct bio *clone = io->ctx.bio_out;
- dm_submit_bio_remap(io->base_bio, clone, true);
+ dm_submit_bio_remap(io->base_bio, clone);
}
#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
@@ -1962,7 +1962,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
if ((likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) ||
test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)) {
- dm_submit_bio_remap(io->base_bio, clone, true);
+ dm_submit_bio_remap(io->base_bio, clone);
return;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index b25b45011b11..9a51bf51a859 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -72,7 +72,7 @@ static void flush_bios(struct bio *bio)
while (bio) {
n = bio->bi_next;
bio->bi_next = NULL;
- dm_submit_bio_remap(bio, NULL, true);
+ dm_submit_bio_remap(bio, NULL);
bio = n;
}
}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ba74bc22ba42..4d25d0e27031 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -755,7 +755,7 @@ static void issue(struct thin_c *tc, struct bio *bio)
struct pool *pool = tc->pool;
if (!bio_triggers_commit(tc, bio)) {
- dm_submit_bio_remap(bio, NULL, true);
+ dm_submit_bio_remap(bio, NULL);
return;
}
@@ -2383,7 +2383,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio->bi_opf & REQ_PREFLUSH)
bio_endio(bio);
else
- dm_submit_bio_remap(bio, NULL, true);
+ dm_submit_bio_remap(bio, NULL);
}
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1c5c9036a20e..c470f54f9193 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -574,6 +574,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
this_cpu_inc(*md->pending_io);
io->orig_bio = NULL;
io->md = md;
+ io->map_task = current;
spin_lock_init(&io->endio_lock);
io->start_time = jiffies;
@@ -1189,15 +1190,13 @@ static inline void __dm_submit_bio_remap(struct bio *clone,
/*
* @clone: clone bio that DM core passed to target's .map function
* @tgt_clone: clone of @clone bio that target needs submitted
- * @from_wq: caller is a workqueue thread managed by DM target
*
* Targets should use this interface to submit bios they take
* ownership of when returning DM_MAPIO_SUBMITTED.
*
* Target should also enable ti->accounts_remapped_io
*/
-void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone,
- bool from_wq)
+void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
{
struct dm_target_io *tio = clone_to_tio(clone);
struct dm_io *io = tio->io;
@@ -1212,7 +1211,7 @@ void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone,
* Account io->origin_bio to DM dev on behalf of target
* that took ownership of IO with DM_MAPIO_SUBMITTED.
*/
- if (!from_wq) {
+ if (io->map_task == current) {
/* Still in target's map function */
io->start_io_acct = true;
} else {
@@ -1568,6 +1567,7 @@ static void dm_split_and_process_bio(struct mapped_device *md,
}
error = __split_and_process_bio(&ci);
+ ci.io->map_task = NULL;
if (error || !ci.sector_count)
goto out;
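
A hedged usage sketch (not part of the patch above): with the two-argument signature, a target that returns DM_MAPIO_SUBMITTED and hands bios back to core via dm_submit_bio_remap() no longer reports whether it is running from a workqueue; DM core compares io->map_task against current instead. The target type, its private struct, and the helper names below are illustrative assumptions, not code from this commit.

/* Sketch only: a hypothetical target built against the reworked interface. */
#include <linux/device-mapper.h>

struct example_c {			/* hypothetical per-target state */
	struct dm_dev *dev;
};

static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_c *ec = ti->private;

	/* Remap to the underlying device, as dm-delay and dm-thin do. */
	bio_set_dev(bio, ec->dev->bdev);

	/*
	 * Submitting from inside .map: io->map_task still equals current,
	 * so DM core only sets io->start_io_acct and leaves accounting to
	 * the normal map path.  If a worker thread submitted this bio with
	 * dm_submit_bio_remap() later, the comparison would fail and core
	 * would start IO accounting immediately on the target's behalf.
	 */
	dm_submit_bio_remap(bio, NULL);
	return DM_MAPIO_SUBMITTED;
}

/* The target's ctr would also need to set ti->accounts_remapped_io = true. */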