author		Mike Snitzer <snitzer@redhat.com>	2020-06-10 21:02:37 +0200
committer	Mike Snitzer <snitzer@redhat.com>	2020-07-13 17:47:32 +0200
commit		69cea0d45a618ad4ae74f36386ef1af5128b2b19 (patch)
tree		ecc2ec6341ec5e5c5339fba4075eafab467573fb /drivers/md/dm-mpath.c
parent		Linux 5.8-rc5 (diff)
dm mpath: changes from initial m->flags locking audit
Fix locking in slow-paths where m->lock should be taken.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
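As context for the hunks below (an illustrative sketch, not part of this patch; the struct and function names are invented), the pattern the audit enforces looks like this: slow-path updates of the m->flags bits are done inside an IRQ-saving critical section on m->lock, so they stay consistent with the rest of the m->lock-protected multipath state.

/* Illustrative sketch only -- not from dm-mpath.c; all names are invented. */
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_QUEUE_IO	0	/* stand-in for an MPATHF_* bit */

struct example_mpath {
	spinlock_t lock;		/* serializes slow-path state changes */
	unsigned long flags;
};

/* Slow path: flip the flag inside an IRQ-saving m->lock section. */
static void example_mark_queue_io(struct example_mpath *m, bool queue)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	if (queue)
		set_bit(EXAMPLE_QUEUE_IO, &m->flags);
	else
		clear_bit(EXAMPLE_QUEUE_IO, &m->flags);
	spin_unlock_irqrestore(&m->lock, flags);
}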
Diffstat (limited to 'drivers/md/dm-mpath.c')
-rw-r--r--	drivers/md/dm-mpath.c	16
1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 78cff42d987e..d7bb74bded8c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -335,6 +335,8 @@ static int pg_init_all_paths(struct multipath *m)
 
 static void __switch_pg(struct multipath *m, struct priority_group *pg)
 {
+	lockdep_assert_held(&m->lock);
+
 	m->current_pg = pg;
 
 	/* Must we initialise the PG first, and queue I/O till it's ready? */
@@ -382,7 +384,9 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
 	unsigned bypassed = 1;
 
 	if (!atomic_read(&m->nr_valid_paths)) {
+		spin_lock_irqsave(&m->lock, flags);
 		clear_bit(MPATHF_QUEUE_IO, &m->flags);
+		spin_unlock_irqrestore(&m->lock, flags);
 		goto failed;
 	}
 
@@ -422,8 +426,11 @@ check_current_pg:
 				continue;
 			pgpath = choose_path_in_pg(m, pg, nr_bytes);
 			if (!IS_ERR_OR_NULL(pgpath)) {
-				if (!bypassed)
+				if (!bypassed) {
+					spin_lock_irqsave(&m->lock, flags);
 					set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
+					spin_unlock_irqrestore(&m->lock, flags);
+				}
 				return pgpath;
 			}
 		}
@@ -1662,9 +1669,9 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
 
 	spin_lock_irqsave(&m->lock, flags);
 	bio_list_add(&m->queued_bios, clone);
-	spin_unlock_irqrestore(&m->lock, flags);
 	if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
 		queue_work(kmultipathd, &m->process_queued_bios);
+	spin_unlock_irqrestore(&m->lock, flags);
 
 	r = DM_ENDIO_INCOMPLETE;
 done:
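A note on the hunk above (commentary, not part of the patch): moving the unlock below the test_bit()/queue_work() pair means the MPATHF_QUEUE_IO check and the decision to kick kmultipathd now happen while m->lock is still held, i.e. consistently with the other flag updates made under that lock. A minimal sketch of that check-then-queue shape, reusing the invented example_mpath type from the sketch above:

/* Illustrative sketch only; continues the invented example_mpath above. */
#include <linux/workqueue.h>

static void example_queue_io(struct example_mpath *m, struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	/* ...add the I/O to a list protected by m->lock... */
	if (!test_bit(EXAMPLE_QUEUE_IO, &m->flags))
		queue_work(system_wq, work);	/* decided under m->lock */
	spin_unlock_irqrestore(&m->lock, flags);
}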
@@ -1938,6 +1945,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 {
 	struct multipath *m = ti->private;
 	struct pgpath *current_pgpath;
+	unsigned long flags;
 	int r;
 
 	current_pgpath = READ_ONCE(m->current_pgpath);
@@ -1965,8 +1973,10 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
 			/* Path status changed, redo selection */
 			(void) choose_pgpath(m, 0);
 		}
+		spin_lock_irqsave(&m->lock, flags);
 		if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
-			pg_init_all_paths(m);
+			(void) __pg_init_all_paths(m);
+		spin_unlock_irqrestore(&m->lock, flags);
 		dm_table_run_md_queue_async(m->ti->table);
 		process_queued_io_list(m);
 	}
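A closing note on the last hunk (commentary, not part of the patch): with m->lock now taken explicitly in multipath_prepare_ioctl(), the call switches from the self-locking pg_init_all_paths() to __pg_init_all_paths(), which expects the caller to already hold the lock; the lockdep_assert_held() added to __switch_pg() documents the same contract. A minimal sketch of that locked/unlocked helper pairing, again with invented names built on the example_mpath type above:

/* Illustrative sketch only; __example_pg_init() must be called with
 * m->lock held, example_pg_init() is the self-locking wrapper. */
#include <linux/lockdep.h>

static int __example_pg_init(struct example_mpath *m)
{
	lockdep_assert_held(&m->lock);
	/* ...start path-group initialization, touch m->lock-protected state... */
	return 0;
}

static int example_pg_init(struct example_mpath *m)
{
	unsigned long flags;
	int r;

	spin_lock_irqsave(&m->lock, flags);
	r = __example_pg_init(m);
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}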