-rw-r--r--  drivers/md/dm-core.h           |  2
-rw-r--r--  drivers/md/dm-ioctl.c          |  2
-rw-r--r--  drivers/md/dm-mpath.c          |  5
-rw-r--r--  drivers/md/dm-table.c          | 14
-rw-r--r--  drivers/md/dm.c                | 11
-rw-r--r--  drivers/md/dm.h                |  8
-rw-r--r--  include/linux/device-mapper.h  | 14
7 files changed, 32 insertions, 24 deletions
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 136fda3ff9e5..b92f74d9a982 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -47,7 +47,7 @@ struct mapped_device {
 	struct request_queue *queue;
 	int numa_node_id;
 
-	unsigned type;
+	enum dm_queue_mode type;
 	/* Protect queue and type against concurrent access. */
 	struct mutex type_lock;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index ddda8107aa7e..2d5d7064acbf 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1260,7 +1260,7 @@ static int populate_table(struct dm_table *table,
 	return dm_table_complete(table);
 }
 
-static bool is_valid_type(unsigned cur, unsigned new)
+static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
 {
 	if (cur == new ||
 	    (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 730ec0be1afa..8336332bd61f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -90,7 +90,7 @@ struct multipath {
 	atomic_t pg_init_in_progress;	/* Only one pg_init allowed at once */
 	atomic_t pg_init_count;		/* Number of times pg_init called */
 
-	unsigned queue_mode;
+	enum dm_queue_mode queue_mode;
 
 	struct mutex work_mutex;
 	struct work_struct trigger_event;
@@ -1700,6 +1700,9 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 			case DM_TYPE_MQ_REQUEST_BASED:
 				DMEMIT("queue_mode mq ");
 				break;
+			default:
+				WARN_ON_ONCE(true);
+				break;
 			}
 		}
 	}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 515136f29115..a02a04829156 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -30,7 +30,7 @@ struct dm_table {
 	struct mapped_device *md;
-	unsigned type;
+	enum dm_queue_mode type;
 
 	/* btree table */
 	unsigned int depth;
@@ -825,19 +825,19 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
-static bool __table_type_bio_based(unsigned table_type)
+static bool __table_type_bio_based(enum dm_queue_mode table_type)
 {
 	return (table_type == DM_TYPE_BIO_BASED ||
 		table_type == DM_TYPE_DAX_BIO_BASED);
 }
 
-static bool __table_type_request_based(unsigned table_type)
+static bool __table_type_request_based(enum dm_queue_mode table_type)
 {
 	return (table_type == DM_TYPE_REQUEST_BASED ||
 		table_type == DM_TYPE_MQ_REQUEST_BASED);
 }
 
-void dm_table_set_type(struct dm_table *t, unsigned type)
+void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
 {
 	t->type = type;
 }
@@ -879,7 +879,7 @@ static int dm_table_determine_type(struct dm_table *t)
 	struct dm_target *tgt;
 	struct dm_dev_internal *dd;
 	struct list_head *devices = dm_table_get_devices(t);
-	unsigned live_md_type = dm_get_md_type(t->md);
+	enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
 
 	if (t->type != DM_TYPE_NONE) {
 		/* target already set the table's type */
@@ -988,7 +988,7 @@ verify_rq_based:
 	return 0;
 }
 
-unsigned dm_table_get_type(struct dm_table *t)
+enum dm_queue_mode dm_table_get_type(struct dm_table *t)
 {
 	return t->type;
 }
@@ -1039,7 +1039,7 @@ bool dm_table_all_blk_mq_devices(struct dm_table *t)
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
-	unsigned type = dm_table_get_type(t);
+	enum dm_queue_mode type = dm_table_get_type(t);
 	unsigned per_io_data_size = 0;
 	struct dm_target *tgt;
 	unsigned i;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 9940c9a42665..45660246e8f5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1807,13 +1807,13 @@ void dm_unlock_md_type(struct mapped_device *md)
 	mutex_unlock(&md->type_lock);
 }
 
-void dm_set_md_type(struct mapped_device *md, unsigned type)
+void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type)
 {
 	BUG_ON(!mutex_is_locked(&md->type_lock));
 	md->type = type;
 }
 
-unsigned dm_get_md_type(struct mapped_device *md)
+enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
 {
 	return md->type;
 }
@@ -1840,7 +1840,7 @@ EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
 	int r;
-	unsigned type = dm_get_md_type(md);
+	enum dm_queue_mode type = dm_get_md_type(md);
 
 	switch (type) {
 	case DM_TYPE_REQUEST_BASED:
@@ -1871,6 +1871,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 		if (type == DM_TYPE_DAX_BIO_BASED)
 			queue_flag_set_unlocked(QUEUE_FLAG_DAX, md->queue);
 		break;
+	case DM_TYPE_NONE:
+		WARN_ON_ONCE(true);
+		break;
 	}
 
 	return 0;
@@ -2556,7 +2559,7 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
 					    unsigned integrity, unsigned per_io_data_size)
 {
 	struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index f298b01f7ab3..38c84c0a35d4 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -64,7 +64,7 @@ void dm_table_presuspend_undo_targets(struct dm_table *t);
 void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
-unsigned dm_table_get_type(struct dm_table *t);
+enum dm_queue_mode dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
 struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
@@ -76,8 +76,8 @@ struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
 
 void dm_lock_md_type(struct mapped_device *md);
 void dm_unlock_md_type(struct mapped_device *md);
-void dm_set_md_type(struct mapped_device *md, unsigned type);
-unsigned dm_get_md_type(struct mapped_device *md);
+void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
+enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 
 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
@@ -204,7 +204,7 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
+struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
 					    unsigned integrity, unsigned per_bio_data_size);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 98f981026e4e..1ce4036224eb 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -22,11 +22,13 @@ struct bio_vec;
 /*
  * Type of table, mapped_device's mempool and request_queue
  */
-#define DM_TYPE_NONE			0
-#define DM_TYPE_BIO_BASED		1
-#define DM_TYPE_REQUEST_BASED		2
-#define DM_TYPE_MQ_REQUEST_BASED	3
-#define DM_TYPE_DAX_BIO_BASED		4
+enum dm_queue_mode {
+	DM_TYPE_NONE		 = 0,
+	DM_TYPE_BIO_BASED	 = 1,
+	DM_TYPE_REQUEST_BASED	 = 2,
+	DM_TYPE_MQ_REQUEST_BASED = 3,
+	DM_TYPE_DAX_BIO_BASED	 = 4,
+};
 
 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
 
@@ -476,7 +478,7 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
  * Useful for "hybrid" target (supports both bio-based
  * and request-based).
  */
-void dm_table_set_type(struct dm_table *t, unsigned type);
+void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
 
 /*
  * Finally call this to make the table ready for use.
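
Beyond self-documentation, the practical effect of replacing the bare #define constants with enum dm_queue_mode is that the compiler can now check switch statements over the queue mode for exhaustiveness, which is the same class of gap the patch closes with the new "default" and "case DM_TYPE_NONE" WARN_ON_ONCE() arms above. A minimal userspace sketch (not part of the patch; only the enum is copied from the diff, and print_queue_mode() is a made-up helper) showing the -Wswitch diagnostic that gcc and clang enable under -Wall:

/* Standalone illustration only -- not kernel code. */
#include <stdio.h>

enum dm_queue_mode {
	DM_TYPE_NONE		 = 0,
	DM_TYPE_BIO_BASED	 = 1,
	DM_TYPE_REQUEST_BASED	 = 2,
	DM_TYPE_MQ_REQUEST_BASED = 3,
	DM_TYPE_DAX_BIO_BASED	 = 4,
};

static void print_queue_mode(enum dm_queue_mode mode)
{
	/* With an "unsigned mode" parameter the compiler has nothing to
	 * check here; with the enum, -Wswitch warns that DM_TYPE_NONE is
	 * not handled because the switch has no case for it and no
	 * default arm. */
	switch (mode) {
	case DM_TYPE_BIO_BASED:
		printf("bio-based\n");
		break;
	case DM_TYPE_REQUEST_BASED:
		printf("request-based\n");
		break;
	case DM_TYPE_MQ_REQUEST_BASED:
		printf("mq request-based\n");
		break;
	case DM_TYPE_DAX_BIO_BASED:
		printf("dax bio-based\n");
		break;
	}
}

int main(void)
{
	print_queue_mode(DM_TYPE_BIO_BASED);
	return 0;
}

Built with something like "gcc -Wall example.c", this produces a warning along the lines of "enumeration value 'DM_TYPE_NONE' not handled in switch"; adding a default arm (as the dm-mpath.c hunk does) silences it while still trapping unexpected values at runtime via WARN_ON_ONCE().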