author    John Pittman <jpittman@redhat.com>    2018-08-23 19:35:57 +0200
committer Mike Snitzer <snitzer@redhat.com>     2018-10-16 20:27:38 +0200
commit    092b5648760a23656f6dcc83b74f4cf661094aca (patch)
tree      6f3d1b43bf3cd0caa7a8ac3fa4b60f37221f519e /drivers/md
parent    dm thin: use refcount_t for thin_c reference counting (diff)
dm zoned: target: use refcount_t for dm zoned reference counters
The API surrounding refcount_t should be used in place of atomic_t when
variables are being used as reference counters. This API can prevent issues
such as counter overflows and use-after-free conditions. Within the dm zoned
target stack, the atomic_t API is used for bioctx->ref and cw->refcount.
Change these to use refcount_t, avoiding the issues mentioned.

Signed-off-by: John Pittman <jpittman@redhat.com>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Tested-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
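For reference, below is a minimal sketch of the atomic_t to refcount_t
conversion pattern this commit applies. The struct and helper names
(demo_ctx, demo_alloc, demo_get, demo_put) are illustrative only and do not
appear in dm-zoned; the point is the API mapping: atomic_set() becomes
refcount_set(), atomic_inc() becomes refcount_inc(), and
atomic_dec_and_test() becomes refcount_dec_and_test().

    /*
     * Illustrative sketch of the atomic_t -> refcount_t pattern.
     * Names are hypothetical; not code from this patch.
     */
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct demo_ctx {
    	refcount_t ref;		/* was: atomic_t ref */
    	void *payload;
    };

    static struct demo_ctx *demo_alloc(void)
    {
    	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

    	if (!ctx)
    		return NULL;
    	/* was: atomic_set(&ctx->ref, 1); start with one reference */
    	refcount_set(&ctx->ref, 1);
    	return ctx;
    }

    static void demo_get(struct demo_ctx *ctx)
    {
    	/* was: atomic_inc(); refcount_inc() warns on saturation/misuse */
    	refcount_inc(&ctx->ref);
    }

    static void demo_put(struct demo_ctx *ctx)
    {
    	/* was: atomic_dec_and_test(); free only when the last ref drops */
    	if (refcount_dec_and_test(&ctx->ref))
    		kfree(ctx);
    }

Unlike plain atomic_t, refcount_t saturates instead of wrapping on overflow
and warns on suspicious transitions, which is what protects the reference
counters in bioctx->ref and cw->refcount against overflow and
use-after-free.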
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-zoned-target.c  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index a44183ff4be0..fa36825c1eff 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -19,7 +19,7 @@ struct dmz_bioctx {
struct dmz_target *target;
struct dm_zone *zone;
struct bio *bio;
- atomic_t ref;
+ refcount_t ref;
blk_status_t status;
};
@@ -28,7 +28,7 @@ struct dmz_bioctx {
*/
struct dm_chunk_work {
struct work_struct work;
- atomic_t refcount;
+ refcount_t refcount;
struct dmz_target *target;
unsigned int chunk;
struct bio_list bio_list;
@@ -115,7 +115,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks == dmz_bio_blocks(bio)) {
/* Setup and submit the BIO */
bio->bi_iter.bi_sector = sector;
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(bio);
return 0;
}
@@ -134,7 +134,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
bio_advance(bio, clone->bi_iter.bi_size);
/* Submit the clone */
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(clone);
return 0;
@@ -240,7 +240,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
/* Setup and submit the BIO */
bio_set_dev(bio, dmz->dev->bdev);
bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(bio);
if (dmz_is_seq(zone))
@@ -456,7 +456,7 @@ out:
*/
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
- atomic_inc(&cw->refcount);
+ refcount_inc(&cw->refcount);
}
/*
@@ -465,7 +465,7 @@ static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
*/
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
- if (atomic_dec_and_test(&cw->refcount)) {
+ if (refcount_dec_and_test(&cw->refcount)) {
WARN_ON(!bio_list_empty(&cw->bio_list));
radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
kfree(cw);
@@ -546,7 +546,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
goto out;
INIT_WORK(&cw->work, dmz_chunk_work);
- atomic_set(&cw->refcount, 0);
+ refcount_set(&cw->refcount, 0);
cw->target = dmz;
cw->chunk = chunk;
bio_list_init(&cw->bio_list);
@@ -599,7 +599,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->target = dmz;
bioctx->zone = NULL;
bioctx->bio = bio;
- atomic_set(&bioctx->ref, 1);
+ refcount_set(&bioctx->ref, 1);
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
@@ -633,7 +633,7 @@ static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error
if (bioctx->status == BLK_STS_OK && *error)
bioctx->status = *error;
- if (!atomic_dec_and_test(&bioctx->ref))
+ if (!refcount_dec_and_test(&bioctx->ref))
return DM_ENDIO_INCOMPLETE;
/* Done */