author | David Sterba <dsterba@suse.com> | 2017-03-31 17:12:51 +0200
---|---|---
committer | David Sterba <dsterba@suse.com> | 2017-08-21 17:47:42 +0200
commit | 2073c4c2e51a9343af10862ba478e7a67d6caf04 |
tree | b75e68bedddae6214a5b664b9f129f237049b6f9 |
parent | btrfs: preserve i_mode if __btrfs_set_acl() fails |
btrfs: scrub: use bool for flush_all_writes
flush_all_writes is an atomic_t but does not use any of the atomic
semantics; it is only an on/off indicator, so a plain bool is enough.
Signed-off-by: David Sterba <dsterba@suse.com>
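
For illustration, here is a minimal userspace C11 sketch of the pattern the message describes (not the kernel code itself): an atomic integer that is only ever set to 0 or 1 and read back, next to the equivalent plain bool. The struct and field names merely mirror the ones in scrub.c.

```c
/*
 * Userspace analogue of the pattern in this patch (illustrative only,
 * not kernel code): an atomic type used purely as an on/off indicator,
 * versus a plain bool carrying the same information.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx_atomic {
	atomic_int flush_all_writes;	/* only ever 0 or 1 */
};

struct ctx_bool {
	bool flush_all_writes;		/* same information, clearer type */
};

int main(void)
{
	struct ctx_atomic a = { .flush_all_writes = 0 };
	struct ctx_bool b = { .flush_all_writes = false };

	/* old style: atomic API used as a simple flag */
	atomic_store(&a.flush_all_writes, 1);
	if (atomic_load(&a.flush_all_writes))
		printf("atomic flag set\n");

	/* new style: plain assignment and test */
	b.flush_all_writes = true;
	if (b.flush_all_writes)
		printf("bool flag set\n");

	return 0;
}
```

When no read-modify-write operation or cross-CPU ordering guarantee is relied on, the bool states the intent directly and avoids implying synchronization that is not actually there.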
-rw-r--r-- | fs/btrfs/scrub.c | 18 |
1 file changed, 8 insertions, 10 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 20c1ba19e665..810fce240489 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -182,8 +182,8 @@ struct scrub_ctx {
 	struct scrub_bio	*wr_curr_bio;
 	struct mutex		wr_lock;
 	int			pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
-	atomic_t		flush_all_writes;
 	struct btrfs_device	*wr_tgtdev;
+	bool			flush_all_writes;
 
 	/*
 	 * statistics
@@ -717,7 +717,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 		WARN_ON(!fs_info->dev_replace.tgtdev);
 		sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
 		sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
-		atomic_set(&sctx->flush_all_writes, 0);
+		sctx->flush_all_writes = false;
 	}
 
 	return sctx;
@@ -2402,8 +2402,7 @@ static void scrub_missing_raid56_worker(struct btrfs_work *work)
 
 	scrub_block_put(sblock);
 
-	if (sctx->is_dev_replace &&
-	    atomic_read(&sctx->flush_all_writes)) {
+	if (sctx->is_dev_replace && sctx->flush_all_writes) {
 		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
 		mutex_unlock(&sctx->wr_lock);
@@ -2607,8 +2606,7 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	sctx->first_free = sbio->index;
 	spin_unlock(&sctx->list_lock);
 
-	if (sctx->is_dev_replace &&
-	    atomic_read(&sctx->flush_all_writes)) {
+	if (sctx->is_dev_replace && sctx->flush_all_writes) {
 		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
 		mutex_unlock(&sctx->wr_lock);
@@ -3440,14 +3438,14 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
 		 */
 		if (atomic_read(&fs_info->scrub_pause_req)) {
 			/* push queued extents */
-			atomic_set(&sctx->flush_all_writes, 1);
+			sctx->flush_all_writes = true;
 			scrub_submit(sctx);
 			mutex_lock(&sctx->wr_lock);
 			scrub_wr_submit(sctx);
 			mutex_unlock(&sctx->wr_lock);
 			wait_event(sctx->list_wait,
 				   atomic_read(&sctx->bios_in_flight) == 0);
-			atomic_set(&sctx->flush_all_writes, 0);
+			sctx->flush_all_writes = false;
 			scrub_blocked_if_needed(fs_info);
 		}
 
@@ -3892,7 +3890,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * write requests are really completed when bios_in_flight
 		 * changes to 0.
 		 */
-		atomic_set(&sctx->flush_all_writes, 1);
+		sctx->flush_all_writes = true;
 		scrub_submit(sctx);
 		mutex_lock(&sctx->wr_lock);
 		scrub_wr_submit(sctx);
@@ -3910,7 +3908,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 */
 		wait_event(sctx->list_wait,
 			   atomic_read(&sctx->workers_pending) == 0);
-		atomic_set(&sctx->flush_all_writes, 0);
+		sctx->flush_all_writes = false;
 
 		scrub_pause_off(fs_info);
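
As a rough, purely illustrative sketch of the control flow the scrub_stripe() hunk above follows (raise the flag, push out queued work, wait for it to drain, lower the flag), here is a self-contained userspace version. Names such as fake_scrub_ctx, fake_submit() and fake_wait_idle() are hypothetical stand-ins, not kernel functions, and all locking and wait machinery is omitted.

```c
/*
 * Hypothetical sketch (not kernel code) of the set-true / submit /
 * wait / set-false sequence around the flush_all_writes flag.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_scrub_ctx {
	bool flush_all_writes;
	int  bios_in_flight;
};

static void fake_submit(struct fake_scrub_ctx *sctx)
{
	/* stand-in for scrub_submit()/scrub_wr_submit() */
	printf("submitting queued bios (flush_all_writes=%d)\n",
	       sctx->flush_all_writes);
}

static void fake_wait_idle(struct fake_scrub_ctx *sctx)
{
	/* stand-in for wait_event(..., bios_in_flight == 0) */
	sctx->bios_in_flight = 0;
}

static void pause_for_scrub(struct fake_scrub_ctx *sctx)
{
	sctx->flush_all_writes = true;	/* push queued extents */
	fake_submit(sctx);
	fake_wait_idle(sctx);
	sctx->flush_all_writes = false;
}

int main(void)
{
	struct fake_scrub_ctx sctx = { .flush_all_writes = false,
				       .bios_in_flight = 3 };

	pause_for_scrub(&sctx);
	printf("done, bios_in_flight=%d\n", sctx.bios_in_flight);
	return 0;
}
```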