author      Kent Overstreet <kent.overstreet@linux.dev>    2022-11-02 22:12:00 +0100
committer   Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 23:09:51 +0200
commit      a8b3a677e786fa869d220a6a78b5532a36dc2f4d (patch)
tree        3fdbdbb71945ae42dab8dc94971e1c78286eaa63
parent      bcachefs: Data update support for unwritten extents (diff)
download    linux-a8b3a677e786fa869d220a6a78b5532a36dc2f4d.tar.xz
            linux-a8b3a677e786fa869d220a6a78b5532a36dc2f4d.zip
bcachefs: Nocow support
This adds support for nocow mode, where we do writes in-place when
possible. Patch components:
- New boolean filesystem and inode option, nocow: note that when nocow
is enabled, data checksumming and compression are implicitly disabled
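
  As context for what the option implies, the patch's change to bch2_inode_opts_get() (visible in the diff below) zeroes the related options whenever nocow is set. A minimal standalone sketch of that behaviour, using simplified stand-in types rather than the kernel structures:

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct bch_io_opts: only the fields relevant here */
struct io_opts {
	bool		nocow;
	unsigned	data_checksum;
	unsigned	compression;
	unsigned	background_compression;
	unsigned	erasure_code;
};

/* Mirrors the patch: nocow implicitly disables checksumming, compression and EC */
static void apply_nocow(struct io_opts *opts)
{
	if (opts->nocow)
		opts->compression = opts->background_compression =
		opts->data_checksum = opts->erasure_code = 0;
}

int main(void)
{
	struct io_opts opts = {
		.nocow			= true,
		.data_checksum		= 1,
		.compression		= 2,
		.background_compression	= 2,
		.erasure_code		= 1,
	};

	apply_nocow(&opts);
	printf("checksum=%u compression=%u\n", opts.data_checksum, opts.compression);
	return 0;
}
```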
- To prevent in-place writes from racing with data moves
(data_update.c) or bucket reuse (i.e. a bucket being reused and
re-allocated while a nocow write is in flight), we have a new locking
mechanism.
Buckets can be locked for either data update or data move, using a
fixed-size hash table of two_state_shared locks. There is no
chaining, meaning updates and moves to different buckets that hash to
the same lock will wait unnecessarily; we'll want to watch for this
becoming an issue (see the sketch below).
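
  A rough standalone model of the lock table added in nocow_locking.h below; the two_state_shared lock is reduced to a bare atomic here, hash_bucket() mimics the kernel's hash_64(), and all names are simplified stand-ins, not the kernel code:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Fixed number of lock slots, as in the patch: 1 << 10, no chaining */
#define BUCKET_NOCOW_LOCKS_BITS	10
#define BUCKET_NOCOW_LOCKS	(1U << BUCKET_NOCOW_LOCKS_BITS)

struct two_state_lock {		/* stand-in for two_state_lock_t: v != 0 means held */
	atomic_long v;
};

static struct two_state_lock nocow_locks[BUCKET_NOCOW_LOCKS];

/* hash_64()-style multiplicative hash over the (device, bucket) pair */
static unsigned hash_bucket(uint64_t dev_bucket)
{
	return (unsigned) ((dev_bucket * 0x61C8864680B583EBull)
			   >> (64 - BUCKET_NOCOW_LOCKS_BITS));
}

static struct two_state_lock *bucket_nocow_lock(unsigned dev, uint64_t bucket)
{
	uint64_t dev_bucket = ((uint64_t) dev << 56) | bucket;

	return &nocow_locks[hash_bucket(dev_bucket) & (BUCKET_NOCOW_LOCKS - 1)];
}

static bool bucket_nocow_is_locked(unsigned dev, uint64_t bucket)
{
	return atomic_load(&bucket_nocow_lock(dev, bucket)->v) != 0;
}

int main(void)
{
	/* "Lock" dev 0, bucket 1234, then query it and an unrelated bucket */
	atomic_store(&bucket_nocow_lock(0, 1234)->v, 1);

	printf("bucket 1234 locked: %d\n", bucket_nocow_is_locked(0, 1234));
	/* Reports locked only if 5678 happens to hash to the same slot: */
	printf("bucket 5678 locked: %d\n", bucket_nocow_is_locked(0, 5678));
	return 0;
}
```

  With 1 << 10 slots the collisions should be rare in practice, but the table size is a fixed memory-versus-contention trade-off rather than something that grows with the filesystem.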
- The allocator path also needs to check for in-place writes in flight
to a given bucket before giving it out: thus we add another counter,
skipped_nocow, to bucket_alloc_state so we can track this.
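
  The shape of that check, as added to __try_alloc_bucket() in the diff, can be sketched roughly as follows; struct and function names are simplified stand-ins, and the nocow-lock query is stubbed out:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bucket_alloc_state {	/* simplified: only the counters of interest */
	uint64_t buckets_seen;
	uint64_t skipped_nocow;
};

/* Stub standing in for the nocow lock table query shown earlier */
static bool bucket_nocow_is_locked(unsigned dev, uint64_t bucket)
{
	(void) dev;
	return bucket == 42;	/* pretend bucket 42 has a nocow write in flight */
}

/*
 * Illustrative shape of the allocator-side check: a bucket with a nocow
 * write in flight must not be handed back out for reuse.
 */
static bool try_alloc_bucket(unsigned dev, uint64_t bucket,
			     struct bucket_alloc_state *s)
{
	s->buckets_seen++;

	if (bucket_nocow_is_locked(dev, bucket)) {
		s->skipped_nocow++;
		return false;	/* caller tries the next candidate bucket */
	}

	/* ... freelist / open-bucket checks elided ... */
	return true;
}

int main(void)
{
	struct bucket_alloc_state s = { 0 };

	for (uint64_t b = 40; b < 45; b++)
		printf("bucket %llu: %s\n", (unsigned long long) b,
		       try_alloc_bucket(0, b, &s) ? "allocated" : "skipped");

	printf("skipped_nocow = %llu\n", (unsigned long long) s.skipped_nocow);
	return 0;
}
```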
- Fsync now may need to issue cache flushes to block devices instead of
flushing the journal. We add a device bitmask to bch_inode_info,
ei_devs_need_flush, which tracks devices that need to have flushes
issued. Note that this will lead to unnecessary flushes when other
codepaths have already issued flushes; we may want to replace this with
a sequence number.
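
  A simplified model of the bitmask scheme: the real code keeps a struct bch_devs_mask in bch_inode_info, sets bits from the write completion path, and submits REQ_OP_FLUSH bios asynchronously under a closure, but the bookkeeping reduces to roughly this:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for ei_devs_need_flush: one bit per member device */
struct inode_info {
	uint64_t devs_need_flush;
};

/* Called from the write completion path for nocow writes */
static void nocow_write_done(struct inode_info *inode, unsigned dev)
{
	inode->devs_need_flush |= 1ULL << dev;
}

/* Stub: the real code submits a REQ_OP_FLUSH bio to the device's bdev */
static void issue_cache_flush(unsigned dev)
{
	printf("FLUSH dev %u\n", dev);
}

/* fsync path: flush every device that saw a nocow write since the last flush */
static void flush_nocow_writes(struct inode_info *inode)
{
	uint64_t devs = inode->devs_need_flush;

	inode->devs_need_flush = 0;

	for (unsigned dev = 0; dev < 64; dev++)
		if (devs & (1ULL << dev))
			issue_cache_flush(dev);
}

int main(void)
{
	struct inode_info inode = { 0 };

	nocow_write_done(&inode, 0);
	nocow_write_done(&inode, 3);
	flush_nocow_writes(&inode);	/* flushes devices 0 and 3 */
	return 0;
}
```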
- New nocow write path: look up extents, and if they're writable, write
to them in place; otherwise fall back to the normal COW write path.
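
  The in-place-versus-COW decision hinges on a writability test mirroring bch2_extent_is_writeable() from the diff: every pointer must be plain (no checksum, no compression, no erasure coding) and the extent must already carry enough durable replicas. A standalone sketch with the extent reduced to an array of simplified pointer structs:

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified per-pointer view of an extent (stand-in for extent_ptr_decoded) */
struct extent_ptr {
	bool	 cached;
	bool	 has_ec;	/* erasure coded */
	bool	 compressed;
	unsigned csum_type;
	unsigned durability;
};

/*
 * Illustrative "can we write in place?" test: any checksummed, compressed or
 * erasure-coded pointer forces the COW path, as does insufficient durability.
 */
static bool extent_is_writeable(const struct extent_ptr *ptrs, unsigned nr,
				unsigned want_replicas)
{
	unsigned replicas = 0;

	for (unsigned i = 0; i < nr; i++) {
		if (ptrs[i].csum_type || ptrs[i].compressed || ptrs[i].has_ec)
			return false;
		if (!ptrs[i].cached)
			replicas += ptrs[i].durability;
	}

	return replicas >= want_replicas;
}

int main(void)
{
	struct extent_ptr ptrs[2] = {
		{ .durability = 1 },
		{ .durability = 1 },
	};

	printf("in-place write ok: %d\n", extent_is_writeable(ptrs, 2, 2));

	ptrs[1].compressed = true;
	printf("in-place write ok: %d\n", extent_is_writeable(ptrs, 2, 2));
	return 0;
}
```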
XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush
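
  As a purely hypothetical illustration of that TODO (this is not implemented by the patch), a per-device flush sequence number could replace the bitmask roughly as follows, skipping the flush whenever the device has already completed one at least as new as what the inode needs:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-device and per-inode state; names are invented for this sketch */
struct dev_state {
	uint64_t flush_seq_issued;	/* bumped every time a cache flush is issued */
	uint64_t flush_seq_completed;	/* bumped when that flush completes */
};

struct inode_state {
	uint64_t need_flush_seq[4];	/* per device: flush seq this inode waits on */
};

/* Nocow write completion: a flush issued after this point covers the write */
static void nocow_write_done(struct inode_state *inode, struct dev_state *dev,
			     unsigned i)
{
	inode->need_flush_seq[i] = dev->flush_seq_issued + 1;
}

/* fsync: only flush devices whose completed flush is behind what we need */
static void fsync_flush(struct inode_state *inode, struct dev_state *devs,
			unsigned nr)
{
	for (unsigned i = 0; i < nr; i++) {
		if (devs[i].flush_seq_completed >= inode->need_flush_seq[i])
			continue;	/* someone else already flushed far enough */

		devs[i].flush_seq_issued++;
		printf("FLUSH dev %u\n", i);
		devs[i].flush_seq_completed = devs[i].flush_seq_issued; /* assume sync flush */
	}
}

int main(void)
{
	struct dev_state devs[4] = {{ 0 }};
	struct inode_state inode = {{ 0 }};

	nocow_write_done(&inode, &devs[2], 2);
	fsync_flush(&inode, devs, 4);	/* flushes dev 2 only */
	fsync_flush(&inode, devs, 4);	/* no-op: already flushed */
	return 0;
}
```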
XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
-rw-r--r--  fs/bcachefs/Makefile            |   1
-rw-r--r--  fs/bcachefs/alloc_foreground.c  |   5
-rw-r--r--  fs/bcachefs/alloc_types.h       |   1
-rw-r--r--  fs/bcachefs/bcachefs.h          |  10
-rw-r--r--  fs/bcachefs/bcachefs_format.h   |  10
-rw-r--r--  fs/bcachefs/btree_io.c          |   3
-rw-r--r--  fs/bcachefs/checksum.h          |   7
-rw-r--r--  fs/bcachefs/data_update.c       |  10
-rw-r--r--  fs/bcachefs/extents.c           |  39
-rw-r--r--  fs/bcachefs/extents.h           |   1
-rw-r--r--  fs/bcachefs/fs-io.c             |  98
-rw-r--r--  fs/bcachefs/fs.h                |  11
-rw-r--r--  fs/bcachefs/inode.c             |   3
-rw-r--r--  fs/bcachefs/io.c                | 452
-rw-r--r--  fs/bcachefs/io.h                |   7
-rw-r--r--  fs/bcachefs/io_types.h          |   7
-rw-r--r--  fs/bcachefs/move.c              |   7
-rw-r--r--  fs/bcachefs/nocow_locking.c     |  15
-rw-r--r--  fs/bcachefs/nocow_locking.h     |  55
-rw-r--r--  fs/bcachefs/opts.h              |   7
-rw-r--r--  fs/bcachefs/super.h             |   7
-rw-r--r--  fs/bcachefs/trace.h             |   5
22 files changed, 709 insertions(+), 52 deletions(-)
diff --git a/fs/bcachefs/Makefile b/fs/bcachefs/Makefile index 456d540441ce..55b6d85d55c3 100644 --- a/fs/bcachefs/Makefile +++ b/fs/bcachefs/Makefile @@ -52,6 +52,7 @@ bcachefs-y := \ migrate.o \ move.o \ movinggc.o \ + nocow_locking.o \ opts.o \ printbuf.o \ quota.o \ diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c index a179bbe23c93..f78eaa52c11f 100644 --- a/fs/bcachefs/alloc_foreground.c +++ b/fs/bcachefs/alloc_foreground.c @@ -227,6 +227,11 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev * return NULL; } + if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) { + s->skipped_nocow++; + return NULL; + } + spin_lock(&c->freelist_lock); if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) { diff --git a/fs/bcachefs/alloc_types.h b/fs/bcachefs/alloc_types.h index 2c96794d1993..2e6f48069258 100644 --- a/fs/bcachefs/alloc_types.h +++ b/fs/bcachefs/alloc_types.h @@ -12,6 +12,7 @@ struct bucket_alloc_state { u64 buckets_seen; u64 skipped_open; u64 skipped_need_journal_commit; + u64 skipped_nocow; u64 skipped_nouse; }; diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h index 6d048e5d8843..74632105fb45 100644 --- a/fs/bcachefs/bcachefs.h +++ b/fs/bcachefs/bcachefs.h @@ -206,6 +206,7 @@ #include "bcachefs_format.h" #include "errcode.h" #include "fifo.h" +#include "nocow_locking.h" #include "opts.h" #include "util.h" @@ -383,7 +384,8 @@ BCH_DEBUG_PARAMS_DEBUG() x(journal_flush_seq) \ x(blocked_journal) \ x(blocked_allocate) \ - x(blocked_allocate_open_bucket) + x(blocked_allocate_open_bucket) \ + x(nocow_lock_contended) enum bch_time_stats { #define x(name) BCH_TIME_##name, @@ -483,6 +485,7 @@ struct bch_dev { struct bch_sb *sb_read_scratch; int sb_write_error; dev_t dev; + atomic_t flush_seq; struct bch_devs_mask self; @@ -897,7 +900,9 @@ struct bch_fs { struct bio_set bio_read_split; struct bio_set bio_write; struct mutex bio_bounce_pages_lock; -mempool_t bio_bounce_pages; + mempool_t bio_bounce_pages; + struct bucket_nocow_lock_table + nocow_locks; struct rhashtable promote_table; mempool_t compression_bounce[2]; @@ -959,6 +964,7 @@ mempool_t bio_bounce_pages; struct bio_set writepage_bioset; struct bio_set dio_write_bioset; struct bio_set dio_read_bioset; + struct bio_set nocow_flush_bioset; /* ERRORS */ struct list_head fsck_errors; diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h index 57327c4dc9b4..024a714955f2 100644 --- a/fs/bcachefs/bcachefs_format.h +++ b/fs/bcachefs/bcachefs_format.h @@ -798,7 +798,8 @@ struct bch_inode_generation { x(bi_dir, 64) \ x(bi_dir_offset, 64) \ x(bi_subvol, 32) \ - x(bi_parent_subvol, 32) + x(bi_parent_subvol, 32) \ + x(bi_nocow, 8) /* subset of BCH_INODE_FIELDS */ #define BCH_INODE_OPTS() \ @@ -810,7 +811,8 @@ struct bch_inode_generation { x(promote_target, 16) \ x(foreground_target, 16) \ x(background_target, 16) \ - x(erasure_code, 16) + x(erasure_code, 16) \ + x(nocow, 8) enum inode_opt_id { #define x(name, ...) 
\ @@ -1548,7 +1550,8 @@ struct bch_sb_field_journal_seq_blacklist { x(alloc_v4, 20) \ x(new_data_types, 21) \ x(backpointers, 22) \ - x(inode_v3, 23) + x(inode_v3, 23) \ + x(unwritten_extents, 24) enum bcachefs_metadata_version { bcachefs_metadata_version_min = 9, @@ -1696,6 +1699,7 @@ LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62); LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63); LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32); LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33); +LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34); LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE, struct bch_sb, flags[4], 34, 54); /* diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c index dfa45cf4021f..87d80a59dd7e 100644 --- a/fs/bcachefs/btree_io.c +++ b/fs/bcachefs/btree_io.c @@ -1832,7 +1832,8 @@ static void btree_write_submit(struct work_struct *work) bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr) ptr->offset += wbio->sector_offset; - bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k); + bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, + &tmp.k, false); } void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags) diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h index f7ccef7a5520..409ad534d9f4 100644 --- a/fs/bcachefs/checksum.h +++ b/fs/bcachefs/checksum.h @@ -99,14 +99,17 @@ static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type, } static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c, - unsigned opt) + struct bch_io_opts opts) { + if (opts.nocow) + return 0; + if (c->sb.encryption_type) return c->opts.wide_macs ? 
BCH_CSUM_chacha20_poly1305_128 : BCH_CSUM_chacha20_poly1305_80; - return bch2_csum_opt_to_type(opt, true); + return bch2_csum_opt_to_type(opts.data_checksum, true); } static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c) diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c index 82d7e13e61a5..c3f12b3adb14 100644 --- a/fs/bcachefs/data_update.c +++ b/fs/bcachefs/data_update.c @@ -303,6 +303,13 @@ void bch2_data_update_read_done(struct data_update *m, void bch2_data_update_exit(struct data_update *update) { struct bch_fs *c = update->op.c; + struct bkey_ptrs_c ptrs = + bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k)); + const struct bch_extent_ptr *ptr; + + bkey_for_each_ptr(ptrs, ptr) + bch2_bucket_nocow_unlock(&c->nocow_locks, + PTR_BUCKET_POS(c, ptr), 0); bch2_bkey_buf_exit(&update->k, c); bch2_disk_reservation_put(c, &update->op.res); @@ -451,6 +458,9 @@ int bch2_data_update_init(struct bch_fs *c, struct data_update *m, m->op.incompressible = true; i++; + + bch2_bucket_nocow_lock(&c->nocow_locks, + PTR_BUCKET_POS(c, &p.ptr), 0); } if (reserve_sectors) { diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c index 627edba24900..55a8879dc4fe 100644 --- a/fs/bcachefs/extents.c +++ b/fs/bcachefs/extents.c @@ -664,22 +664,21 @@ unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k) return replicas; } -static unsigned bch2_extent_ptr_durability(struct bch_fs *c, - struct extent_ptr_decoded p) +unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p) { unsigned durability = 0; struct bch_dev *ca; - if (p.ptr.cached) + if (p->ptr.cached) return 0; - ca = bch_dev_bkey_exists(c, p.ptr.dev); + ca = bch_dev_bkey_exists(c, p->ptr.dev); if (ca->mi.state != BCH_MEMBER_STATE_failed) durability = max_t(unsigned, durability, ca->mi.durability); - if (p.has_ec) - durability += p.ec.redundancy; + if (p->has_ec) + durability += p->ec.redundancy; return durability; } @@ -692,7 +691,7 @@ unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k) unsigned durability = 0; bkey_for_each_ptr_decode(k.k, ptrs, p, entry) - durability += bch2_extent_ptr_durability(c, p); + durability += bch2_extent_ptr_durability(c,& p); return durability; } @@ -907,23 +906,31 @@ bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k, */ bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2) { - struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1); - struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2); - const union bch_extent_entry *entry1, *entry2; - struct extent_ptr_decoded p1, p2; - - if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2)) + if (k1.k->type != k2.k->type) return false; - bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1) - bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2) + if (bkey_extent_is_direct_data(k1.k)) { + struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1); + struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2); + const union bch_extent_entry *entry1, *entry2; + struct extent_ptr_decoded p1, p2; + + if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2)) + return false; + + bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1) + bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2) if (p1.ptr.dev == p2.ptr.dev && p1.ptr.gen == p2.ptr.gen && (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) == (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k)) return true; - return false; + return false; + } else { + /* KEY_TYPE_deleted, etc. 
*/ + return true; + } } bool bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1, diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h index 659ab76ea62c..e27d39b728b3 100644 --- a/fs/bcachefs/extents.h +++ b/fs/bcachefs/extents.h @@ -596,6 +596,7 @@ bool bch2_bkey_is_incompressible(struct bkey_s_c); unsigned bch2_bkey_sectors_compressed(struct bkey_s_c); unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c); +unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *); unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c); void bch2_bkey_drop_device(struct bkey_s, unsigned); diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c index b5cf0a3218ea..ec575b27eedb 100644 --- a/fs/bcachefs/fs-io.c +++ b/fs/bcachefs/fs-io.c @@ -35,6 +35,72 @@ #include <trace/events/writeback.h> +struct nocow_flush { + struct closure *cl; + struct bch_dev *ca; + struct bio bio; +}; + +static void nocow_flush_endio(struct bio *_bio) +{ + + struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio); + + closure_put(bio->cl); + percpu_ref_put(&bio->ca->io_ref); + bio_put(&bio->bio); +} + +static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c, + struct bch_inode_info *inode, + struct closure *cl) +{ + struct nocow_flush *bio; + struct bch_dev *ca; + struct bch_devs_mask devs; + unsigned dev; + + dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX); + if (dev == BCH_SB_MEMBERS_MAX) + return; + + devs = inode->ei_devs_need_flush; + memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush)); + + for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) { + rcu_read_lock(); + ca = rcu_dereference(c->devs[dev]); + if (ca && !percpu_ref_tryget(&ca->io_ref)) + ca = NULL; + rcu_read_unlock(); + + if (!ca) + continue; + + bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0, + REQ_OP_FLUSH, + GFP_KERNEL, + &c->nocow_flush_bioset), + struct nocow_flush, bio); + bio->cl = cl; + bio->ca = ca; + bio->bio.bi_end_io = nocow_flush_endio; + closure_bio_submit(&bio->bio, cl); + } +} + +static int bch2_inode_flush_nocow_writes(struct bch_fs *c, + struct bch_inode_info *inode) +{ + struct closure cl; + + closure_init_stack(&cl); + bch2_inode_flush_nocow_writes_async(c, inode, &cl); + closure_sync(&cl); + + return 0; +} + static inline bool bio_full(struct bio *bio, unsigned len) { if (bio->bi_vcnt >= bio->bi_max_vecs) @@ -1327,6 +1393,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c, op->subvol = inode->ei_subvol; op->pos = POS(inode->v.i_ino, sector); op->end_io = bch2_writepage_io_done; + op->devs_need_flush = &inode->ei_devs_need_flush; op->wbio.bio.bi_iter.bi_sector = sector; op->wbio.bio.bi_opf = wbc_to_write_flags(wbc); } @@ -2148,10 +2215,12 @@ static noinline void bch2_dio_write_flush(struct dio_write *dio) if (!dio->op.error) { ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode); - if (ret) + if (ret) { dio->op.error = ret; - else + } else { bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl); + bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl); + } } if (dio->sync) { @@ -2296,6 +2365,7 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio) dio->op.nr_replicas = dio->op.opts.data_replicas; dio->op.subvol = inode->ei_subvol; dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9); + dio->op.devs_need_flush = &inode->ei_devs_need_flush; if (sync) dio->op.flags |= BCH_WRITE_SYNC; @@ -2495,19 +2565,21 @@ out: * 
inode->ei_inode.bi_journal_seq won't be up to date since it's set in an * insert trigger: look up the btree inode instead */ -static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum) +static int bch2_flush_inode(struct bch_fs *c, + struct bch_inode_info *inode) { - struct bch_inode_unpacked inode; + struct bch_inode_unpacked u; int ret; if (c->opts.journal_flush_disabled) return 0; - ret = bch2_inode_find_by_inum(c, inum, &inode); + ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u); if (ret) return ret; - return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq); + return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?: + bch2_inode_flush_nocow_writes(c, inode); } int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync) @@ -2518,7 +2590,7 @@ int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync) ret = file_write_and_wait_range(file, start, end); ret2 = sync_inode_metadata(&inode->v, 1); - ret3 = bch2_flush_inode(c, inode_inum(inode)); + ret3 = bch2_flush_inode(c, inode); return bch2_err_class(ret ?: ret2 ?: ret3); } @@ -3105,6 +3177,11 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode, continue; } + /* + * XXX: for nocow mode, we should promote shared extents to + * unshared here + */ + sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset; if (!bkey_extent_is_allocation(k.k)) { @@ -3368,7 +3445,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src, if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) || IS_SYNC(file_inode(file_dst))) - ret = bch2_flush_inode(c, inode_inum(dst)); + ret = bch2_flush_inode(c, dst); err: bch2_quota_reservation_put(c, dst, "a_res); bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst); @@ -3622,6 +3699,7 @@ loff_t bch2_llseek(struct file *file, loff_t offset, int whence) void bch2_fs_fsio_exit(struct bch_fs *c) { + bioset_exit(&c->nocow_flush_bioset); bioset_exit(&c->dio_write_bioset); bioset_exit(&c->dio_read_bioset); bioset_exit(&c->writepage_bioset); @@ -3641,7 +3719,9 @@ int bch2_fs_fsio_init(struct bch_fs *c) BIOSET_NEED_BVECS) || bioset_init(&c->dio_write_bioset, 4, offsetof(struct dio_write, op.wbio.bio), - BIOSET_NEED_BVECS)) + BIOSET_NEED_BVECS) || + bioset_init(&c->nocow_flush_bioset, + 1, offsetof(struct nocow_flush, bio), 0)) ret = -ENOMEM; pr_verbose_init(c->opts, "ret %i", ret); diff --git a/fs/bcachefs/fs.h b/fs/bcachefs/fs.h index 4164d0669d70..e1c73a38c607 100644 --- a/fs/bcachefs/fs.h +++ b/fs/bcachefs/fs.h @@ -25,6 +25,17 @@ struct bch_inode_info { u32 ei_subvol; + /* + * When we've been doing nocow writes we'll need to issue flushes to the + * underlying block devices + * + * XXX: a device may have had a flush issued by some other codepath. It + * would be better to keep for each device a sequence number that's + * incremented when we isusue a cache flush, and track here the sequence + * number that needs flushing. 
+ */ + struct bch_devs_mask ei_devs_need_flush; + /* copy of inode in btree: */ struct bch_inode_unpacked ei_inode; }; diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c index f338cf6fd8b7..a98e40065122 100644 --- a/fs/bcachefs/inode.c +++ b/fs/bcachefs/inode.c @@ -892,4 +892,7 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c, #define x(_name, _bits) opts->_name = inode_opt_get(c, inode, _name); BCH_INODE_OPTS() #undef x + + if (opts->nocow) + opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0; } diff --git a/fs/bcachefs/io.c b/fs/bcachefs/io.c index 1d0ec638f645..d511bd664953 100644 --- a/fs/bcachefs/io.c +++ b/fs/bcachefs/io.c @@ -34,6 +34,7 @@ #include "trace.h" #include <linux/blkdev.h> +#include <linux/prefetch.h> #include <linux/random.h> #include <linux/sched/mm.h> @@ -375,24 +376,118 @@ int bch2_extent_fallocate(struct btree_trans *trans, s64 *i_sectors_delta, struct write_point_specifier write_point) { - int ret; struct bch_fs *c = trans->c; struct disk_reservation disk_res = { 0 }; - struct bkey_i_reservation *reservation = - bch2_trans_kmalloc(trans, sizeof(*reservation)); + struct closure cl; + struct open_buckets open_buckets; + struct bkey_s_c k; + struct bkey_buf old, new; + bool have_reservation = false; + bool unwritten = opts.nocow && + c->sb.version >= bcachefs_metadata_version_unwritten_extents; + int ret; - ret = PTR_ERR_OR_ZERO(reservation); + bch2_bkey_buf_init(&old); + bch2_bkey_buf_init(&new); + closure_init_stack(&cl); + open_buckets.nr = 0; +retry: + k = bch2_btree_iter_peek_slot(iter); + ret = bkey_err(k); if (ret) return ret; - bkey_reservation_init(&reservation->k_i); - reservation->k.p = iter->pos; - bch2_key_resize(&reservation->k, sectors); - reservation->v.nr_replicas = opts.data_replicas; + sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset); + + if (!have_reservation) { + unsigned new_replicas = + max(0, (int) opts.data_replicas - + (int) bch2_bkey_nr_ptrs_fully_allocated(k)); + /* + * Get a disk reservation before (in the nocow case) calling + * into the allocator: + */ + ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0); + if (unlikely(ret)) + goto out; + + bch2_bkey_buf_reassemble(&old, c, k); + } + + if (have_reservation) { + if (!bch2_extents_match(k, bkey_i_to_s_c(old.k))) + goto out; + + bch2_key_resize(&new.k->k, sectors); + } else if (!unwritten) { + struct bkey_i_reservation *reservation; + + bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64)); + reservation = bkey_reservation_init(new.k); + reservation->k.p = iter->pos; + bch2_key_resize(&reservation->k, sectors); + reservation->v.nr_replicas = opts.data_replicas; + } else { + struct bkey_i_extent *e; + struct bch_devs_list devs_have; + struct write_point *wp; + struct bch_extent_ptr *ptr; + + devs_have.nr = 0; + + bch2_bkey_buf_realloc(&new, c, BKEY_EXTENT_U64s_MAX); + + e = bkey_extent_init(new.k); + e->k.p = iter->pos; + + ret = bch2_alloc_sectors_start_trans(trans, + opts.foreground_target, + false, + write_point, + &devs_have, + opts.data_replicas, + opts.data_replicas, + RESERVE_none, 0, &cl, &wp); + if (bch2_err_matches(ret, BCH_ERR_operation_blocked)) { + bch2_trans_unlock(trans); + closure_sync(&cl); + goto retry; + } + if (ret) + return ret; + + sectors = min(sectors, wp->sectors_free); + + bch2_key_resize(&e->k, sectors); + + bch2_open_bucket_get(c, wp, &open_buckets); + bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, sectors, false); + bch2_alloc_sectors_done(c, 
wp); + + extent_for_each_ptr(extent_i_to_s(e), ptr) + ptr->unwritten = true; + } + + have_reservation = true; - ret = bch2_extent_update(trans, inum, iter, &reservation->k_i, &disk_res, + ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res, 0, i_sectors_delta, true); +out: + if ((atomic_read(&cl.remaining) & CLOSURE_REMAINING_MASK) != 1) { + bch2_trans_unlock(trans); + closure_sync(&cl); + } + + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { + bch2_trans_begin(trans); + goto retry; + } + + bch2_open_buckets_put(c, &open_buckets); bch2_disk_reservation_put(c, &disk_res); + bch2_bkey_buf_exit(&new, c); + bch2_bkey_buf_exit(&old, c); + return ret; } @@ -539,7 +634,8 @@ static int bch2_write_index_default(struct bch_write_op *op) void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, enum bch_data_type type, - const struct bkey_i *k) + const struct bkey_i *k, + bool nocow) { struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k)); const struct bch_extent_ptr *ptr; @@ -573,8 +669,9 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c, n->c = c; n->dev = ptr->dev; - n->have_ioref = bch2_dev_get_ioref(ca, + n->have_ioref = nocow || bch2_dev_get_ioref(ca, type == BCH_DATA_btree ? READ : WRITE); + n->nocow = nocow; n->submit_time = local_clock(); n->inode_offset = bkey_start_offset(&k->k); n->bio.bi_iter.bi_sector = ptr->offset; @@ -801,6 +898,9 @@ static void bch2_write_endio(struct bio *bio) op->flags |= BCH_WRITE_IO_ERROR; } + if (wbio->nocow) + set_bit(wbio->dev, op->devs_need_flush->d); + if (wbio->have_ioref) { bch2_latency_acct(ca, wbio->submit_time, WRITE); percpu_ref_put(&ca->io_ref); @@ -1221,6 +1321,321 @@ err: return ret; } +static bool bch2_extent_is_writeable(struct bch_write_op *op, + struct bkey_s_c k) +{ + struct bch_fs *c = op->c; + struct bkey_s_c_extent e; + struct extent_ptr_decoded p; + const union bch_extent_entry *entry; + unsigned replicas = 0; + + if (k.k->type != KEY_TYPE_extent) + return false; + + e = bkey_s_c_to_extent(k); + extent_for_each_ptr_decode(e, p, entry) { + if (p.crc.csum_type || + crc_is_compressed(p.crc) || + p.has_ec) + return false; + + replicas += bch2_extent_ptr_durability(c, &p); + } + + return replicas >= op->opts.data_replicas; +} + +static inline void bch2_nocow_write_unlock(struct bch_write_op *op) +{ + struct bch_fs *c = op->c; + const struct bch_extent_ptr *ptr; + struct bkey_i *k; + + for_each_keylist_key(&op->insert_keys, k) { + struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k)); + + bkey_for_each_ptr(ptrs, ptr) + bch2_bucket_nocow_unlock(&c->nocow_locks, + PTR_BUCKET_POS(c, ptr), + BUCKET_NOCOW_LOCK_UPDATE); + } +} + +static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans, + struct btree_iter *iter, + struct bkey_i *orig, + struct bkey_s_c k, + u64 new_i_size) +{ + struct bkey_i *new; + struct bkey_ptrs ptrs; + struct bch_extent_ptr *ptr; + int ret; + + if (!bch2_extents_match(bkey_i_to_s_c(orig), k)) { + /* trace this */ + return 0; + } + + new = bch2_bkey_make_mut(trans, k); + ret = PTR_ERR_OR_ZERO(new); + if (ret) + return ret; + + bch2_cut_front(bkey_start_pos(&orig->k), new); + bch2_cut_back(orig->k.p, new); + + ptrs = bch2_bkey_ptrs(bkey_i_to_s(new)); + bkey_for_each_ptr(ptrs, ptr) + ptr->unwritten = 0; + + /* + * Note that we're not calling bch2_subvol_get_snapshot() in this path - + * that was done when we kicked off the write, and here it's important + * that we update the extent that we wrote to - even if a snapshot has + * since 
been created. The write is still outstanding, so we're ok + * w.r.t. snapshot atomicity: + */ + return bch2_extent_update_i_size_sectors(trans, iter, + min(new->k.p.offset << 9, new_i_size), 0) ?: + bch2_trans_update(trans, iter, new, + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE); +} + +static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op) +{ + struct bch_fs *c = op->c; + struct btree_trans trans; + struct btree_iter iter; + struct bkey_i *orig; + struct bkey_s_c k; + int ret; + + bch2_trans_init(&trans, c, 0, 0); + + for_each_keylist_key(&op->insert_keys, orig) { + ret = for_each_btree_key_upto_commit(&trans, iter, BTREE_ID_extents, + bkey_start_pos(&orig->k), orig->k.p, + BTREE_ITER_INTENT, k, + NULL, NULL, BTREE_INSERT_NOFAIL, ({ + bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size); + })); + + if (ret && !bch2_err_matches(ret, EROFS)) { + struct bkey_i *k = bch2_keylist_front(&op->insert_keys); + + bch_err_inum_offset_ratelimited(c, + k->k.p.inode, k->k.p.offset << 9, + "write error while doing btree update: %s", + bch2_err_str(ret)); + } + + if (ret) { + op->error = ret; + break; + } + } + + bch2_trans_exit(&trans); +} + +static void __bch2_nocow_write_done(struct bch_write_op *op) +{ + bch2_nocow_write_unlock(op); + + if (unlikely(op->flags & BCH_WRITE_IO_ERROR)) { + op->error = -EIO; + } else if (unlikely(op->flags & BCH_WRITE_CONVERT_UNWRITTEN)) + bch2_nocow_write_convert_unwritten(op); +} + +static void bch2_nocow_write_done(struct closure *cl) +{ + struct bch_write_op *op = container_of(cl, struct bch_write_op, cl); + + __bch2_nocow_write_done(op); + bch2_write_done(cl); +} + +static void bch2_nocow_write(struct bch_write_op *op) +{ + struct bch_fs *c = op->c; + struct btree_trans trans; + struct btree_iter iter; + struct bkey_s_c k; + struct bkey_ptrs_c ptrs; + const struct bch_extent_ptr *ptr, *ptr2; + struct { + struct bpos b; + unsigned gen; + two_state_lock_t *l; + } buckets[BCH_REPLICAS_MAX]; + unsigned nr_buckets = 0; + u32 snapshot; + int ret, i; + + if (op->flags & BCH_WRITE_MOVE) + return; + + bch2_trans_init(&trans, c, 0, 0); +retry: + bch2_trans_begin(&trans); + + ret = bch2_subvolume_get_snapshot(&trans, op->subvol, &snapshot); + if (unlikely(ret)) + goto err; + + bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, + SPOS(op->pos.inode, op->pos.offset, snapshot), + BTREE_ITER_SLOTS); + while (1) { + struct bio *bio = &op->wbio.bio; + + nr_buckets = 0; + + k = bch2_btree_iter_peek_slot(&iter); + ret = bkey_err(k); + if (ret) + break; + + /* fall back to normal cow write path? 
*/ + if (unlikely(k.k->p.snapshot != snapshot || + !bch2_extent_is_writeable(op, k))) + break; + + if (bch2_keylist_realloc(&op->insert_keys, + op->inline_keys, + ARRAY_SIZE(op->inline_keys), + k.k->u64s)) + break; + + /* Get iorefs before dropping btree locks: */ + ptrs = bch2_bkey_ptrs_c(k); + bkey_for_each_ptr(ptrs, ptr) { + buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr); + buckets[nr_buckets].gen = ptr->gen; + buckets[nr_buckets].l = + bucket_nocow_lock(&c->nocow_locks, buckets[nr_buckets].b); + + prefetch(buckets[nr_buckets].l); + nr_buckets++; + + if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE))) + goto err_get_ioref; + + if (ptr->unwritten) + op->flags |= BCH_WRITE_CONVERT_UNWRITTEN; + } + + /* Unlock before taking nocow locks, doing IO: */ + bkey_reassemble(op->insert_keys.top, k); + bch2_trans_unlock(&trans); + + bch2_cut_front(op->pos, op->insert_keys.top); + if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN) + bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top); + + for (i = 0; i < nr_buckets; i++) { + struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode); + two_state_lock_t *l = buckets[i].l; + bool stale; + + if (!bch2_two_state_trylock(l, BUCKET_NOCOW_LOCK_UPDATE)) + __bch2_bucket_nocow_lock(&c->nocow_locks, l, BUCKET_NOCOW_LOCK_UPDATE); + + rcu_read_lock(); + stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen); + rcu_read_unlock(); + + if (unlikely(stale)) + goto err_bucket_stale; + } + + bio = &op->wbio.bio; + if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) { + bio = bio_split(bio, k.k->p.offset - op->pos.offset, + GFP_KERNEL, &c->bio_write); + wbio_init(bio)->put_bio = true; + bio->bi_opf = op->wbio.bio.bi_opf; + } else { + op->flags |= BCH_WRITE_DONE; + } + + op->pos.offset += bio_sectors(bio); + op->written += bio_sectors(bio); + + bio->bi_end_io = bch2_write_endio; + bio->bi_private = &op->cl; + bio->bi_opf |= REQ_OP_WRITE; + closure_get(&op->cl); + bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user, + op->insert_keys.top, true); + + bch2_keylist_push(&op->insert_keys); + if (op->flags & BCH_WRITE_DONE) + break; + bch2_btree_iter_advance(&iter); + } +out: + bch2_trans_iter_exit(&trans, &iter); +err: + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + goto retry; + + if (ret) { + bch_err_inum_offset_ratelimited(c, + op->pos.inode, + op->pos.offset << 9, + "%s: btree lookup error %s", + __func__, bch2_err_str(ret)); + op->error = ret; + op->flags |= BCH_WRITE_DONE; + } + + bch2_trans_exit(&trans); + + /* fallback to cow write path? 
*/ + if (!(op->flags & BCH_WRITE_DONE)) { + closure_sync(&op->cl); + __bch2_nocow_write_done(op); + op->insert_keys.top = op->insert_keys.keys; + } else if (op->flags & BCH_WRITE_SYNC) { + closure_sync(&op->cl); + bch2_nocow_write_done(&op->cl); + } else { + /* + * XXX + * needs to run out of process context because ei_quota_lock is + * a mutex + */ + continue_at(&op->cl, bch2_nocow_write_done, index_update_wq(op)); + } + return; +err_get_ioref: + bkey_for_each_ptr(ptrs, ptr2) { + if (ptr2 == ptr) + break; + + percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref); + } + + /* Fall back to COW path: */ + goto out; +err_bucket_stale: + while (--i >= 0) + bch2_bucket_nocow_unlock(&c->nocow_locks, + buckets[i].b, + BUCKET_NOCOW_LOCK_UPDATE); + + bkey_for_each_ptr(ptrs, ptr2) + percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref); + + /* We can retry this: */ + ret = BCH_ERR_transaction_restart; + goto out; +} + static void __bch2_write(struct bch_write_op *op) { struct bch_fs *c = op->c; @@ -1230,6 +1645,12 @@ static void __bch2_write(struct bch_write_op *op) int ret; nofs_flags = memalloc_nofs_save(); + + if (unlikely(op->opts.nocow)) { + bch2_nocow_write(op); + if (op->flags & BCH_WRITE_DONE) + goto out_nofs_restore; + } again: memset(&op->failed, 0, sizeof(op->failed)); op->btree_update_ready = false; @@ -1310,7 +1731,7 @@ err: key_to_write_offset); bch2_submit_wbio_replicas(to_wbio(bio), c, BCH_DATA_user, - key_to_write); + key_to_write, false); } while (ret); /* @@ -1332,7 +1753,7 @@ err: } else { continue_at(&op->cl, bch2_write_index, NULL); } - +out_nofs_restore: memalloc_nofs_restore(nofs_flags); } @@ -2563,6 +2984,11 @@ void bch2_fs_io_exit(struct bch_fs *c) int bch2_fs_io_init(struct bch_fs *c) { + unsigned i; + + for (i = 0; i < ARRAY_SIZE(c->nocow_locks.l); i++) + two_state_lock_init(&c->nocow_locks.l[i]); + if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio), BIOSET_NEED_BVECS) || bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio), diff --git a/fs/bcachefs/io.h b/fs/bcachefs/io.h index aafe1bf993bb..77a4a1cef71c 100644 --- a/fs/bcachefs/io.h +++ b/fs/bcachefs/io.h @@ -22,7 +22,7 @@ static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw #endif void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *, - enum bch_data_type, const struct bkey_i *); + enum bch_data_type, const struct bkey_i *, bool); #define BLK_STS_REMOVED ((__force blk_status_t)128) @@ -43,6 +43,7 @@ enum bch_write_flags { __BCH_WRITE_IN_WORKER, __BCH_WRITE_DONE, __BCH_WRITE_IO_ERROR, + __BCH_WRITE_CONVERT_UNWRITTEN, }; #define BCH_WRITE_ALLOC_NOWAIT (1U << __BCH_WRITE_ALLOC_NOWAIT) @@ -61,6 +62,7 @@ enum bch_write_flags { #define BCH_WRITE_IN_WORKER (1U << __BCH_WRITE_IN_WORKER) #define BCH_WRITE_DONE (1U << __BCH_WRITE_DONE) #define BCH_WRITE_IO_ERROR (1U << __BCH_WRITE_IO_ERROR) +#define BCH_WRITE_CONVERT_UNWRITTEN (1U << __BCH_WRITE_CONVERT_UNWRITTEN) static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op) { @@ -90,7 +92,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c, op->flags = 0; op->written = 0; op->error = 0; - op->csum_type = bch2_data_checksum_type(c, opts.data_checksum); + op->csum_type = bch2_data_checksum_type(c, opts); op->compression_type = bch2_compression_opt_to_type[opts.compression]; op->nr_replicas = 0; op->nr_replicas_required = c->opts.data_replicas_required; @@ -107,6 +109,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct 
bch_fs *c, op->res = (struct disk_reservation) { 0 }; op->new_i_size = U64_MAX; op->i_sectors_delta = 0; + op->devs_need_flush = NULL; } void bch2_write(struct closure *); diff --git a/fs/bcachefs/io_types.h b/fs/bcachefs/io_types.h index 8e83ce5bc805..200af9e3e6b0 100644 --- a/fs/bcachefs/io_types.h +++ b/fs/bcachefs/io_types.h @@ -97,6 +97,7 @@ struct bch_write_bio { bounce:1, put_bio:1, have_ioref:1, + nocow:1, used_mempool:1, first_btree_write:1; ); @@ -151,6 +152,12 @@ struct bch_write_op { struct keylist insert_keys; u64 inline_keys[BKEY_EXTENT_U64s_MAX * 2]; + /* + * Bitmask of devices that have had nocow writes issued to them since + * last flush: + */ + struct bch_devs_mask *devs_need_flush; + /* Must be last: */ struct bch_write_bio wbio; }; diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c index 52f126a0bb73..9e453b8495e8 100644 --- a/fs/bcachefs/move.c +++ b/fs/bcachefs/move.c @@ -260,6 +260,12 @@ static int bch2_move_extent(struct btree_trans *trans, if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_move)) return -BCH_ERR_erofs_no_writes; + /* + * Before memory allocations & taking nocow locks in + * bch2_data_update_init(): + */ + bch2_trans_unlock(trans); + /* write path might have to decompress data: */ bkey_for_each_ptr_decode(k.k, ptrs, p, entry) sectors = max_t(unsigned, sectors, p.crc.uncompressed_size); @@ -506,6 +512,7 @@ static int __bch2_move_data(struct moving_context *ctxt, */ bch2_bkey_buf_reassemble(&sk, c, k); k = bkey_i_to_s_c(sk.k); + bch2_trans_unlock(&trans); ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts, btree_id, k, data_opts); diff --git a/fs/bcachefs/nocow_locking.c b/fs/bcachefs/nocow_locking.c new file mode 100644 index 000000000000..8f06e08370a2 --- /dev/null +++ b/fs/bcachefs/nocow_locking.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "bcachefs.h" +#include "nocow_locking.h" +#include "util.h" + +void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t, + two_state_lock_t *l, int flags) +{ + struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks); + u64 start_time = local_clock(); + + bch2_two_state_lock(l, flags & BUCKET_NOCOW_LOCK_UPDATE); + bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time); +} diff --git a/fs/bcachefs/nocow_locking.h b/fs/bcachefs/nocow_locking.h new file mode 100644 index 000000000000..2a7a9f44e88e --- /dev/null +++ b/fs/bcachefs/nocow_locking.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BCACHEFS_NOCOW_LOCKING_H +#define _BCACHEFS_NOCOW_LOCKING_H + +#include "bcachefs_format.h" +#include "two_state_shared_lock.h" + +#include <linux/hash.h> + +#define BUCKET_NOCOW_LOCKS_BITS 10 +#define BUCKET_NOCOW_LOCKS (1U << BUCKET_NOCOW_LOCKS_BITS) + +struct bucket_nocow_lock_table { + two_state_lock_t l[BUCKET_NOCOW_LOCKS]; +}; + +#define BUCKET_NOCOW_LOCK_UPDATE (1 << 0) + +static inline two_state_lock_t *bucket_nocow_lock(struct bucket_nocow_lock_table *t, + struct bpos bucket) +{ + u64 dev_bucket = bucket.inode << 56 | bucket.offset; + unsigned h = hash_64(dev_bucket, BUCKET_NOCOW_LOCKS_BITS); + + return t->l + (h & (BUCKET_NOCOW_LOCKS - 1)); +} + +static inline bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t, + struct bpos bucket) +{ + two_state_lock_t *l = bucket_nocow_lock(t, bucket); + + return atomic_long_read(&l->v) != 0; +} + +static inline void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t, + struct bpos bucket, int flags) +{ + two_state_lock_t *l = bucket_nocow_lock(t, bucket); + + bch2_two_state_unlock(l, 
flags & BUCKET_NOCOW_LOCK_UPDATE); +} + +void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *, two_state_lock_t *, int); + +static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t, + struct bpos bucket, int flags) +{ + two_state_lock_t *l = bucket_nocow_lock(t, bucket); + + if (!bch2_two_state_trylock(l, flags & BUCKET_NOCOW_LOCK_UPDATE)) + __bch2_bucket_nocow_lock(t, l, flags); +} + +#endif /* _BCACHEFS_NOCOW_LOCKING_H */ diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h index 85927b306014..ef1b8a03f149 100644 --- a/fs/bcachefs/opts.h +++ b/fs/bcachefs/opts.h @@ -392,6 +392,13 @@ enum opt_type { OPT_BOOL(), \ BCH2_NO_SB_OPT, false, \ NULL, NULL) \ + x(nocow, u8, \ + OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \ + OPT_BOOL(), \ + BCH_SB_NOCOW, false, \ + NULL, "Nocow mode: Writes will be done in place when possible.\n"\ + "Snapshots and reflink will still caused writes to be COW\n"\ + "Implicitly disables data checksumming, compression and encryption")\ x(no_data_io, u8, \ OPT_MOUNT, \ OPT_BOOL(), \ diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h index 5e6fbbfd2d43..36bcb9ec2b3a 100644 --- a/fs/bcachefs/super.h +++ b/fs/bcachefs/super.h @@ -88,9 +88,10 @@ static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs, static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs, unsigned dev) { - BUG_ON(bch2_dev_list_has_dev(*devs, dev)); - BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs)); - devs->devs[devs->nr++] = dev; + if (!bch2_dev_list_has_dev(*devs, dev)) { + BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs)); + devs->devs[devs->nr++] = dev; + } } static inline struct bch_devs_list bch2_dev_list_single(unsigned dev) diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h index fabee8302afa..24dd2defe7c7 100644 --- a/fs/bcachefs/trace.h +++ b/fs/bcachefs/trace.h @@ -543,6 +543,7 @@ DECLARE_EVENT_CLASS(bucket_alloc, __field(u64, need_journal_commit ) __field(u64, nouse ) __field(bool, nonblocking ) + __field(u64, nocow ) __array(char, err, 32 ) ), @@ -560,10 +561,11 @@ DECLARE_EVENT_CLASS(bucket_alloc, __entry->need_journal_commit = s->skipped_need_journal_commit; __entry->nouse = s->skipped_nouse; __entry->nonblocking = nonblocking; + __entry->nocow = s->skipped_nocow; strscpy(__entry->err, err, sizeof(__entry->err)); ), - TP_printk("%d,%d reserve %s user %u bucket %llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s", + TP_printk("%d,%d reserve %s user %u bucket %llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->reserve, __entry->user, @@ -576,6 +578,7 @@ DECLARE_EVENT_CLASS(bucket_alloc, __entry->open, __entry->need_journal_commit, __entry->nouse, + __entry->nocow, __entry->nonblocking, __entry->err) ); |