From 0ecb9529a4d47825778e7b0d226eb36019252a9d Mon Sep 17 00:00:00 2001
From: Harvey Harrison
Date: Fri, 24 Oct 2008 10:52:57 -0700
Subject: UBIFS: endian handling fixes and annotations

Noticed by sparse:

fs/ubifs/file.c:75:2: warning: restricted __le64 degrades to integer
fs/ubifs/file.c:629:4: warning: restricted __le64 degrades to integer
fs/ubifs/dir.c:431:3: warning: restricted __le64 degrades to integer

This should be checked to ensure the ubifs_assert is working as
intended; I've done the suggested annotation in this patch.

fs/ubifs/sb.c:298:6: warning: incorrect type in assignment (different base types)
fs/ubifs/sb.c:298:6:    expected int [signed] [assigned] tmp
fs/ubifs/sb.c:298:6:    got restricted __le64 [usertype]
fs/ubifs/sb.c:299:19: warning: incorrect type in assignment (different base types)
fs/ubifs/sb.c:299:19:    expected restricted __le64 [usertype] atime_sec
fs/ubifs/sb.c:299:19:    got int [signed] [assigned] tmp
fs/ubifs/sb.c:300:19: warning: incorrect type in assignment (different base types)
fs/ubifs/sb.c:300:19:    expected restricted __le64 [usertype] ctime_sec
fs/ubifs/sb.c:300:19:    got int [signed] [assigned] tmp
fs/ubifs/sb.c:301:19: warning: incorrect type in assignment (different base types)
fs/ubifs/sb.c:301:19:    expected restricted __le64 [usertype] mtime_sec
fs/ubifs/sb.c:301:19:    got int [signed] [assigned] tmp

This looks like a bugfix, as tmp was a u32 and so the atime, ctime and
mtime values were being truncated, probably not intentionally. Add a
tmp_le64 and use it here.

fs/ubifs/key.h:348:9: warning: cast to restricted __le32
fs/ubifs/key.h:348:9: warning: cast to restricted __le32
fs/ubifs/key.h:419:9: warning: cast to restricted __le32

Read from the annotated union member instead.

fs/ubifs/recovery.c:175:13: warning: incorrect type in assignment (different base types)
fs/ubifs/recovery.c:175:13:    expected unsigned int [unsigned] [usertype] save_flags
fs/ubifs/recovery.c:175:13:    got restricted __le32 [usertype] flags
fs/ubifs/recovery.c:186:13: warning: incorrect type in assignment (different base types)
fs/ubifs/recovery.c:186:13:    expected restricted __le32 [usertype] flags
fs/ubifs/recovery.c:186:13:    got unsigned int [unsigned] [usertype] save_flags

Do the byte-swapping of the flag value at compile time, and annotate
save_flags as __le32.

fs/ubifs/debug.c:368:10: warning: cast to restricted __le32
fs/ubifs/debug.c:368:10: warning: cast from restricted __le64

It should be checked whether the truncation was intentional; I've
changed the printk to print the full width.
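As a reference, the pattern sparse enforces in all of these fixes is the
same, and can be condensed into a short stand-alone sketch (the names
below are hypothetical, chosen only for illustration -- this is not code
from the patch): on-disk little-endian fields are declared with the __le
types, every read goes through le*_to_cpu() before native-endian
comparison or arithmetic, and constant flag values are byte-swapped at
compile time with cpu_to_le*().

#include <linux/types.h>	/* __le32, __le64 */
#include <asm/byteorder.h>	/* le64_to_cpu(), cpu_to_le32() */

/* Hypothetical on-disk node; all multi-byte fields are little-endian */
struct demo_disk_node {
	__le64 sqnum;
	__le32 flags;
};

/* Constant flags can be byte-swapped at compile time */
#define DEMO_FLAG_DIRTY cpu_to_le32(0x00000001)

static inline int demo_sqnum_newer(const struct demo_disk_node *dn,
				   unsigned long long creat_sqnum)
{
	/* Using dn->sqnum directly here would degrade __le64 to integer */
	return le64_to_cpu(dn->sqnum) > creat_sqnum;
}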
Signed-off-by: Harvey Harrison
Signed-off-by: Artem Bityutskiy
---
 fs/ubifs/file.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'fs/ubifs/file.c')

diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 51cf511d44d9..9124eee73aea 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -72,7 +72,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
 		return err;
 	}
 
-	ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum);
+	ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);
 
 	len = le32_to_cpu(dn->size);
 	if (len <= 0 || len > UBIFS_BLOCK_SIZE)
@@ -626,7 +626,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
 
 		dn = bu->buf + (bu->zbranch[nn].offs - offs);
 
-		ubifs_assert(dn->ch.sqnum >
+		ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
 			     ubifs_inode(inode)->creat_sqnum);
 
 		len = le32_to_cpu(dn->size);
--
cgit v1.2.3

From 39ce81ce7168aa7226fb9f182c3a2b57060d0905 Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Tue, 18 Nov 2008 18:09:49 +0200
Subject: UBIFS: do not print scary memory allocation warnings

Bulk-read allocates a lot of memory with 'kmalloc()', and when memory
is fragmented 'kmalloc()' fails with a scary warning. But because
bulk-read is just an optimization, UBIFS keeps working fine anyway.
Suppress the warning by passing the __GFP_NOWARN flag to 'kmalloc()'.

This patch also introduces a macro for the magic 128KiB constant.
This is just neater.

Note, this does not really fix the problem we had, it just hides the
warnings. Further patches fix the problem.

Signed-off-by: Artem Bityutskiy
---
 fs/ubifs/file.c  |  4 ++--
 fs/ubifs/super.c | 17 ++++++++++++-----
 fs/ubifs/ubifs.h |  2 +-
 3 files changed, 15 insertions(+), 8 deletions(-)

(limited to 'fs/ubifs/file.c')

diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 9124eee73aea..8be827cc7078 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -705,12 +705,12 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 	int err, page_idx, page_cnt, ret = 0, n = 0;
 	loff_t isize;
 
-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
+	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
 	if (!bu)
 		return 0;
 
 	bu->buf_len = c->bulk_read_buf_size;
-	bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
+	bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
 	if (!bu->buf)
 		goto out_free;
 
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 8780efbf40ac..ea493e6f2652 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -36,6 +36,12 @@
 #include <linux/mount.h>
 #include "ubifs.h"
 
+/*
+ * Maximum amount of memory we may 'kmalloc()' without worrying that we are
+ * allocating too much.
+ */
+#define UBIFS_KMALLOC_OK (128*1024)
+
 /* Slab cache for UBIFS inodes */
 struct kmem_cache *ubifs_inode_slab;
 
@@ -561,17 +567,18 @@ static int init_constants_early(struct ubifs_info *c)
 	 * calculations when reporting free space.
 	 */
 	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
 
+	/* Buffer size for bulk-reads */
 	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
 	if (c->bulk_read_buf_size > c->leb_size)
 		c->bulk_read_buf_size = c->leb_size;
-	if (c->bulk_read_buf_size > 128 * 1024) {
-		/* Check if we can kmalloc more than 128KiB */
-		void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL);
-
+	if (c->bulk_read_buf_size > UBIFS_KMALLOC_OK) {
+		/* Check if we can kmalloc that much */
+		void *try = kmalloc(c->bulk_read_buf_size,
+				    GFP_KERNEL | __GFP_NOWARN);
 		kfree(try);
 		if (!try)
-			c->bulk_read_buf_size = 128 * 1024;
+			c->bulk_read_buf_size = UBIFS_KMALLOC_OK;
 	}
 	return 0;
 }
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index a7bd32fa15b9..06ba51efd65d 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -753,7 +753,7 @@ struct ubifs_znode {
 };
 
 /**
- * struct bu_info - bulk-read information
+ * struct bu_info - bulk-read information.
  * @key: first data node key
  * @zbranch: zbranches of data nodes to bulk read
  * @buf: buffer to read into
--
cgit v1.2.3
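The sizing logic this patch touches in 'init_constants_early()' boils
down to an opportunistic-probe pattern: try the large allocation once,
and silently fall back to a known-safe size if it cannot succeed. A
stand-alone sketch of the pattern, under made-up names rather than the
real UBIFS code:

#include <linux/slab.h>

#define DEMO_KMALLOC_OK (128 * 1024)

/*
 * Probe whether 'wanted' bytes can be kmalloc'ed; return the usable
 * buffer size. __GFP_NOWARN keeps the probe from spamming the log
 * when it fails.
 */
static int demo_pick_buf_size(int wanted)
{
	void *probe = kmalloc(wanted, GFP_KERNEL | __GFP_NOWARN);

	kfree(probe);		/* kfree(NULL) is a legal no-op */
	return probe ? wanted : DEMO_KMALLOC_OK;
}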
From 6c0c42cdfd73fb161417403d8d077cb136e10bbf Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Tue, 18 Nov 2008 20:20:05 +0200
Subject: UBIFS: do not allocate too much

Bulk-read allocates 128KiB or more using kmalloc. The allocation
starts failing often when the memory gets fragmented. UBIFS still
works fine in this case, because it falls back to the standard
(non-optimized) read method.

This patch teaches bulk-read to allocate exactly the amount of memory
it needs, instead of allocating 128KiB every time.

This patch is also a preparation for the further fix where we'll have
a pre-allocated bulk-read buffer as well. For example, the @bu object
is now prepared in 'ubifs_bulk_read()', so that we can later pass
either pre-allocated or allocated information to 'ubifs_do_bulk_read()',
and teach 'ubifs_do_bulk_read()' not to allocate 'bu->buf' if it is
already there.

Signed-off-by: Artem Bityutskiy
---
 fs/ubifs/file.c  | 70 +++++++++++++++++++++++++++++++++++++-------------------
 fs/ubifs/super.c | 12 +++++-----
 fs/ubifs/tnc.c   |  7 +++++-
 fs/ubifs/ubifs.h |  4 ++--
 4 files changed, 60 insertions(+), 33 deletions(-)

(limited to 'fs/ubifs/file.c')

diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 8be827cc7078..0c5c27d63f6e 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -691,32 +691,22 @@ out_err:
 /**
  * ubifs_do_bulk_read - do bulk-read.
  * @c: UBIFS file-system description object
- * @page1: first page
+ * @bu: bulk-read information
+ * @page1: first page to read
  *
  * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
  */
-static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+			      struct page *page1)
 {
 	pgoff_t offset = page1->index, end_index;
 	struct address_space *mapping = page1->mapping;
 	struct inode *inode = mapping->host;
 	struct ubifs_inode *ui = ubifs_inode(inode);
-	struct bu_info *bu;
 	int err, page_idx, page_cnt, ret = 0, n = 0;
+	int allocate = bu->buf ? 0 : 1;
 	loff_t isize;
 
-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
-	if (!bu)
-		return 0;
-
-	bu->buf_len = c->bulk_read_buf_size;
-	bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
-	if (!bu->buf)
-		goto out_free;
-
-	data_key_init(c, &bu->key, inode->i_ino,
-		      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
 	err = ubifs_tnc_get_bu_keys(c, bu);
 	if (err)
 		goto out_warn;
@@ -735,12 +725,25 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 		 * together. If all the pages were like this, bulk-read would
 		 * reduce performance, so we turn it off for a while.
 		 */
-		ui->read_in_a_row = 0;
-		ui->bulk_read = 0;
-		goto out_free;
+		goto out_bu_off;
 	}
 
 	if (bu->cnt) {
+		if (allocate) {
+			/*
+			 * Allocate bulk-read buffer depending on how many data
+			 * nodes we are going to read.
+			 */
+			bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
+				      bu->zbranch[bu->cnt - 1].len -
+				      bu->zbranch[0].offs;
+			ubifs_assert(bu->buf_len > 0);
+			ubifs_assert(bu->buf_len <= c->leb_size);
+			bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
+			if (!bu->buf)
+				goto out_bu_off;
+		}
+
 		err = ubifs_tnc_bulk_read(c, bu);
 		if (err)
 			goto out_warn;
@@ -779,13 +782,17 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
 	ui->last_page_read = offset + page_idx - 1;
 
 out_free:
-	kfree(bu->buf);
-	kfree(bu);
+	if (allocate)
+		kfree(bu->buf);
 	return ret;
 
 out_warn:
 	ubifs_warn("ignoring error %d and skipping bulk-read", err);
 	goto out_free;
+
+out_bu_off:
+	ui->read_in_a_row = ui->bulk_read = 0;
+	goto out_free;
 }
 
 /**
@@ -803,18 +810,20 @@ static int ubifs_bulk_read(struct page *page)
 	struct ubifs_info *c = inode->i_sb->s_fs_info;
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
-	int ret = 0;
+	struct bu_info *bu;
+	int err = 0;
 
 	ui->last_page_read = index;
-
 	if (!c->bulk_read)
 		return 0;
+
 	/*
 	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
 	 * don't bother if we cannot lock the mutex.
 	 */
 	if (!mutex_trylock(&ui->ui_mutex))
 		return 0;
+
 	if (index != last_page_read + 1) {
 		/* Turn off bulk-read if we stop reading sequentially */
 		ui->read_in_a_row = 1;
@@ -822,6 +831,7 @@ static int ubifs_bulk_read(struct page *page)
 		ui->bulk_read = 0;
 		goto out_unlock;
 	}
+
 	if (!ui->bulk_read) {
 		ui->read_in_a_row += 1;
 		if (ui->read_in_a_row < 3)
@@ -829,10 +839,22 @@ static int ubifs_bulk_read(struct page *page)
 		/* Three reads in a row, so switch on bulk-read */
 		ui->bulk_read = 1;
 	}
-	ret = ubifs_do_bulk_read(c, page);
+
+	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+	if (!bu)
+		return 0;
+
+	bu->buf = NULL;
+	bu->buf_len = c->max_bu_buf_len;
+	data_key_init(c, &bu->key, inode->i_ino,
+		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+
+	err = ubifs_do_bulk_read(c, bu, page);
+	kfree(bu);
+
 out_unlock:
 	mutex_unlock(&ui->ui_mutex);
-	return ret;
+	return err;
 }
 
 static int ubifs_readpage(struct file *file, struct page *page)
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index ea493e6f2652..1d511569c035 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -569,16 +569,16 @@ static int init_constants_early(struct ubifs_info *c)
 	c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
 
 	/* Buffer size for bulk-reads */
-	c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
-	if (c->bulk_read_buf_size > c->leb_size)
-		c->bulk_read_buf_size = c->leb_size;
-	if (c->bulk_read_buf_size > UBIFS_KMALLOC_OK) {
+	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
+	if (c->max_bu_buf_len > c->leb_size)
+		c->max_bu_buf_len = c->leb_size;
+	if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
 		/* Check if we can kmalloc that much */
-		void *try = kmalloc(c->bulk_read_buf_size,
+		void *try = kmalloc(c->max_bu_buf_len,
 				    GFP_KERNEL | __GFP_NOWARN);
 		kfree(try);
 		if (!try)
-			c->bulk_read_buf_size = UBIFS_KMALLOC_OK;
+			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
 	}
 	return 0;
 }
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 99e9a744cfd0..6eef5344a145 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -1501,7 +1501,12 @@ out:
  * @bu: bulk-read parameters and results
  *
  * Lookup consecutive data node keys for the same inode that reside
- * consecutively in the same LEB.
+ * consecutively in the same LEB. This function returns zero in case of
+ * success and a negative error code in case of failure.
+ *
+ * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
+ * makes sure bulk-read nodes fit the buffer. Otherwise, this function
+ * prepares the maximum possible amount of nodes for bulk-read.
  */
 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
 {
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 06ba51efd65d..870b5c479e95 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -969,7 +969,7 @@ struct ubifs_mount_opts {
  * @mst_node: master node
  * @mst_offs: offset of valid master node
  * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
- * @bulk_read_buf_size: buffer size for bulk-reads
+ * @max_bu_buf_len: maximum bulk-read buffer length
  *
  * @log_lebs: number of logical eraseblocks in the log
  * @log_bytes: log size in bytes
@@ -1217,7 +1217,7 @@ struct ubifs_info {
 	struct ubifs_mst_node *mst_node;
 	int mst_offs;
 	struct mutex mst_mutex;
-	int bulk_read_buf_size;
+	int max_bu_buf_len;
 
 	int log_lebs;
 	long long log_bytes;
--
cgit v1.2.3
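The ownership rule this patch introduces -- 'ubifs_do_bulk_read()'
allocates 'bu->buf' only when the caller did not supply one, and frees
only what it allocated itself -- is worth spelling out. A stripped-down,
stand-alone sketch of the pattern (all names here are made up for
illustration, not the UBIFS API):

#include <linux/slab.h>

struct demo_bu {
	void *buf;	/* NULL means "callee should allocate" */
	int buf_len;
};

static int demo_process(struct demo_bu *bu);	/* hypothetical worker */

static int demo_do_bulk(struct demo_bu *bu)
{
	int allocate = bu->buf ? 0 : 1;
	int err;

	if (allocate) {
		/* Size the buffer by actual need, not by the maximum */
		bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
		if (!bu->buf)
			return 0;	/* just an optimization: give up quietly */
	}

	err = demo_process(bu);

	if (allocate)
		kfree(bu->buf);		/* free only what we allocated */
	return err;
}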
From 3477d204658733aa3a87d3ae03b0327c1e599517 Mon Sep 17 00:00:00 2001
From: Artem Bityutskiy
Date: Wed, 19 Nov 2008 11:53:15 +0200
Subject: UBIFS: pre-allocate bulk-read buffer

To avoid memory allocation failure during bulk-read, pre-allocate
a bulk-read buffer, so that if there is only one bulk-reader at
a time, it would just use the pre-allocated buffer and would not
do any memory allocation. However, if there is more than one
bulk-reader, then only one reader would use the pre-allocated
buffer, while the other readers would allocate buffers for
themselves.

Signed-off-by: Artem Bityutskiy
---
 fs/ubifs/file.c  | 31 +++++++++++++++++++++---------
 fs/ubifs/super.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++---------
 fs/ubifs/ubifs.h |  6 ++++++
 3 files changed, 76 insertions(+), 18 deletions(-)

(limited to 'fs/ubifs/file.c')

diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 0c5c27d63f6e..2624411d9758 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -811,15 +811,15 @@ static int ubifs_bulk_read(struct page *page)
 	struct ubifs_inode *ui = ubifs_inode(inode);
 	pgoff_t index = page->index, last_page_read = ui->last_page_read;
 	struct bu_info *bu;
-	int err = 0;
+	int err = 0, allocated = 0;
 
 	ui->last_page_read = index;
 	if (!c->bulk_read)
 		return 0;
 
 	/*
-	 * Bulk-read is protected by ui_mutex, but it is an optimization, so
-	 * don't bother if we cannot lock the mutex.
+	 * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+	 * so don't bother if we cannot lock the mutex.
 	 */
 	if (!mutex_trylock(&ui->ui_mutex))
 		return 0;
@@ -840,17 +840,30 @@ static int ubifs_bulk_read(struct page *page)
 		ui->bulk_read = 1;
 	}
 
-	bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
-	if (!bu)
-		return 0;
+	/*
+	 * If possible, try to use pre-allocated bulk-read information, which
+	 * is protected by @c->bu_mutex.
+	 */
+	if (mutex_trylock(&c->bu_mutex))
+		bu = &c->bu;
+	else {
+		bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+		if (!bu)
+			goto out_unlock;
+
+		bu->buf = NULL;
+		allocated = 1;
+	}
 
-	bu->buf = NULL;
 	bu->buf_len = c->max_bu_buf_len;
 	data_key_init(c, &bu->key, inode->i_ino,
 		      page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
 	err = ubifs_do_bulk_read(c, bu, page);
-	kfree(bu);
+
+	if (!allocated)
+		mutex_unlock(&c->bu_mutex);
+	else
+		kfree(bu);
 
 out_unlock:
 	mutex_unlock(&ui->ui_mutex);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 1d511569c035..d80b2aef42b6 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -572,14 +572,6 @@ static int init_constants_early(struct ubifs_info *c)
 	c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
 	if (c->max_bu_buf_len > c->leb_size)
 		c->max_bu_buf_len = c->leb_size;
-	if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
-		/* Check if we can kmalloc that much */
-		void *try = kmalloc(c->max_bu_buf_len,
-				    GFP_KERNEL | __GFP_NOWARN);
-		kfree(try);
-		if (!try)
-			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
-	}
 	return 0;
 }
 
@@ -998,6 +990,34 @@ static void destroy_journal(struct ubifs_info *c)
 	free_buds(c);
 }
 
+/**
+ * bu_init - initialize bulk-read information.
+ * @c: UBIFS file-system description object
+ */
+static void bu_init(struct ubifs_info *c)
+{
+	ubifs_assert(c->bulk_read == 1);
+
+	if (c->bu.buf)
+		return; /* Already initialized */
+
+again:
+	c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
+	if (!c->bu.buf) {
+		if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
+			c->max_bu_buf_len = UBIFS_KMALLOC_OK;
+			goto again;
+		}
+
+		/* Just disable bulk-read */
+		ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
+			   "disabling it", c->max_bu_buf_len);
+		c->mount_opts.bulk_read = 1;
+		c->bulk_read = 0;
+		return;
+	}
+}
+
 /**
  * mount_ubifs - mount UBIFS file-system.
  * @c: UBIFS file-system description object
@@ -1066,6 +1086,13 @@ static int mount_ubifs(struct ubifs_info *c)
 		goto out_free;
 	}
 
+	if (c->bulk_read == 1)
+		bu_init(c);
+
+	/*
+	 * We have to check all CRCs, even for data nodes, when we mount the FS
+	 * (specifically, when we are replaying).
+	 */
 	c->always_chk_crc = 1;
 
 	err = ubifs_read_superblock(c);
@@ -1296,6 +1323,7 @@ out_cbuf:
 out_dereg:
 	dbg_failure_mode_deregistration(c);
 out_free:
+	kfree(c->bu.buf);
 	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
@@ -1332,10 +1360,11 @@ static void ubifs_umount(struct ubifs_info *c)
 	kfree(c->cbuf);
 	kfree(c->rcvrd_mst_node);
 	kfree(c->mst_node);
+	kfree(c->bu.buf);
+	vfree(c->ileb_buf);
 	vfree(c->sbuf);
 	kfree(c->bottom_up_buf);
 	UBIFS_DBG(vfree(c->dbg_buf));
-	vfree(c->ileb_buf);
 	dbg_failure_mode_deregistration(c);
 }
 
@@ -1633,6 +1662,7 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 		ubifs_err("invalid or unknown remount parameter");
 		return err;
 	}
+
 	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
 		err = ubifs_remount_rw(c);
 		if (err)
@@ -1640,6 +1670,14 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 	} else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
 		ubifs_remount_ro(c);
 
+	if (c->bulk_read == 1)
+		bu_init(c);
+	else {
+		dbg_gen("disable bulk-read");
+		kfree(c->bu.buf);
+		c->bu.buf = NULL;
+	}
+
 	return 0;
 }
 
@@ -1730,6 +1768,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 	mutex_init(&c->log_mutex);
 	mutex_init(&c->mst_mutex);
 	mutex_init(&c->umount_mutex);
+	mutex_init(&c->bu_mutex);
 	init_waitqueue_head(&c->cmt_wq);
 	c->buds = RB_ROOT;
 	c->old_idx = RB_ROOT;
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 870b5c479e95..46b172560a06 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -969,7 +969,10 @@ struct ubifs_mount_opts {
  * @mst_node: master node
  * @mst_offs: offset of valid master node
  * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
+ *
  * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+ * @bu: pre-allocated bulk-read information
  *
  * @log_lebs: number of logical eraseblocks in the log
  * @log_bytes: log size in bytes
@@ -1217,7 +1220,10 @@ struct ubifs_info {
 	struct ubifs_mst_node *mst_node;
 	int mst_offs;
 	struct mutex mst_mutex;
+
 	int max_bu_buf_len;
+	struct mutex bu_mutex;
+	struct bu_info bu;
 
 	int log_lebs;
 	long long log_bytes;
--
cgit v1.2.3
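The end result of the series is a small concurrency pattern: a single
pre-allocated bu_info guarded by a mutex serves the common
single-reader case, and a concurrent reader that loses the trylock
falls back to a private allocation. A condensed sketch of that pattern
(hypothetical names, reusing demo_bu/demo_do_bulk from the sketch
above; simplified error handling):

#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_fs {
	struct mutex bu_mutex;	/* guards the pre-allocated bu */
	struct demo_bu bu;	/* bu.buf pre-allocated at mount time */
	int max_bu_buf_len;
};

static int demo_bulk_read(struct demo_fs *c)
{
	struct demo_bu *bu;
	int allocated = 0, err;

	if (mutex_trylock(&c->bu_mutex))
		bu = &c->bu;	/* fast path: sole reader, no allocation */
	else {
		/* contended: this reader allocates a private bu_info */
		bu = kmalloc(sizeof(*bu), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			return 0;
		bu->buf = NULL;	/* demo_do_bulk() will size and allocate it */
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	err = demo_do_bulk(bu);

	if (!allocated)
		mutex_unlock(&c->bu_mutex);
	else
		kfree(bu);
	return err;
}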