author     Dmitry Torokhov <dmitry.torokhov@gmail.com>    2008-12-20 10:54:54 +0100
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>    2008-12-20 10:54:54 +0100
commit     93b8eef1c098efbea2f1fc0be7e3c681f259a7e7
tree       462cc8c2bc07bbc825dab2a200891a28d8643329    /fs/ubifs/file.c
parent     Input: libps2 - handle 0xfc responses from devices
parent     Linux 2.6.28-rc9
Merge commit 'v2.6.28-rc9' into next
Diffstat (limited to 'fs/ubifs/file.c')
-rw-r--r--  fs/ubifs/file.c | 91
1 file changed, 63 insertions(+), 28 deletions(-)
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 51cf511d44d9..2624411d9758 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -72,7 +72,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
return err;
}
- ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum);
+ ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum);
len = le32_to_cpu(dn->size);
if (len <= 0 || len > UBIFS_BLOCK_SIZE)
@@ -626,7 +626,7 @@ static int populate_page(struct ubifs_info *c, struct page *page,
dn = bu->buf + (bu->zbranch[nn].offs - offs);
- ubifs_assert(dn->ch.sqnum >
+ ubifs_assert(le64_to_cpu(dn->ch.sqnum) >
ubifs_inode(inode)->creat_sqnum);
len = le32_to_cpu(dn->size);
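[Editor's note] The two hunks above wrap dn->ch.sqnum in le64_to_cpu() before comparing it: UBIFS stores multi-byte on-flash fields in little-endian order, so a raw comparison against a native-endian value is only correct on little-endian CPUs. A minimal standalone sketch of the pattern being fixed; the structure and names below are simplified stand-ins, not the real UBIFS definitions:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_ondisk_header {
	__le64 sqnum;	/* sequence number as stored on flash (little-endian) */
};

/* Compare an on-flash sequence number against a native-endian one */
static int demo_is_newer(const struct demo_ondisk_header *hdr,
			 unsigned long long creat_sqnum)
{
	/* Byte-swap to CPU endianness before any arithmetic comparison */
	return le64_to_cpu(hdr->sqnum) > creat_sqnum;
}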
@@ -691,32 +691,22 @@ out_err:
/**
* ubifs_do_bulk_read - do bulk-read.
* @c: UBIFS file-system description object
- * @page1: first page
+ * @bu: bulk-read information
+ * @page1: first page to read
*
* This function returns %1 if the bulk-read is done, otherwise %0 is returned.
*/
-static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
+static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu,
+ struct page *page1)
{
pgoff_t offset = page1->index, end_index;
struct address_space *mapping = page1->mapping;
struct inode *inode = mapping->host;
struct ubifs_inode *ui = ubifs_inode(inode);
- struct bu_info *bu;
int err, page_idx, page_cnt, ret = 0, n = 0;
+ int allocate = bu->buf ? 0 : 1;
loff_t isize;
- bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
- if (!bu)
- return 0;
-
- bu->buf_len = c->bulk_read_buf_size;
- bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
- if (!bu->buf)
- goto out_free;
-
- data_key_init(c, &bu->key, inode->i_ino,
- offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
err = ubifs_tnc_get_bu_keys(c, bu);
if (err)
goto out_warn;
@@ -735,12 +725,25 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
* together. If all the pages were like this, bulk-read would
* reduce performance, so we turn it off for a while.
*/
- ui->read_in_a_row = 0;
- ui->bulk_read = 0;
- goto out_free;
+ goto out_bu_off;
}
if (bu->cnt) {
+ if (allocate) {
+ /*
+ * Allocate bulk-read buffer depending on how many data
+ * nodes we are going to read.
+ */
+ bu->buf_len = bu->zbranch[bu->cnt - 1].offs +
+ bu->zbranch[bu->cnt - 1].len -
+ bu->zbranch[0].offs;
+ ubifs_assert(bu->buf_len > 0);
+ ubifs_assert(bu->buf_len <= c->leb_size);
+ bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN);
+ if (!bu->buf)
+ goto out_bu_off;
+ }
+
err = ubifs_tnc_bulk_read(c, bu);
if (err)
goto out_warn;
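[Editor's note] In the hunk above, ubifs_do_bulk_read() no longer allocates a worst-case buffer up front; when no pre-allocated buffer was passed in, it sizes the allocation to the exact span of data nodes returned by ubifs_tnc_get_bu_keys(). A condensed sketch of that sizing rule, using trimmed stand-in structures rather than the real bu_info/zbranch types:

struct demo_zbranch {
	int offs;	/* data node offset within the looked-up span */
	int len;	/* data node length on flash */
};

/*
 * Size the bulk-read buffer to the exact span covered by the looked-up
 * data nodes: from the start of the first node to the end of the last.
 */
static int demo_bu_buf_len(const struct demo_zbranch *zbranch, int cnt)
{
	return zbranch[cnt - 1].offs + zbranch[cnt - 1].len - zbranch[0].offs;
}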
@@ -779,13 +782,17 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
ui->last_page_read = offset + page_idx - 1;
out_free:
- kfree(bu->buf);
- kfree(bu);
+ if (allocate)
+ kfree(bu->buf);
return ret;
out_warn:
ubifs_warn("ignoring error %d and skipping bulk-read", err);
goto out_free;
+
+out_bu_off:
+ ui->read_in_a_row = ui->bulk_read = 0;
+ goto out_free;
}
/**
@@ -803,18 +810,20 @@ static int ubifs_bulk_read(struct page *page)
struct ubifs_info *c = inode->i_sb->s_fs_info;
struct ubifs_inode *ui = ubifs_inode(inode);
pgoff_t index = page->index, last_page_read = ui->last_page_read;
- int ret = 0;
+ struct bu_info *bu;
+ int err = 0, allocated = 0;
ui->last_page_read = index;
-
if (!c->bulk_read)
return 0;
+
/*
- * Bulk-read is protected by ui_mutex, but it is an optimization, so
- * don't bother if we cannot lock the mutex.
+ * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+ * so don't bother if we cannot lock the mutex.
*/
if (!mutex_trylock(&ui->ui_mutex))
return 0;
+
if (index != last_page_read + 1) {
/* Turn off bulk-read if we stop reading sequentially */
ui->read_in_a_row = 1;
@@ -822,6 +831,7 @@ static int ubifs_bulk_read(struct page *page)
ui->bulk_read = 0;
goto out_unlock;
}
+
if (!ui->bulk_read) {
ui->read_in_a_row += 1;
if (ui->read_in_a_row < 3)
@@ -829,10 +839,35 @@ static int ubifs_bulk_read(struct page *page)
/* Three reads in a row, so switch on bulk-read */
ui->bulk_read = 1;
}
- ret = ubifs_do_bulk_read(c, page);
+
+ /*
+ * If possible, try to use pre-allocated bulk-read information, which
+ * is protected by @c->bu_mutex.
+ */
+ if (mutex_trylock(&c->bu_mutex))
+ bu = &c->bu;
+ else {
+ bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+ if (!bu)
+ goto out_unlock;
+
+ bu->buf = NULL;
+ allocated = 1;
+ }
+
+ bu->buf_len = c->max_bu_buf_len;
+ data_key_init(c, &bu->key, inode->i_ino,
+ page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
+ err = ubifs_do_bulk_read(c, bu, page);
+
+ if (!allocated)
+ mutex_unlock(&c->bu_mutex);
+ else
+ kfree(bu);
+
out_unlock:
mutex_unlock(&ui->ui_mutex);
- return ret;
+ return err;
}
static int ubifs_readpage(struct file *file, struct page *page)
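[Editor's note] The reworked ubifs_bulk_read() above uses a trylock-or-allocate pattern: a single pre-allocated bu_info (protected by c->bu_mutex) serves the common case, and a temporary GFP_NOFS | __GFP_NOWARN allocation is used only when that mutex is contended, so concurrent readers never block on what is merely an optimization. A self-contained sketch of the pattern, with demo_-prefixed stand-ins rather than the real UBIFS structures:

#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_bu {
	void *buf;		/* bulk-read buffer (may be pre-allocated) */
	int buf_len;
};

struct demo_ctx {
	struct mutex bu_mutex;	/* serializes use of the shared @bu */
	struct demo_bu bu;	/* single pre-allocated bulk-read descriptor */
	int max_bu_buf_len;
};

static int demo_bulk_read(struct demo_ctx *c)
{
	struct demo_bu *bu;
	int allocated = 0, ret = 0;

	if (mutex_trylock(&c->bu_mutex)) {
		/* Fast path: reuse the shared, pre-allocated descriptor */
		bu = &c->bu;
	} else {
		/* Contended: fall back to a temporary, non-warning allocation */
		bu = kmalloc(sizeof(*bu), GFP_NOFS | __GFP_NOWARN);
		if (!bu)
			return 0;	/* skip: bulk-read is only an optimization */
		bu->buf = NULL;
		allocated = 1;
	}

	bu->buf_len = c->max_bu_buf_len;
	/* ... perform the bulk-read with @bu here ... */

	if (allocated)
		kfree(bu);
	else
		mutex_unlock(&c->bu_mutex);
	return ret;
}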