path: root/fs/jffs2/nodelist.c
author    David Woodhouse <dwmw2@infradead.org>  2006-05-26 22:19:05 +0200
committer David Woodhouse <dwmw2@infradead.org>  2006-05-26 22:19:05 +0200
commit    9bfeb691e75b21fdaa80ffae719083200b190381 (patch)
tree      3c828820f1385249835f85e5073b4ffd10fcd09c /fs/jffs2/nodelist.c
parent    [MTD] NAND modularize write function (diff)
[JFFS2] Switch to using an array of jffs2_raw_node_refs instead of a list.
This allows us to drop another pointer from the struct jffs2_raw_node_ref, shrinking it to 8 bytes on 32-bit machines (if the TEST_TOTLEN paranoia check is turned off, which will be committed soon).
Signed-off-by: David Woodhouse <dwmw2@infradead.org>
Diffstat (limited to 'fs/jffs2/nodelist.c')
-rw-r--r--  fs/jffs2/nodelist.c | 93
1 file changed, 47 insertions, 46 deletions
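
The core of the change is visible in the hunks below: raw node refs are no longer threaded through a per-node next_phys pointer; they now live in fixed-size arrays, with the slot past the last real entry acting as a terminator (REF_LINK_NODE, whose next_in_ino points at the next array, or anything else marking the end of the chain), and REF_EMPTY_NODE marking slots not yet handed out. The following is a minimal stand-alone sketch of that layout and of the freeing walk in jffs2_free_raw_node_refs(), using simplified stand-in types; REFS_PER_BLOCK, the struct fields and free_ref_blocks() here are illustrative, not the kernel's definitions.

	#include <stdint.h>
	#include <stdlib.h>

	#define REFS_PER_BLOCK	64		/* illustrative size, not the kernel's value */
	#define REF_LINK_NODE	((uint32_t)-1)	/* terminator slot links to the next array */
	#define REF_EMPTY_NODE	((uint32_t)-2)	/* slot not yet handed out */

	struct raw_node_ref {			/* stand-in for struct jffs2_raw_node_ref */
		void *next_in_ino;		/* per-inode chain, or pointer to the next array */
		uint32_t flash_offset;		/* also carries the sentinel values above */
	};

	/*
	 * Free a chain of ref arrays, the same shape as the new loop in
	 * jffs2_free_raw_node_refs(): slot [REFS_PER_BLOCK] is the terminator;
	 * if it is marked REF_LINK_NODE, its next_in_ino points at the next array.
	 */
	static void free_ref_blocks(struct raw_node_ref *this)
	{
		while (this) {
			struct raw_node_ref *next = NULL;

			if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
				next = this[REFS_PER_BLOCK].next_in_ino;

			free(this);		/* plays the role of jffs2_free_refblock() */
			this = next;
		}
	}
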
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 0e82979c741c..5d36e9b4d7c5 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -954,18 +954,16 @@ void jffs2_free_raw_node_refs(struct jffs2_sb_info *c)
for (i=0; i<c->nr_blocks; i++) {
this = c->blocks[i].first_node;
while (this) {
- next = this->next_phys;
- __jffs2_free_raw_node_ref(this);
+ if (this[REFS_PER_BLOCK].flash_offset == REF_LINK_NODE)
+ next = this[REFS_PER_BLOCK].next_in_ino;
+ else
+ next = NULL;
+
+ jffs2_free_refblock(this);
this = next;
}
c->blocks[i].first_node = c->blocks[i].last_node = NULL;
}
- this = c->refs;
- while (this) {
- next = this->next_in_ino;
- __jffs2_free_raw_node_ref(this);
- this = next;
- }
}
struct jffs2_node_frag *jffs2_lookup_node_frag(struct rb_root *fragtree, uint32_t offset)
@@ -1060,32 +1058,37 @@ struct jffs2_raw_node_ref *jffs2_link_node_ref(struct jffs2_sb_info *c,
{
struct jffs2_raw_node_ref *ref;
- /* These will be preallocated _very_ shortly. */
- ref = c->refs;
- if (!c->refs) {
- JFFS2_WARNING("Using non-preallocated refs!\n");
- ref = __jffs2_alloc_raw_node_ref();
- BUG_ON(!ref);
- WARN_ON(1);
- } else {
- c->refs = ref->next_in_ino;
+ BUG_ON(!jeb->allocated_refs);
+ jeb->allocated_refs--;
+
+ ref = jeb->last_node;
+
+ dbg_noderef("Last node at %p is (%08x,%p)\n", ref, ref->flash_offset,
+ ref->next_in_ino);
+
+ while (ref->flash_offset != REF_EMPTY_NODE) {
+ if (ref->flash_offset == REF_LINK_NODE)
+ ref = ref->next_in_ino;
+ else
+ ref++;
}
- ref->next_phys = NULL;
+ dbg_noderef("New ref is %p (%08x becomes %08x,%p) len 0x%x\n", ref,
+ ref->flash_offset, ofs, ref->next_in_ino, len);
+
ref->flash_offset = ofs;
- if (!jeb->first_node)
+ if (!jeb->first_node) {
jeb->first_node = ref;
- if (jeb->last_node) {
- jeb->last_node->next_phys = ref;
-#ifdef TEST_TOTLEN
- if (ref_offset(jeb->last_node) + jeb->last_node->__totlen != ref_offset(ref)) {
- printk(KERN_CRIT "Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
- ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
- ref_offset(jeb->last_node), ref_offset(jeb->last_node)+jeb->last_node->__totlen);
- WARN_ON(1);
- }
-#endif
+ BUG_ON(ref_offset(ref) != jeb->offset);
+ } else if (unlikely(ref_offset(ref) != jeb->offset + c->sector_size - jeb->free_size)) {
+ uint32_t last_len = ref_totlen(c, jeb, jeb->last_node);
+
+ JFFS2_ERROR("Adding new ref %p at (0x%08x-0x%08x) not immediately after previous (0x%08x-0x%08x)\n",
+ ref, ref_offset(ref), ref_offset(ref)+len,
+ ref_offset(jeb->last_node),
+ ref_offset(jeb->last_node)+last_len);
+ BUG();
}
jeb->last_node = ref;
@@ -1130,12 +1133,13 @@ int jffs2_scan_dirty_space(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb
{
if (!size)
return 0;
- if (size > c->sector_size - jeb->used_size) {
- printk(KERN_CRIT "Dirty space 0x%x larger then used_size 0x%x (wasted 0x%x)\n",
- size, jeb->used_size, jeb->wasted_size);
+ if (unlikely(size > jeb->free_size)) {
+ printk(KERN_CRIT "Dirty space 0x%x larger then free_size 0x%x (wasted 0x%x)\n",
+ size, jeb->free_size, jeb->wasted_size);
BUG();
}
- if (jeb->last_node && ref_obsolete(jeb->last_node)) {
+ /* REF_EMPTY_NODE is !obsolete, so that works OK */
+ if (ref_obsolete(jeb->last_node)) {
#ifdef TEST_TOTLEN
jeb->last_node->__totlen += size;
#endif
@@ -1168,7 +1172,7 @@ static inline uint32_t __ref_totlen(struct jffs2_sb_info *c,
jeb = &c->blocks[ref->flash_offset / c->sector_size];
/* Last node in block. Use free_space */
- if (ref != jeb->last_node) {
+ if (unlikely(ref != jeb->last_node)) {
printk(KERN_CRIT "ref %p @0x%08x is not jeb->last_node (%p @0x%08x)\n",
ref, ref_offset(ref), jeb->last_node, jeb->last_node?ref_offset(jeb->last_node):0);
BUG();
@@ -1183,17 +1187,13 @@ uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *je
{
uint32_t ret;
-#if CONFIG_JFFS2_FS_DEBUG > 0
- if (jeb && jeb != &c->blocks[ref->flash_offset / c->sector_size]) {
- printk(KERN_CRIT "ref_totlen called with wrong block -- at 0x%08x instead of 0x%08x; ref 0x%08x\n",
- jeb->offset, c->blocks[ref->flash_offset / c->sector_size].offset, ref_offset(ref));
- BUG();
- }
-#endif
-
ret = __ref_totlen(c, jeb, ref);
+
#ifdef TEST_TOTLEN
- if (ret != ref->__totlen) {
+ if (unlikely(ret != ref->__totlen)) {
+ if (!jeb)
+ jeb = &c->blocks[ref->flash_offset / c->sector_size];
+
printk(KERN_CRIT "Totlen for ref at %p (0x%08x-0x%08x) miscalculated as 0x%x instead of %x\n",
ref, ref_offset(ref), ref_offset(ref)+ref->__totlen,
ret, ref->__totlen);
@@ -1204,13 +1204,14 @@ uint32_t __jffs2_ref_totlen(struct jffs2_sb_info *c, struct jffs2_eraseblock *je
printk(KERN_CRIT "No next ref. jeb->last_node is %p\n", jeb->last_node);
printk(KERN_CRIT "jeb->wasted_size %x, dirty_size %x, used_size %x, free_size %x\n", jeb->wasted_size, jeb->dirty_size, jeb->used_size, jeb->free_size);
- ret = ref->__totlen;
- if (!jeb)
- jeb = &c->blocks[ref->flash_offset / c->sector_size];
+
#if defined(JFFS2_DBG_DUMPS) || defined(JFFS2_DBG_PARANOIA_CHECKS)
__jffs2_dbg_dump_node_refs_nolock(c, jeb);
#endif
+
WARN_ON(1);
+
+ ret = ref->__totlen;
}
#endif /* TEST_TOTLEN */
return ret;
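
For reference, the slot-allocation side of the same scheme (the while loop added to jffs2_link_node_ref() above) can be sketched in isolation: starting at the eraseblock's last_node, already-used slots are skipped and REF_LINK_NODE terminators are followed into the next array until a REF_EMPTY_NODE slot is found. This again uses the simplified stand-in model, not the real kernel types.

	#include <stdint.h>

	#define REF_LINK_NODE	((uint32_t)-1)
	#define REF_EMPTY_NODE	((uint32_t)-2)

	struct raw_node_ref {			/* same stand-in model as in the earlier sketch */
		void *next_in_ino;
		uint32_t flash_offset;
	};

	/* Walk from the last-used slot to the next empty one, hopping across
	 * array terminators; mirrors the new loop in jffs2_link_node_ref(). */
	static struct raw_node_ref *find_empty_slot(struct raw_node_ref *ref)
	{
		while (ref->flash_offset != REF_EMPTY_NODE) {
			if (ref->flash_offset == REF_LINK_NODE)
				ref = ref->next_in_ino;	/* follow link to the next array */
			else
				ref++;			/* next slot in the same array */
		}
		return ref;
	}
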