field | value | date |
---|---|---|
author | Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> | 2011-02-22 18:26:17 +0100 |
committer | Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp> | 2011-03-08 06:58:04 +0100 |
commit | 9954e7af14868b8b79e76b7b88daaf0b3866db33 (patch) | |
tree | 562b473d974ee6715cff8732e518a9e187c50d38 /fs/nilfs2/alloc.c | |
parent | nilfs2: decrement inodes count only if raw inode was successfully deleted (diff) | |
nilfs2: add free entries count only if clear bit operation succeeded
Three functions of the current persistent object allocator,
nilfs_palloc_commit_free_entry, nilfs_palloc_abort_alloc_entry, and
nilfs_palloc_freev, unconditionally add to the free entries counter
after doing a clear bit operation on a bitmap block.

If the clear bit operation overlaps with another free, the bit is
cleared only once but the counter is still incremented each time, so
the counter no longer adds up with the bitmap.
This fixes the issue by making the counter updates conditional on the
clear bit operation succeeding.
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
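The change is easiest to see in isolation: the bitmap clear acts as a test-and-clear, and the free entries counter must only be credited when a bit was actually flipped. The following user-space sketch illustrates the before/after pattern; it is a simplified model only, and free_entry_old, free_entry_fixed, test_and_clear, bitmap, and free_count are hypothetical names, not the nilfs2 kernel API.

```c
#include <stdbool.h>
#include <stdio.h>

static unsigned long bitmap;      /* bit set = entry allocated */
static unsigned long free_count;  /* should equal the number of freed entries */

/* Clear a bit and report whether it was actually set beforehand. */
static bool test_and_clear(unsigned int nr)
{
        bool was_set = bitmap & (1UL << nr);

        bitmap &= ~(1UL << nr);
        return was_set;
}

/* Old pattern: the counter is bumped even if the entry was already free. */
static void free_entry_old(unsigned int nr)
{
        if (!test_and_clear(nr))
                fprintf(stderr, "entry %u already freed\n", nr);
        free_count++;                   /* unconditional: can over-count */
}

/* Fixed pattern: the counter is bumped only when the bit was really cleared. */
static void free_entry_fixed(unsigned int nr)
{
        if (!test_and_clear(nr))
                fprintf(stderr, "entry %u already freed\n", nr);
        else
                free_count++;           /* conditional: stays consistent */
}

int main(void)
{
        bitmap = 1UL << 3;              /* one allocated entry */
        free_entry_old(3);
        free_entry_old(3);              /* overlapping free */
        printf("old: free_count=%lu\n", free_count);    /* 2, but only 1 entry existed */

        bitmap = 1UL << 3;
        free_count = 0;
        free_entry_fixed(3);
        free_entry_fixed(3);            /* overlapping free */
        printf("fixed: free_count=%lu\n", free_count);  /* stays at 1 */
        return 0;
}
```

After a double free, the old pattern reports free_count=2 for a single entry, while the conditional pattern stays at 1.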
Diffstat (limited to 'fs/nilfs2/alloc.c')
-rw-r--r-- | fs/nilfs2/alloc.c | 12 |
1 file changed, 7 insertions, 5 deletions
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index d7fd696e595c..0a0a66d98cce 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -521,8 +521,8 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
                                     group_offset, bitmap))
                 printk(KERN_WARNING "%s: entry number %llu already freed\n",
                        __func__, (unsigned long long)req->pr_entry_nr);
-
-        nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
+        else
+                nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
 
         kunmap(req->pr_bitmap_bh->b_page);
         kunmap(req->pr_desc_bh->b_page);
@@ -558,8 +558,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
                                     group_offset, bitmap))
                 printk(KERN_WARNING "%s: entry number %llu already freed\n",
                        __func__, (unsigned long long)req->pr_entry_nr);
-
-        nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
+        else
+                nilfs_palloc_group_desc_add_entries(inode, group, desc, 1);
 
         kunmap(req->pr_bitmap_bh->b_page);
         kunmap(req->pr_desc_bh->b_page);
@@ -665,7 +665,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
                 for (j = i, n = 0;
                      (j < nitems) &&
                              nilfs_palloc_group_is_in(inode, group, entry_nrs[j]);
-                     j++, n++) {
+                     j++) {
                         nilfs_palloc_group(inode, entry_nrs[j], &group_offset);
                         if (!nilfs_clear_bit_atomic(
                                     nilfs_mdt_bgl_lock(inode, group),
@@ -674,6 +674,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
                                         "%s: entry number %llu already freed\n",
                                         __func__,
                                         (unsigned long long)entry_nrs[j]);
+                        } else {
+                                n++;
                         }
                 }
                 nilfs_palloc_group_desc_add_entries(inode, group, desc, n);
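In the nilfs_palloc_freev hunks the same rule is applied to a batch: the n++ moves from the for-loop header into the else branch, so the single nilfs_palloc_group_desc_add_entries() call at the end of the group loop only credits bits that were actually cleared. Below is a minimal user-space sketch of that batched counting, assuming one in-memory group; freev_group, test_and_clear, group_bitmap, and group_nfrees are hypothetical stand-ins, not the real on-disk structures or kernel helpers.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static unsigned long group_bitmap;      /* bit set = entry allocated */
static unsigned long group_nfrees;      /* per-group free entries counter */

/* Clear a bit and report whether it was actually set beforehand. */
static bool test_and_clear(unsigned int offset)
{
        bool was_set = group_bitmap & (1UL << offset);

        group_bitmap &= ~(1UL << offset);
        return was_set;
}

/* Free a batch of entries that all belong to the same group. */
static void freev_group(const unsigned int *offsets, size_t nitems)
{
        unsigned long n = 0;
        size_t j;

        for (j = 0; j < nitems; j++) {
                if (!test_and_clear(offsets[j])) {
                        fprintf(stderr, "entry %u already freed\n",
                                offsets[j]);
                } else {
                        /* Count only the bits that were actually cleared. */
                        n++;
                }
        }
        /* Credit the group counter once, with the successful clears only. */
        group_nfrees += n;
}

int main(void)
{
        unsigned int offsets[] = { 3, 3, 7 };   /* entry 3 appears twice */

        group_bitmap = (1UL << 3) | (1UL << 7);
        freev_group(offsets, 3);
        printf("nfrees=%lu (2 expected, not 3)\n", group_nfrees);
        return 0;
}
```

With a duplicate entry in the batch, the counter gains 2 rather than 3, matching the number of bits that were really cleared.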