| field | value | date |
|---|---|---|
| author | Christoph Hellwig <hch@infradead.org> | 2008-10-30 06:58:32 +0100 |
| committer | Lachlan McIlroy <lachlan@sgi.com> | 2008-10-30 06:58:32 +0100 |
| commit | 4a26e66e7728112f0e1cd7eca3bcc430b3a221c9 (patch) | |
| tree | 1944f9aa65476c963658b7b4679f7a64287adfb6 /fs/xfs/xfs_alloc_btree.c | |
| parent | [XFS] kill xfs_bmbt_log_block and xfs_bmbt_log_recs (diff) | |
[XFS] add keys_inorder and recs_inorder btree methods
Add methods to check whether two keys/records are in the right order. This
replaces the xfs_btree_check_key and xfs_btree_check_rec methods. For the
callers from xfs_bmap.c, just open-code the bmbt-specific asserts.
SGI-PV: 985583
SGI-Modid: xfs-linux-melb:xfs-kern:32208a
Signed-off-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>
Signed-off-by: Bill O'Donnell <billodo@sgi.com>
Signed-off-by: David Chinner <david@fromorbit.com>
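The new methods are dispatched through the per-btree xfs_btree_ops vector, so the generic btree code no longer needs btree-specific xfs_btree_check_key()/xfs_btree_check_rec() calls. As a rough sketch of how such a debug-only check could consume the new ops (the helper name below is hypothetical and not part of this patch):

```c
#ifdef DEBUG
/*
 * Hypothetical sketch only: assert that two adjacent records are in order
 * by dispatching through the per-btree-type ops vector introduced here.
 * Only ->recs_inorder comes from this patch; the helper itself is
 * illustrative.
 */
STATIC void
xfs_btree_assert_recs_inorder(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*r1,
	union xfs_btree_rec	*r2)
{
	ASSERT(cur->bc_ops->recs_inorder(cur, r1, r2));
}
#endif	/* DEBUG */
```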
Diffstat (limited to 'fs/xfs/xfs_alloc_btree.c')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | fs/xfs/xfs_alloc_btree.c | 44 |
1 files changed, 44 insertions, 0 deletions
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 4d44f03858b0..9e63f8c180d9 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -311,6 +311,45 @@ xfs_allocbt_kill_root(
 	return 0;
 }
 
+#ifdef DEBUG
+STATIC int
+xfs_allocbt_keys_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_key	*k1,
+	union xfs_btree_key	*k2)
+{
+	if (cur->bc_btnum == XFS_BTNUM_BNO) {
+		return be32_to_cpu(k1->alloc.ar_startblock) <
+		       be32_to_cpu(k2->alloc.ar_startblock);
+	} else {
+		return be32_to_cpu(k1->alloc.ar_blockcount) <
+			be32_to_cpu(k2->alloc.ar_blockcount) ||
+			(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
+			 be32_to_cpu(k1->alloc.ar_startblock) <
+			 be32_to_cpu(k2->alloc.ar_startblock));
+	}
+}
+
+STATIC int
+xfs_allocbt_recs_inorder(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_rec	*r1,
+	union xfs_btree_rec	*r2)
+{
+	if (cur->bc_btnum == XFS_BTNUM_BNO) {
+		return be32_to_cpu(r1->alloc.ar_startblock) +
+			be32_to_cpu(r1->alloc.ar_blockcount) <=
+			be32_to_cpu(r2->alloc.ar_startblock);
+	} else {
+		return be32_to_cpu(r1->alloc.ar_blockcount) <
+			be32_to_cpu(r2->alloc.ar_blockcount) ||
+			(r1->alloc.ar_blockcount == r2->alloc.ar_blockcount &&
+			 be32_to_cpu(r1->alloc.ar_startblock) <
+			 be32_to_cpu(r2->alloc.ar_startblock));
+	}
+}
+#endif	/* DEBUG */
+
 #ifdef XFS_BTREE_TRACE
 ktrace_t	*xfs_allocbt_trace_buf;
 
@@ -395,6 +434,11 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
 	.init_ptr_from_cur	= xfs_allocbt_init_ptr_from_cur,
 	.key_diff		= xfs_allocbt_key_diff,
 
+#ifdef DEBUG
+	.keys_inorder		= xfs_allocbt_keys_inorder,
+	.recs_inorder		= xfs_allocbt_recs_inorder,
+#endif
+
 #ifdef XFS_BTREE_TRACE
 	.trace_enter		= xfs_allocbt_trace_enter,
 	.trace_cursor		= xfs_allocbt_trace_cursor,
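As an aside (not part of the patch), the ordering rules the two functions above encode can be restated in plain userspace C. The sketch below uses host-endian integers and hypothetical names; the kernel code operates on the big-endian on-disk fields via be32_to_cpu(). The by-block (BNO) btree requires that a record ends at or before the next record starts, while the by-size (CNT) btree orders records by block count first and start block second.

```c
/*
 * Userspace illustration of the ordering rules checked by
 * xfs_allocbt_keys_inorder()/xfs_allocbt_recs_inorder(). The struct and
 * helper names here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct alloc_rec {
	uint32_t	ar_startblock;	/* first block of the free extent */
	uint32_t	ar_blockcount;	/* length of the free extent */
};

/* by-block (BNO) btree: adjacent free extents must not overlap */
static int bno_recs_inorder(const struct alloc_rec *r1,
			    const struct alloc_rec *r2)
{
	return r1->ar_startblock + r1->ar_blockcount <= r2->ar_startblock;
}

/* by-size (CNT) btree: order by length, then by start block */
static int cnt_recs_inorder(const struct alloc_rec *r1,
			    const struct alloc_rec *r2)
{
	return r1->ar_blockcount < r2->ar_blockcount ||
	       (r1->ar_blockcount == r2->ar_blockcount &&
		r1->ar_startblock < r2->ar_startblock);
}

int main(void)
{
	struct alloc_rec a = { .ar_startblock = 100, .ar_blockcount = 8 };
	struct alloc_rec b = { .ar_startblock = 108, .ar_blockcount = 4 };

	/* a ends exactly where b starts, so (a, b) is fine for BNO... */
	printf("bno inorder: %d\n", bno_recs_inorder(&a, &b));
	/* ...but b is shorter than a, so (a, b) is out of order for CNT */
	printf("cnt inorder: %d\n", cnt_recs_inorder(&a, &b));
	return 0;
}
```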