author     Lucas Stach <dev@lynxeye.de>          2016-12-07 07:36:36 +0100
committer  Dave Chinner <david@fromorbit.com>    2016-12-07 07:36:36 +0100
commit     6031e73a5b3f85ec45cac08ef90995b2d3f941c7 (patch)
tree       7ea855c18580f1b227a8ed8de6379a660c17d5f2 /fs/xfs/xfs_mount.h
parent     xfs: optimise CRC updates (diff)
download   linux-6031e73a5b3f85ec45cac08ef90995b2d3f941c7.tar.xz
           linux-6031e73a5b3f85ec45cac08ef90995b2d3f941c7.zip
xfs: use rhashtable to track buffer cache
On filesystems with a lot of metadata and in metadata-intensive workloads, xfs_buf_find() is showing up at the top of the CPU cycles trace. Most of the CPU time is spent on CPU cache misses while traversing the rbtree.

As the buffer cache does not need any kind of ordering, but only fast lookups, a hashtable is the natural data structure to use. The rhashtable infrastructure provides a self-scaling hashtable implementation and allows lookups to proceed while the table is going through a resize operation.

This reduces the CPU time spent on the lookups to 1/3 even for small filesystems with a relatively small number of cached buffers, with possibly much larger gains on more heavily loaded filesystems.

[dchinner: reduce minimum hash size to an acceptable size for large filesystems with many AGs with no active use.]
[dchinner: remove stale rbtree asserts.]
[dchinner: use xfs_buf_map for compare function argument.]
[dchinner: make functions static.]
[dchinner: remove redundant comments.]

Signed-off-by: Lucas Stach <dev@lynxeye.de>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dave Chinner <david@fromorbit.com>
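For illustration only, here is a minimal sketch (not code from this patch) of how a cache keyed by block number can be tracked with the kernel's rhashtable API, which is the mechanism replacing the rbtree traversal described above. The struct and member names (demo_buf, db_blkno, db_hash_head) are hypothetical; XFS keys its buffers per AG and, per the notes above, compares via xfs_buf_map.

/*
 * Illustrative only: a cache keyed by block number, indexed with the
 * kernel rhashtable.  Struct and member names here are hypothetical.
 */
#include <linux/rhashtable.h>

struct demo_buf {
	u64			db_blkno;	/* lookup key */
	struct rhash_head	db_hash_head;	/* hash table linkage */
};

static const struct rhashtable_params demo_buf_hash_params = {
	.key_len		= sizeof(u64),
	.key_offset		= offsetof(struct demo_buf, db_blkno),
	.head_offset		= offsetof(struct demo_buf, db_hash_head),
	.automatic_shrinking	= true,		/* table resizes itself */
};

/* average O(1) lookup; no pointer-chasing down an rbtree */
static struct demo_buf *
demo_buf_find(struct rhashtable *ht, u64 blkno)
{
	return rhashtable_lookup_fast(ht, &blkno, demo_buf_hash_params);
}

static int
demo_buf_insert(struct rhashtable *ht, struct demo_buf *dbp)
{
	return rhashtable_insert_fast(ht, &dbp->db_hash_head,
				      demo_buf_hash_params);
}

rhashtable_lookup_fast() takes the RCU read lock internally, which is why lookups can proceed while the table is being resized, as the commit message notes.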
Diffstat (limited to 'fs/xfs/xfs_mount.h')
-rw-r--r--  fs/xfs/xfs_mount.h | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 819b80b15bfb..84f785218907 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -393,8 +393,8 @@ typedef struct xfs_perag {
 	unsigned long	pag_ici_reclaim_cursor;	/* reclaim restart point */
 
 	/* buffer cache index */
-	spinlock_t	pag_buf_lock;	/* lock for pag_buf_tree */
-	struct rb_root	pag_buf_tree;	/* ordered tree of active buffers */
+	spinlock_t	pag_buf_lock;	/* lock for pag_buf_hash */
+	struct rhashtable pag_buf_hash;
 
 	/* for rcu-safe freeing */
 	struct rcu_head	rcu_head;
@@ -424,6 +424,9 @@ xfs_perag_resv(
 	}
 }
 
+int xfs_buf_hash_init(xfs_perag_t *pag);
+void xfs_buf_hash_destroy(xfs_perag_t *pag);
+
 extern void	xfs_uuid_table_free(void);
 extern int	xfs_log_sbcount(xfs_mount_t *);
 extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
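The hunk above only adds the prototypes; the bodies live outside this diffstat (in the buffer cache code). A rough sketch of what such helpers could look like on top of rhashtable_init()/rhashtable_destroy() follows; the parameter values and the b_bn/b_rhash_head field names in struct xfs_buf are assumptions for illustration, not taken from this hunk.

/* hypothetical wiring of the new prototypes to the rhashtable core */
static const struct rhashtable_params xfs_buf_hash_params = {
	.min_size		= 32,	/* small footprint for AGs with no active use */
	.key_len		= sizeof(xfs_daddr_t),
	.key_offset		= offsetof(struct xfs_buf, b_bn),	  /* assumed key field */
	.head_offset		= offsetof(struct xfs_buf, b_rhash_head), /* assumed linkage */
	.automatic_shrinking	= true,
};

int
xfs_buf_hash_init(
	xfs_perag_t	*pag)
{
	spin_lock_init(&pag->pag_buf_lock);
	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
}

void
xfs_buf_hash_destroy(
	xfs_perag_t	*pag)
{
	rhashtable_destroy(&pag->pag_buf_hash);
}

Presumably these helpers are called from the per-AG setup and teardown paths, matching the per-AG pag_buf_hash field introduced in the first hunk.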