author | Andreas Gruenbacher <agruenba@redhat.com> | 2016-02-23 04:42:05 +0100 |
---|---|---|
committer | Theodore Ts'o <tytso@mit.edu> | 2016-02-23 04:42:05 +0100 |
commit | dc8d5e565f00c9442fa1cbf9acc115475628527c (patch) | |
tree | 19cee33666fc32278307e03ede65746dc992f71c /fs/mbcache.c | |
parent | ext4: kill ext4_mballoc_ready (diff) | |
download | linux-dc8d5e565f00c9442fa1cbf9acc115475628527c.tar.xz linux-dc8d5e565f00c9442fa1cbf9acc115475628527c.zip |
mbcache: get rid of _e_hash_list_head
Get rid of the _e_hash_list_head field in cache entries and add a bit field
e_referenced instead. The hash chain head no longer needs to be stored in each
entry: it can be recomputed from the entry's key whenever it is needed.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
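For context, the change described above amounts to the following difference in the entry layout. This is only a sketch inferred from the patch: the header diff is outside this diffstat, so the exact field order and the surrounding members are assumptions, and the `_old`/`_new` suffixes are purely illustrative.

```c
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/types.h>

/* Before: one unsigned long carried both the hash bucket head pointer and
 * the "referenced" flag in its low bit (pointer alignment keeps bit 0 free).
 */
struct mb_cache_entry_old {
	struct list_head	e_list;		/* LRU list, under c_list_lock */
	struct hlist_bl_node	e_hash_list;	/* hash chain, under chain bitlock */
	atomic_t		e_refcnt;
	u32			e_key;
	sector_t		e_block;
	unsigned long		_e_hash_list_head;	/* head pointer | referenced bit */
};

/* After: the bucket head is recomputed from e_key on demand (see
 * mb_cache_entry_head() in the diff below), so only a one-bit flag remains.
 */
struct mb_cache_entry_new {
	struct list_head	e_list;
	struct hlist_bl_node	e_hash_list;
	atomic_t		e_refcnt;
	u32			e_key;
	u32			e_referenced:1;
	sector_t		e_block;
};
```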
Diffstat (limited to 'fs/mbcache.c')
-rw-r--r-- | fs/mbcache.c | 41
1 file changed, 10 insertions(+), 31 deletions(-)
```diff
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 4241b633f155..903be151dcfe 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -45,27 +45,10 @@ static struct kmem_cache *mb_entry_cache;
 static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                      unsigned int nr_to_scan);
 
-static inline bool mb_cache_entry_referenced(struct mb_cache_entry *entry)
+static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
+                                                        u32 key)
 {
-        return entry->_e_hash_list_head & 1;
-}
-
-static inline void mb_cache_entry_set_referenced(struct mb_cache_entry *entry)
-{
-        entry->_e_hash_list_head |= 1;
-}
-
-static inline void mb_cache_entry_clear_referenced(
-                                        struct mb_cache_entry *entry)
-{
-        entry->_e_hash_list_head &= ~1;
-}
-
-static inline struct hlist_bl_head *mb_cache_entry_head(
-                                        struct mb_cache_entry *entry)
-{
-        return (struct hlist_bl_head *)
-                        (entry->_e_hash_list_head & ~1);
+        return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
 }
 
 /*
@@ -108,8 +91,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
         atomic_set(&entry->e_refcnt, 1);
         entry->e_key = key;
         entry->e_block = block;
-        head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
-        entry->_e_hash_list_head = (unsigned long)head;
+        head = mb_cache_entry_head(cache, key);
         hlist_bl_lock(head);
         hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
                 if (dup->e_key == key && dup->e_block == block) {
@@ -146,10 +128,7 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
         struct hlist_bl_node *node;
         struct hlist_bl_head *head;
 
-        if (entry)
-                head = mb_cache_entry_head(entry);
-        else
-                head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+        head = mb_cache_entry_head(cache, key);
         hlist_bl_lock(head);
         if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
                 node = entry->e_hash_list.next;
@@ -219,7 +198,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
         struct hlist_bl_head *head;
         struct mb_cache_entry *entry;
 
-        head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+        head = mb_cache_entry_head(cache, key);
         hlist_bl_lock(head);
         hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                 if (entry->e_key == key && entry->e_block == block) {
@@ -250,7 +229,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete_block);
 void mb_cache_entry_touch(struct mb_cache *cache,
                           struct mb_cache_entry *entry)
 {
-        mb_cache_entry_set_referenced(entry);
+        entry->e_referenced = 1;
 }
 EXPORT_SYMBOL(mb_cache_entry_touch);
 
@@ -275,8 +254,8 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
         while (nr_to_scan-- && !list_empty(&cache->c_list)) {
                 entry = list_first_entry(&cache->c_list,
                                          struct mb_cache_entry, e_list);
-                if (mb_cache_entry_referenced(entry)) {
-                        mb_cache_entry_clear_referenced(entry);
+                if (entry->e_referenced) {
+                        entry->e_referenced = 0;
                         list_move_tail(&cache->c_list, &entry->e_list);
                         continue;
                 }
@@ -287,7 +266,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
                  * from under us.
                  */
                 spin_unlock(&cache->c_list_lock);
-                head = mb_cache_entry_head(entry);
+                head = mb_cache_entry_head(cache, entry->e_key);
                 hlist_bl_lock(head);
                 if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                         hlist_bl_del_init(&entry->e_hash_list);
```
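For readers unfamiliar with the trick being removed: the old helpers worked because a struct hlist_bl_head resides at a pointer-aligned address, so bit 0 of that address is always zero and can double as a flag. Below is a stand-alone sketch of that tagging pattern; the demo_* names are hypothetical, and only the masking logic mirrors the removed helpers.

```c
#include <linux/list_bl.h>
#include <linux/types.h>

/*
 * One unsigned long holds both a bucket head pointer and a one-bit flag:
 * alignment guarantees bit 0 of the address is zero, so it is free to
 * carry the "referenced" state, as _e_hash_list_head did before this patch.
 */
static inline bool demo_referenced(unsigned long tagged)
{
	return tagged & 1;
}

static inline unsigned long demo_set_referenced(unsigned long tagged)
{
	return tagged | 1;
}

static inline struct hlist_bl_head *demo_head(unsigned long tagged)
{
	return (struct hlist_bl_head *)(tagged & ~1UL);
}
```

The patch trades this encoding for a plain e_referenced bit and recomputes the bucket with mb_cache_entry_head(cache, key), which works because the key does not change once an entry is created; the three flag helpers and the pointer masking disappear as a result.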