author    NeilBrown <neilb@suse.com>            2019-04-12 03:52:08 +0200
committer David S. Miller <davem@davemloft.net> 2019-04-13 02:34:45 +0200
commit    adc6a3ab192eb40fb9d8b093c87d9aa785af4513 (patch)
tree      6aae93c08ed6295e47300fbe11dc010ee1b32a1d /lib
parent    rhashtable: reorder some inline functions and macros. (diff)
rhashtable: move dereference inside rht_ptr()
Rather than dereferencing a pointer to a bucket and then passing the
result to rht_ptr(), we now pass in the pointer and do the dereference
in rht_ptr().
This requires that we pass in the tbl and hash as well to support RCU
checks, and means that the various rht_for_each functions can expect a
pointer that can be dereferenced without further care.
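
As an illustration, a simplified sketch of the new shape of rht_ptr() (not the
literal definition in include/linux/rhashtable.h - the real helper also has to
deal with the bucket lock bit and with empty (nulls) buckets, and the bucket
pointer type is abbreviated here):

    static inline struct rhash_head *rht_ptr(struct rhash_head __rcu * const *bkt,
                                             struct bucket_table *tbl,
                                             unsigned int hash)
    {
            /* The dereference, and the RCU/lockdep check that needs tbl
             * and hash, now happens inside the helper instead of at
             * every call site.
             */
            return rht_dereference_bucket(*bkt, tbl, hash);
    }

Call sites then change from rht_ptr(rht_dereference_bucket(*bkt, tbl, hash))
to rht_ptr(bkt, tbl, hash), as the diff below shows.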
There are two places where we dereference a bucket pointer
where there is no testable protection - in each case we know
that we must have exclusive access without having taken a lock.
The previous code used rht_dereference() to pretend that holding
the mutex provided protection, but holding the mutex never provides
protection for accessing buckets.
So instead introduce rht_ptr_exclusive() that can be used when
there is known to be exclusive access without holding any locks.
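
As a similarly simplified sketch (same caveats as above), rht_ptr_exclusive()
can use rcu_dereference_protected() with a constant-true condition, which
documents that the caller asserts exclusive access rather than naming a lock:

    static inline struct rhash_head *rht_ptr_exclusive(struct rhash_head __rcu * const *bkt)
    {
            /* The "1" condition: the caller guarantees exclusive access,
             * so there is no lock expression to check here.
             */
            return rcu_dereference_protected(*bkt, 1);
    }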
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'lib')
 lib/rhashtable.c      | 12 ++++++------
 lib/test_rhashtable.c |  2 +-
 2 files changed, 7 insertions, 7 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e387ceb00e86..237368ea98c5 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,7 +231,8 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 
 	err = -ENOENT;
 
-	rht_for_each_from(entry, rht_ptr(*bkt), old_tbl, old_hash) {
+	rht_for_each_from(entry, rht_ptr(bkt, old_tbl, old_hash),
+			  old_tbl, old_hash) {
 		err = 0;
 		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);
@@ -248,8 +249,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
 			SINGLE_DEPTH_NESTING);
 
-	head = rht_ptr(rht_dereference_bucket(new_tbl->buckets[new_hash],
-					      new_tbl, new_hash));
+	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
 
 	RCU_INIT_POINTER(entry->next, head);
 
@@ -491,7 +491,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 	int elasticity;
 
 	elasticity = RHT_ELASTICITY;
-	rht_for_each_from(head, rht_ptr(*bkt), tbl, hash) {
+	rht_for_each_from(head, rht_ptr(bkt, tbl, hash), tbl, hash) {
 		struct rhlist_head *list;
 		struct rhlist_head *plist;
@@ -557,7 +557,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	if (unlikely(rht_grow_above_100(ht, tbl)))
 		return ERR_PTR(-EAGAIN);
 
-	head = rht_ptr(rht_dereference_bucket(*bkt, tbl, hash));
+	head = rht_ptr(bkt, tbl, hash);
 
 	RCU_INIT_POINTER(obj->next, head);
 
 	if (ht->rhlist) {
@@ -1139,7 +1139,7 @@ restart:
 		struct rhash_head *pos, *next;
 
 		cond_resched();
-		for (pos = rht_ptr(rht_dereference(*rht_bucket(tbl, i), ht)),
+		for (pos = rht_ptr_exclusive(rht_bucket(tbl, i)),
 		     next = !rht_is_a_nulls(pos) ?
 				rht_dereference(pos->next, ht) : NULL;
 		     !rht_is_a_nulls(pos);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 02592c2a249c..084fe5a6ac57 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -500,7 +500,7 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
 			struct rhash_head *pos, *next;
 			struct test_obj_rhl *p;
 
-			pos = rht_ptr(rht_dereference(tbl->buckets[i], ht));
+			pos = rht_ptr_exclusive(tbl->buckets + i);
 			next = !rht_is_a_nulls(pos) ?
 				rht_dereference(pos->next, ht) : NULL;
 
 			if (!rht_is_a_nulls(pos)) {