author | NeilBrown <neilb@suse.com> | 2019-04-12 03:52:08 +0200
---|---|---
committer | David S. Miller <davem@davemloft.net> | 2019-04-13 02:34:45 +0200
commit | f4712b46a529ca2da078c82d5d99d367c7ebf82b |
tree | 0e77f82fcf92c8483e192ecba4acb6ce8c5a86a8 /lib/rhashtable.c |
parent | rhashtable: move dereference inside rht_ptr() |
rhashtable: replace rht_ptr_locked() with rht_assign_locked()
The only time rht_ptr_locked() is used is to store a new value in a
bucket head, and that is the only place it makes sense to use it.
So replace it with a function that does the whole task: set the lock
bit and assign to the bucket head.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
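
The helper itself is presumably added in include/linux/rhashtable.h, which is outside the filtered diffstat below. As a rough illustration of "set the lock bit and assign to the bucket head", here is a minimal sketch of such a helper, assuming the bucket bit lock lives in bit 0 of the bucket-head pointer and using the rhash_lock_head bucket type from this series; the exact definition in the kernel may differ:

```c
/*
 * Sketch only, not the exact definition from this commit: publish a new
 * chain head into a bucket whose bit lock is currently held, keeping the
 * lock bit set so the store does not implicitly unlock the bucket.
 * Assumes the lock is bit 0 of the bucket-head pointer.
 */
static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
				     struct rhash_head *obj)
{
	struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;

	/* rcu_assign_pointer() orders the initialisation of *obj before
	 * the store that makes it visible to RCU readers.
	 */
	rcu_assign_pointer(*p, (void *)((unsigned long)obj | BIT(0)));
}
```

This matches how the three call sites in the diff use it: the caller already holds the bucket's bit lock, so only the pointer part of the word is updated while the lock bit stays set.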
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r-- | lib/rhashtable.c | 6 |
1 file changed, 3 insertions, 3 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 237368ea98c5..ef5378efdef3 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -259,7 +259,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 		rcu_assign_pointer(*pprev, next);
 	else
 		/* Need to preserved the bit lock. */
-		rcu_assign_pointer(*bkt, rht_ptr_locked(next));
+		rht_assign_locked(bkt, next);
 
 out:
 	return err;
@@ -517,7 +517,7 @@ static void *rhashtable_lookup_one(struct rhashtable *ht,
 			rcu_assign_pointer(*pprev, obj);
 		else
 			/* Need to preserve the bit lock */
-			rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+			rht_assign_locked(bkt, obj);
 
 		return NULL;
 	}
@@ -570,7 +570,7 @@ static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht,
 	/* bkt is always the head of the list, so it holds
 	 * the lock, which we need to preserve
 	 */
-	rcu_assign_pointer(*bkt, rht_ptr_locked(obj));
+	rht_assign_locked(bkt, obj);
 
 	atomic_inc(&ht->nelems);
 	if (rht_grow_above_75(ht, tbl))