path: root/lib/rhashtable.c
author		NeilBrown <neilb@suse.com>	2018-06-18 04:52:50 +0200
committer	David S. Miller <davem@davemloft.net>	2018-06-22 06:43:27 +0200
commit		0ad66449aa3cbaedbdeaf55bffce74084bb7e9f9 (patch)
tree		32b444b2154c26c3936446dcc9d1d234521f23a5 /lib/rhashtable.c
parent		rhashtable: simplify nested_table_alloc() and rht_bucket_nested_insert() (diff)
rhashtable: use cmpxchg() to protect ->future_tbl.
Rather than borrowing one of the bucket locks to protect ->future_tbl updates, use cmpxchg(). This gives more freedom to change how bucket locking is implemented.

Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
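For illustration, a minimal sketch of the publish-once idiom the patch switches to. The struct and function names here (struct head, struct table, publish_future_table()) are hypothetical stand-ins rather than rhashtable's own types; only cmpxchg() and -EEXIST are real kernel interfaces.

#include <linux/atomic.h>	/* cmpxchg() */
#include <linux/errno.h>	/* -EEXIST */

struct table;			/* stand-in for struct bucket_table */

struct head {
	struct table *future_tbl;	/* NULL until a successor table is published */
};

/* Publish @new_tbl exactly once.  cmpxchg() only stores if future_tbl is
 * still NULL and returns the old value, so exactly one caller wins; every
 * later caller sees a non-NULL old value and backs off with -EEXIST.
 * Because a successful cmpxchg() acts as a full memory barrier, the stores
 * that initialised @new_tbl are visible before the pointer itself, which is
 * why the patch can drop rcu_assign_pointer().
 */
static int publish_future_table(struct head *h, struct table *new_tbl)
{
	if (cmpxchg(&h->future_tbl, NULL, new_tbl) != NULL)
		return -EEXIST;
	return 0;
}

Compared with the removed code, the cmpxchg() folds the "did somebody beat us to it?" check and the pointer assignment into one atomic step, so no bucket lock is needed to keep them together.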
Diffstat (limited to 'lib/rhashtable.c')
-rw-r--r--	lib/rhashtable.c	15
1 file changed, 4 insertions, 11 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 2aa41c15df17..52ec83212856 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -297,21 +297,14 @@ static int rhashtable_rehash_attach(struct rhashtable *ht,
 				    struct bucket_table *old_tbl,
 				    struct bucket_table *new_tbl)
 {
-	/* Protect future_tbl using the first bucket lock. */
-	spin_lock_bh(old_tbl->locks);
-
-	/* Did somebody beat us to it? */
-	if (rcu_access_pointer(old_tbl->future_tbl)) {
-		spin_unlock_bh(old_tbl->locks);
-		return -EEXIST;
-	}
-
 	/* Make insertions go into the new, empty table right away. Deletions
 	 * and lookups will be attempted in both tables until we synchronize.
+	 * As cmpxchg() provides strong barriers, we do not need
+	 * rcu_assign_pointer().
 	 */
 
-	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);
-	spin_unlock_bh(old_tbl->locks);
+	if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL)
+		return -EEXIST;
 
 	return 0;
 }
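On the read side nothing needs to change. As a hedged sketch reusing the hypothetical types above: readers still fetch the successor table under RCU (lib/rhashtable.c does this through rht_dereference_rcu()), and because a successful cmpxchg() acts as a full memory barrier, a reader that sees a non-NULL pointer also sees the initialised table, just as it would with rcu_assign_pointer().

#include <linux/rcupdate.h>	/* rcu_dereference() */

/* Hypothetical reader-side helper; callers are assumed to hold
 * rcu_read_lock().  The dependency ordering in rcu_dereference() pairs
 * with the writer's cmpxchg() publication sketched above.
 */
static struct table *peek_future_table(struct head *h)
{
	return rcu_dereference(h->future_tbl);
}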