diff options
author | Herbert Xu <herbert@gondor.apana.org.au> | 2015-03-14 03:57:21 +0100 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-03-15 06:35:34 +0100 |
commit | 8f2484bdb55daa53ecaddb5fa4c298e3d262b69e (patch) | |
tree | 4d27176d889d22df4587fd4e94ff85d406409b8a /lib/rhashtable.c | |
parent | rhashtable: Fix walker behaviour during rehash (diff) | |
download | linux-8f2484bdb55daa53ecaddb5fa4c298e3d262b69e.tar.xz linux-8f2484bdb55daa53ecaddb5fa4c298e3d262b69e.zip |
rhashtable: Use SINGLE_DEPTH_NESTING
We only nest one level deep, so there is no need to roll our own
subclasses.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to '')
-rw-r--r-- | lib/rhashtable.c | 9 |
1 file changed, 2 insertions, 7 deletions
diff --git a/lib/rhashtable.c b/lib/rhashtable.c index f7c76079f8f1..5d06cc2b1e4a 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -33,11 +33,6 @@ /* Base bits plus 1 bit for nulls marker */ #define HASH_RESERVED_SPACE (RHT_BASE_BITS + 1) -enum { - RHT_LOCK_NORMAL, - RHT_LOCK_NESTED, -}; - /* The bucket lock is selected based on the hash and protects mutations * on a group of hash buckets. * @@ -231,7 +226,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash) new_bucket_lock = bucket_lock(new_tbl, new_hash); - spin_lock_nested(new_bucket_lock, RHT_LOCK_NESTED); + spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING); head = rht_dereference_bucket(new_tbl->buckets[new_hash], new_tbl, new_hash); @@ -405,7 +400,7 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, tbl = rht_dereference_rcu(ht->future_tbl, ht); if (tbl != old_tbl) { hash = head_hashfn(ht, tbl, obj); - spin_lock_nested(bucket_lock(tbl, hash), RHT_LOCK_NESTED); + spin_lock_nested(bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING); } if (compare && |