author     Herbert Xu <herbert@gondor.apana.org.au>  2015-03-23 14:50:26 +0100
committer  David S. Miller <davem@davemloft.net>     2015-03-24 03:07:52 +0100
commit     b824478b2145be78ac19e1cf44e2b9036c7a9608 (patch)
tree       19c7d4ad479fd987eef5fd9b6063df27fd26012a /lib
parent     rhashtable: Shrink to fit (diff)
rhashtable: Add multiple rehash support
This patch adds the missing bits to allow multiple rehashes. The read side as well as removal already handle this correctly, so only the rehasher and insertion need modification. Note that this patch doesn't actually enable multiple rehashes; for now rehashing is still only performed by the worker thread.

This patch also disables the explicit expand/shrink interface, because the table is meant to expand and shrink automatically, and continuing to export these interfaces would unnecessarily complicate the life of the rehasher now that the rehash process is composed of two parts.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
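The heart of the change is the chain of future_tbl pointers: while rehashes are pending, the bucket tables form a singly linked list, and insertions must always target the newest (last) table in that chain, which is what the new rhashtable_last_table() helper finds. Below is a minimal userspace sketch of that walk, using a simplified stand-in structure; RCU, locking and the rest of struct bucket_table are deliberately elided.

/* Simplified model of walking the future_tbl chain to the newest
 * table, as rhashtable_last_table() does in the patch. Not kernel
 * code: RCU dereferences are replaced by plain pointer reads. */
#include <stdio.h>

struct bucket_table {
	unsigned int size;
	struct bucket_table *future_tbl;	/* next table in the rehash chain */
};

static struct bucket_table *last_table(struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = tbl->future_tbl;
	} while (tbl);

	return new_tbl;
}

int main(void)
{
	/* Two in-flight rehashes chain three tables: 4 -> 8 -> 16. */
	struct bucket_table c = { .size = 16, .future_tbl = NULL };
	struct bucket_table b = { .size = 8,  .future_tbl = &c };
	struct bucket_table a = { .size = 4,  .future_tbl = &b };

	printf("inserts target the table of size %u\n", last_table(&a)->size);
	return 0;
}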
Diffstat (limited to 'lib')
-rw-r--r--  lib/rhashtable.c       | 87
-rw-r--r--  lib/test_rhashtable.c  | 24
2 files changed, 72 insertions(+), 39 deletions(-)
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9623be345d9c..5e04403e25f5 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -136,11 +136,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
return tbl;
}
+static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
+ struct bucket_table *tbl)
+{
+ struct bucket_table *new_tbl;
+
+ do {
+ new_tbl = tbl;
+ tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ } while (tbl);
+
+ return new_tbl;
+}
+
static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct bucket_table *new_tbl =
- rht_dereference(old_tbl->future_tbl, ht) ?: old_tbl;
+ struct bucket_table *new_tbl = rhashtable_last_table(ht,
+ rht_dereference_rcu(old_tbl->future_tbl, ht));
struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
int err = -ENOENT;
struct rhash_head *head, *next, *entry;
@@ -196,12 +209,18 @@ static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
spin_unlock_bh(old_bucket_lock);
}
-static void rhashtable_rehash(struct rhashtable *ht,
- struct bucket_table *new_tbl)
+static int rhashtable_rehash_attach(struct rhashtable *ht,
+ struct bucket_table *old_tbl,
+ struct bucket_table *new_tbl)
{
- struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
- struct rhashtable_walker *walker;
- unsigned old_hash;
+ /* Protect future_tbl using the first bucket lock. */
+ spin_lock_bh(old_tbl->locks);
+
+ /* Did somebody beat us to it? */
+ if (rcu_access_pointer(old_tbl->future_tbl)) {
+ spin_unlock_bh(old_tbl->locks);
+ return -EEXIST;
+ }
/* Make insertions go into the new, empty table right away. Deletions
* and lookups will be attempted in both tables until we synchronize.
@@ -211,6 +230,22 @@ static void rhashtable_rehash(struct rhashtable *ht,
/* Ensure the new table is visible to readers. */
smp_wmb();
+ spin_unlock_bh(old_tbl->locks);
+
+ return 0;
+}
+
+static int rhashtable_rehash_table(struct rhashtable *ht)
+{
+ struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
+ struct bucket_table *new_tbl;
+ struct rhashtable_walker *walker;
+ unsigned old_hash;
+
+ new_tbl = rht_dereference(old_tbl->future_tbl, ht);
+ if (!new_tbl)
+ return 0;
+
for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
rhashtable_rehash_chain(ht, old_hash);
@@ -225,6 +260,8 @@ static void rhashtable_rehash(struct rhashtable *ht,
* remain.
*/
call_rcu(&old_tbl->rcu, bucket_table_free_rcu);
+
+ return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
/**
@@ -242,20 +279,25 @@ static void rhashtable_rehash(struct rhashtable *ht,
* It is valid to have concurrent insertions and deletions protected by per
* bucket locks or concurrent RCU protected lookups and traversals.
*/
-int rhashtable_expand(struct rhashtable *ht)
+static int rhashtable_expand(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+ int err;
ASSERT_RHT_MUTEX(ht);
+ old_tbl = rhashtable_last_table(ht, old_tbl);
+
new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
if (new_tbl == NULL)
return -ENOMEM;
- rhashtable_rehash(ht, new_tbl);
- return 0;
+ err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+ if (err)
+ bucket_table_free(new_tbl);
+
+ return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_expand);
/**
* rhashtable_shrink - Shrink hash table while allowing concurrent lookups
@@ -273,10 +315,11 @@ EXPORT_SYMBOL_GPL(rhashtable_expand);
* It is valid to have concurrent insertions and deletions protected by per
* bucket locks or concurrent RCU protected lookups and traversals.
*/
-int rhashtable_shrink(struct rhashtable *ht)
+static int rhashtable_shrink(struct rhashtable *ht)
{
struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+ int err;
ASSERT_RHT_MUTEX(ht);
@@ -286,19 +329,25 @@ int rhashtable_shrink(struct rhashtable *ht)
if (old_tbl->size <= size)
return 0;
+ if (rht_dereference(old_tbl->future_tbl, ht))
+ return -EEXIST;
+
new_tbl = bucket_table_alloc(ht, size);
if (new_tbl == NULL)
return -ENOMEM;
- rhashtable_rehash(ht, new_tbl);
- return 0;
+ err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
+ if (err)
+ bucket_table_free(new_tbl);
+
+ return err;
}
-EXPORT_SYMBOL_GPL(rhashtable_shrink);
static void rht_deferred_worker(struct work_struct *work)
{
struct rhashtable *ht;
struct bucket_table *tbl;
+ int err = 0;
ht = container_of(work, struct rhashtable, run_work);
mutex_lock(&ht->mutex);
@@ -306,13 +355,20 @@ static void rht_deferred_worker(struct work_struct *work)
goto unlock;
tbl = rht_dereference(ht->tbl, ht);
+ tbl = rhashtable_last_table(ht, tbl);
if (rht_grow_above_75(ht, tbl))
rhashtable_expand(ht);
else if (rht_shrink_below_30(ht, tbl))
rhashtable_shrink(ht);
+
+ err = rhashtable_rehash_table(ht);
+
unlock:
mutex_unlock(&ht->mutex);
+
+ if (err)
+ schedule_work(&ht->run_work);
}
int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
@@ -323,6 +379,7 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
unsigned hash;
int err = -EEXIST;
+ tbl = rhashtable_last_table(ht, tbl);
hash = head_hashfn(ht, tbl, obj);
spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index a2ba6adb60a2..a42a0d44e818 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -155,30 +155,6 @@ static int __init test_rhashtable(struct rhashtable *ht)
test_rht_lookup(ht);
rcu_read_unlock();
- for (i = 0; i < TEST_NEXPANDS; i++) {
- pr_info(" Table expansion iteration %u...\n", i);
- mutex_lock(&ht->mutex);
- rhashtable_expand(ht);
- mutex_unlock(&ht->mutex);
-
- rcu_read_lock();
- pr_info(" Verifying lookups...\n");
- test_rht_lookup(ht);
- rcu_read_unlock();
- }
-
- for (i = 0; i < TEST_NEXPANDS; i++) {
- pr_info(" Table shrinkage iteration %u...\n", i);
- mutex_lock(&ht->mutex);
- rhashtable_shrink(ht);
- mutex_unlock(&ht->mutex);
-
- rcu_read_lock();
- pr_info(" Verifying lookups...\n");
- test_rht_lookup(ht);
- rcu_read_unlock();
- }
-
rcu_read_lock();
test_bucket_stats(ht, true);
rcu_read_unlock();
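For reference, the two-part rehash flow that rht_deferred_worker() now drives can be modelled in userspace as below: phase one attaches a future table and fails with -EEXIST if somebody beat us to it (as in rhashtable_rehash_attach()); phase two migrates one table and returns -EAGAIN while further tables remain, which is why the worker reschedules itself on a non-zero return. All names here are illustrative stand-ins, not the kernel API; locking, RCU and the actual bucket migration are elided.

/* Self-contained model of the two-part rehash. attach() mirrors the
 * -EEXIST race check in rhashtable_rehash_attach(); rehash_table()
 * mirrors the -EAGAIN "more tables pending" return. */
#include <errno.h>
#include <stdio.h>

struct tbl {
	struct tbl *future;		/* next table in the rehash chain */
};

static int attach(struct tbl *old, struct tbl *new)
{
	if (old->future)		/* somebody beat us to it */
		return -EEXIST;
	old->future = new;		/* inserts now go to the new table */
	return 0;
}

static int rehash_table(struct tbl **head)
{
	struct tbl *old = *head;

	if (!old->future)
		return 0;		/* no rehash pending */
	/* ... here the kernel moves old's bucket chains over ... */
	*head = old->future;		/* retire the old table */
	return (*head)->future ? -EAGAIN : 0;
}

int main(void)
{
	struct tbl t1 = { NULL }, t2 = { NULL }, t3 = { NULL };
	struct tbl *head = &t1;
	int err;

	attach(&t1, &t2);				/* first pending rehash */
	printf("racing attach: %d\n", attach(&t1, &t3));	/* -EEXIST */
	attach(&t2, &t3);				/* queue a second rehash */

	while ((err = rehash_table(&head)) == -EAGAIN)
		;					/* models schedule_work() */
	printf("chain drained, err = %d\n", err);
	return 0;
}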