summaryrefslogtreecommitdiffstats
path: root/kernel/bpf/hashtab.c
diff options
context:
space:
mode:
author: tom.leiming@gmail.com <tom.leiming@gmail.com> 2015-12-29 15:40:26 +0100
committer: David S. Miller <davem@davemloft.net> 2015-12-29 21:13:44 +0100
commit: 45d8390c56bd2851097736c1c20ad958880168df (patch)
tree: 230b546c8010d793e879618ffc656e12a07fd40a /kernel/bpf/hashtab.c
parent: bpf: hash: use atomic count (diff)
downloadlinux-45d8390c56bd2851097736c1c20ad958880168df.tar.xz
linux-45d8390c56bd2851097736c1c20ad958880168df.zip
bpf: hash: move select_bucket() out of htab's spinlock
The spinlock is just used for protecting the per-bucket hlist, so it isn't needed for selecting bucket. Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to '')
-rw-r--r-- kernel/bpf/hashtab.c | 6
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 2615388009a4..d857fcb3607b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -248,12 +248,11 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);
l_new->hash = htab_map_hash(l_new->key, key_size);
+ head = select_bucket(htab, l_new->hash);
/* bpf_map_update_elem() can be called in_irq() */
raw_spin_lock_irqsave(&htab->lock, flags);
- head = select_bucket(htab, l_new->hash);
-
l_old = lookup_elem_raw(head, l_new->hash, key, key_size);
if (!l_old && unlikely(atomic_read(&htab->count) >= map->max_entries)) {
@@ -310,11 +309,10 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
key_size = map->key_size;
hash = htab_map_hash(key, key_size);
+ head = select_bucket(htab, hash);
raw_spin_lock_irqsave(&htab->lock, flags);
- head = select_bucket(htab, hash);
-
l = lookup_elem_raw(head, hash, key, key_size);
if (l) {