path: root/net/ipv6
author		Eric Dumazet <edumazet@google.com>	2023-02-02 10:41:00 +0100
committer	Jakub Kicinski <kuba@kernel.org>	2023-02-04 04:56:23 +0100
commit		6579f5bacc2c4cbc5ef6abb45352416939d1f844 (patch)
tree		acd0874966a904b5fc338dd0696ef8e050ab37e0 /net/ipv6
parent		ipv4: raw: add drop reasons (diff)
raw: use net_hash_mix() in hash function
Some applications seem to rely on RAW sockets.

If they use private netns, we can avoid piling all RAW sockets bound
to a given protocol into a single bucket.

Also place (struct raw_hashinfo).lock into its own cache line to
limit false sharing.

Alternative would be to have per-netns hashtables, but this seems
too expensive for most netns where RAW sockets are not used.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
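The diff below is limited to net/ipv6, so the helper definition itself is not part of this page. A minimal sketch of what raw_hashfunc() could look like, assuming a RAW_HTABLE_LOG constant (log2 of RAW_HTABLE_SIZE) and the generic hash_32() helper; the exact mixing is an assumption based on the description above, not taken from this diff:

/* Sketch only: mix the netns identity (net_hash_mix()) into the bucket
 * choice so that RAW sockets bound to the same protocol in different
 * private netns no longer pile into a single bucket.
 */
static inline int raw_hashfunc(const struct net *net, u32 proto)
{
	return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
}

Mixing the netns hash into the existing table keeps lookups cheap and avoids the per-netns hashtables mentioned (and rejected as too expensive) in the commit message.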
Diffstat (limited to 'net/ipv6')
-rw-r--r--	net/ipv6/raw.c	4
1 file changed, 2 insertions, 2 deletions
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 2e1c8060b51a..bac9ba747bde 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -152,7 +152,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 	saddr = &ipv6_hdr(skb)->saddr;
 	daddr = saddr + 1;
 
-	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
+	hash = raw_hashfunc(net, nexthdr);
 	hlist = &raw_v6_hashinfo.ht[hash];
 	rcu_read_lock();
 	sk_nulls_for_each(sk, hnode, hlist) {
@@ -338,7 +338,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
 	struct sock *sk;
 	int hash;
 
-	hash = nexthdr & (RAW_HTABLE_SIZE - 1);
+	hash = raw_hashfunc(net, nexthdr);
 	hlist = &raw_v6_hashinfo.ht[hash];
 	rcu_read_lock();
 	sk_nulls_for_each(sk, hnode, hlist) {
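The commit message also mentions placing (struct raw_hashinfo).lock on its own cache line; that change is outside net/ipv6 and therefore not shown in this diff. A hedged sketch of how such a layout could look, assuming the existing RAW_HTABLE_SIZE table and the kernel's ____cacheline_aligned attribute:

/* Sketch only: keep the write-side spinlock on a different cache line
 * than the read-mostly hash buckets to limit false sharing between
 * bind/unbind (lock) and the RX lookup path (ht[]).
 */
struct raw_hashinfo {
	spinlock_t lock;

	struct hlist_nulls_head ht[RAW_HTABLE_SIZE] ____cacheline_aligned;
};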