author		Jakub Kicinski <kuba@kernel.org>	2021-12-02 20:44:56 +0100
committer	Jakub Kicinski <kuba@kernel.org>	2021-12-02 20:44:56 +0100
commit		fc993be36f9ea7fc286d84d8471a1a20e871aad4
tree		cef440a299ed7d1c2418992f7d724b4d86e6d83b /include/net/sock.h
parent		selftests: net: remove meaningless help option
parent		Merge tag 'net-5.16-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/net...
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--	include/net/sock.h	30
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index e7c231373875..ae61cd0b650d 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1943,18 +1943,31 @@ static inline int sk_tx_queue_get(const struct sock *sk)
 	return -1;
 }
 
-static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+static inline void __sk_rx_queue_set(struct sock *sk,
+				     const struct sk_buff *skb,
+				     bool force_set)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
 	if (skb_rx_queue_recorded(skb)) {
 		u16 rx_queue = skb_get_rx_queue(skb);
 
-		if (unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
+		if (force_set ||
+		    unlikely(READ_ONCE(sk->sk_rx_queue_mapping) != rx_queue))
 			WRITE_ONCE(sk->sk_rx_queue_mapping, rx_queue);
 	}
 #endif
 }
 
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+	__sk_rx_queue_set(sk, skb, true);
+}
+
+static inline void sk_rx_queue_update(struct sock *sk, const struct sk_buff *skb)
+{
+	__sk_rx_queue_set(sk, skb, false);
+}
+
 static inline void sk_rx_queue_clear(struct sock *sk)
 {
 #ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
@@ -2457,19 +2470,22 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
  * @sk: socket
  *
  * Use the per task page_frag instead of the per socket one for
- * optimization when we know that we're in the normal context and owns
+ * optimization when we know that we're in process context and own
  * everything that's associated with %current.
  *
- * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
- * inside other socket operations and end up recursing into sk_page_frag()
- * while it's already in use.
+ * Both direct reclaim and page faults can nest inside other
+ * socket operations and end up recursing into sk_page_frag()
+ * while it's already in use: explicitly avoid task page_frag
+ * usage if the caller is potentially doing any of them.
+ * This assumes that page fault handlers use the GFP_NOFS flags.
  *
  * Return: a per task page_frag if context allows that,
  * otherwise a per socket one.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-	if (gfpflags_normal_context(sk->sk_allocation))
+	if ((sk->sk_allocation & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
+	    (__GFP_DIRECT_RECLAIM | __GFP_FS))
 		return &current->task_frag;
 
 	return &sk->sk_frag;
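
The first hunk splits the old sk_rx_queue_set() into a common __sk_rx_queue_set() helper taking a force_set flag: sk_rx_queue_set() now stores unconditionally, while the new sk_rx_queue_update() keeps the old compare-before-store behaviour, so the shared field is only dirtied when the recorded queue actually changes. A minimal standalone sketch of that split, in plain userspace C with a hypothetical sock_model struct (no READ_ONCE/WRITE_ONCE, so a model of the logic rather than the kernel code itself):

/* Sketch of the __sk_rx_queue_set() split; sock_model is made up. */
#include <stdbool.h>
#include <stdio.h>

struct sock_model {
	unsigned short rx_queue_mapping;
	unsigned long stores;	/* count writes to the shared field */
};

static void rx_queue_set(struct sock_model *sk, unsigned short rx_queue,
			 bool force_set)
{
	/* force_set skips the compare and always stores; the
	 * compare-before-store path only writes when the recorded
	 * queue differs from the new one.
	 */
	if (force_set || sk->rx_queue_mapping != rx_queue) {
		sk->rx_queue_mapping = rx_queue;
		sk->stores++;
	}
}

int main(void)
{
	struct sock_model sk = { .rx_queue_mapping = 3 };

	rx_queue_set(&sk, 3, true);	/* like sk_rx_queue_set(): stores anyway */
	rx_queue_set(&sk, 3, false);	/* like sk_rx_queue_update(): no store  */
	printf("stores: %lu\n", sk.stores);	/* prints 1 */
	return 0;
}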
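
The second hunk replaces the gfpflags_normal_context() test with an explicit three-flag check, per the updated comment: the per-task page_frag is only safe when the caller can block (__GFP_DIRECT_RECLAIM set), is not dipping into emergency reserves (__GFP_MEMALLOC clear), and may re-enter the filesystem (__GFP_FS set, i.e. not reclaim or a page fault running with GFP_NOFS). The sketch below replays that truth table in standalone C; the flag bit values are illustrative stand-ins, not the kernel's real __GFP_* definitions:

/* Sketch of the sk_page_frag() flag test; bit values are made up. */
#include <stdbool.h>
#include <stdio.h>

#define __GFP_DIRECT_RECLAIM	0x1u	/* caller may sleep in reclaim */
#define __GFP_MEMALLOC		0x2u	/* caller uses emergency reserves */
#define __GFP_FS		0x4u	/* caller may re-enter FS code */

/* Mirrors the new check: all three bits are masked out, and the
 * result must be exactly "direct reclaim allowed + FS allowed".
 */
static bool use_task_frag(unsigned int sk_allocation)
{
	return (sk_allocation &
		(__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC | __GFP_FS)) ==
	       (__GFP_DIRECT_RECLAIM | __GFP_FS);
}

int main(void)
{
	/* GFP_KERNEL-like: reclaim + FS allowed -> per-task frag */
	printf("GFP_KERNEL-like: %d\n",
	       use_task_frag(__GFP_DIRECT_RECLAIM | __GFP_FS));
	/* GFP_ATOMIC-like: no direct reclaim -> per-socket frag */
	printf("GFP_ATOMIC-like: %d\n", use_task_frag(0));
	/* GFP_NOFS-like: reclaim but no FS re-entry -> per-socket frag */
	printf("GFP_NOFS-like:   %d\n", use_task_frag(__GFP_DIRECT_RECLAIM));
	/* __GFP_MEMALLOC set -> per-socket frag even if blocking is fine */
	printf("MEMALLOC:        %d\n",
	       use_task_frag(__GFP_DIRECT_RECLAIM | __GFP_FS | __GFP_MEMALLOC));
	return 0;
}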