author | Paolo Abeni <pabeni@redhat.com> | 2021-07-28 18:24:03 +0200 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2021-07-29 13:18:12 +0200 |
commit | 5e10da5385d20c4bae587bc2921e5fdd9655d5fc (patch) | |
tree | b199e9c7b0e962d363ed6a4d06d6521af76018d9 /net/core/skbuff.c | |
parent | net: optimize GRO for the common case. (diff) | |
download | linux-5e10da5385d20c4bae587bc2921e5fdd9655d5fc.tar.xz linux-5e10da5385d20c4bae587bc2921e5fdd9655d5fc.zip |
skbuff: allow 'slow_gro' for skb carrying sock reference
This change leverages the infrastructure introduced by the previous
patches to let soft devices pass socket-owned skbs to the GRO engine
without impacting the fast path.
It's up to the GRO caller to ensure the slow_gro bit is valid before
invoking the GRO engine. The new helper skb_prepare_for_gro() is
introduced for that purpose.
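The helper itself lands outside net/core/skbuff.c (this view is filtered to that file), so the following is a hedged sketch of its expected shape rather than the patch's literal code:

```c
#include <linux/skbuff.h>
#include <net/sock.h>

/* Sketch: the skb may take the slow_gro path only when its destructor is
 * the plain sock_wfree(); any other destructor is not understood by the
 * GRO engine, so the skb is orphaned up front instead.
 */
static inline void skb_prepare_for_gro(struct sk_buff *skb)
{
	if (skb->destructor != sock_wfree)
		skb_orphan(skb);
	else
		skb->slow_gro = 1;
}
```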
When slow_gro is set, skbs are aggregated only if they reference the same sk.
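As an illustration of that rule, here is a hypothetical predicate (gro_sk_match() is not a kernel symbol; in the kernel the comparison is folded into GRO's flow-matching logic):

```c
/* Two skbs may be aggregated either when neither needs the slow path
 * (no sock references involved) or when both are owned by the same
 * socket, so the combined truesize can be charged to a single owner.
 */
static bool gro_sk_match(const struct sk_buff *p, const struct sk_buff *skb)
{
	if (!p->slow_gro && !skb->slow_gro)
		return true;	/* fast path: no sock references involved */

	return p->sk == skb->sk;
}
```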
Additionally, skb truesize is correctly updated on GRO recycle and free,
so that sk wmem accounting is not altered by GRO processing.
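To see why this keeps sk wmem stable, here is an illustrative sketch of the bookkeeping (gro_account_merge() is a hypothetical name; the real code is open-coded in skb_gro_receive(), shown in the diff below):

```c
/* The wmem charge taken when the socket sent the skb equals
 * skb->truesize. After a merge, 'delta_truesize' of that charge follows
 * the payload into p (released when p is freed, via the same sk's
 * sock_wfree()), while the residual 'new_truesize' stays on skb and is
 * released by skb_orphan() when the stolen head is recycled. The socket
 * is thus uncharged exactly once for every byte it was charged.
 */
static void gro_account_merge(struct sk_buff *p, struct sk_buff *skb,
			      unsigned int new_truesize)
{
	unsigned int delta_truesize = skb->truesize - new_truesize;

	skb->truesize = new_truesize;	/* residual charge kept on skb */
	p->truesize += delta_truesize;	/* the rest follows the data */
}
```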
rfc -> v1:
- fixed bad truesize on dev_gro_receive NAPI_FREE
- use the existing state bit
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r-- | net/core/skbuff.c | 17 |
1 file changed, 13 insertions, 4 deletions
```diff
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d04e286149cc..fcbd977186b0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -958,6 +958,7 @@ void napi_skb_free_stolen_head(struct sk_buff *skb)
 		nf_reset_ct(skb);
 		skb_dst_drop(skb);
 		skb_ext_put(skb);
+		skb_orphan(skb);
 		skb->slow_gro = 0;
 	}
 	napi_skb_cache_put(skb);
@@ -3892,6 +3893,9 @@ int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
 	NAPI_GRO_CB(p)->last = skb;
 	NAPI_GRO_CB(p)->count++;
 	p->data_len += skb->len;
+
+	/* sk ownership - if any - completely transferred to the aggregated packet */
+	skb->destructor = NULL;
 	p->truesize += skb->truesize;
 	p->len += skb->len;
 
@@ -4259,6 +4263,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 	unsigned int headlen = skb_headlen(skb);
 	unsigned int len = skb_gro_len(skb);
 	unsigned int delta_truesize;
+	unsigned int new_truesize;
 	struct sk_buff *lp;
 
 	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
@@ -4290,10 +4295,10 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 		skb_frag_size_sub(frag, offset);
 
 		/* all fragments truesize : remove (head size + sk_buff) */
-		delta_truesize = skb->truesize -
-				 SKB_TRUESIZE(skb_end_offset(skb));
+		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
+		delta_truesize = skb->truesize - new_truesize;
 
-		skb->truesize -= skb->data_len;
+		skb->truesize = new_truesize;
 		skb->len -= skb->data_len;
 		skb->data_len = 0;
 
@@ -4322,12 +4327,16 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
 		/* We dont need to clear skbinfo->nr_frags here */
 
-		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
+		new_truesize = SKB_TRUESIZE(sizeof(struct sk_buff));
+		delta_truesize = skb->truesize - new_truesize;
+		skb->truesize = new_truesize;
 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
 		goto done;
 	}
 
 merge:
+	/* sk ownership - if any - completely transferred to the aggregated packet */
+	skb->destructor = NULL;
 	delta_truesize = skb->truesize;
 	if (offset > headlen) {
 		unsigned int eat = offset - headlen;
```
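For context, a soft device would typically call the new helper just before handing the skb to GRO. The caller below is purely hypothetical (softdev_receive() is not part of this patch):

```c
/* Hypothetical caller, for illustration: a soft device receiving skbs
 * that may still be charged to a local socket prepares them before GRO,
 * so the GRO engine either sees an orphaned skb (fast path) or a
 * slow_gro one whose sock reference it now knows how to honor.
 */
static void softdev_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_prepare_for_gro(skb);
	napi_gro_receive(napi, skb);
}
```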