author     Tom Herbert <therbert@google.com>      2014-08-22 22:34:44 +0200
committer  David S. Miller <davem@davemloft.net>  2014-08-25 03:09:24 +0200
commit     57c67ff4bd92af634f7c91c40eb02a96dd785dda (patch)
tree       f10aac8b764b6254075ebcba255285140d1cabea /net/ipv4/udp_offload.c
parent     tcp: Call skb_gro_checksum_validate (diff)
udp: additional GRO support
Implement GRO for UDPv6. Add UDP checksum verification in gro_receive for
both UDP4 and UDP6, calling skb_gro_checksum_validate_zero_check.

Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
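The diffstat below is limited to net/ipv4/udp_offload.c, so the UDPv6 half of the change is not shown on this page. As a minimal sketch only, assuming the IPv6 receive hook mirrors the udp4_gro_receive() handler added in this patch and substitutes an IPv6 pseudo-header helper (ip6_gro_compute_pseudo) for inet_gro_compute_pseudo, it would look roughly like this:

/* Sketch of the UDPv6 counterpart (outside this file); assumes it mirrors
 * udp4_gro_receive() below, differing only in the pseudo-header helper.
 */
static struct sk_buff **udp6_gro_receive(struct sk_buff **head,
                                         struct sk_buff *skb)
{
        struct udphdr *uh = udp_gro_udphdr(skb);

        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (unlikely(!uh) ||
            (!NAPI_GRO_CB(skb)->flush &&
             skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
                                                  ip6_gro_compute_pseudo))) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return udp_gro_receive(head, skb, uh);
}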
Diffstat (limited to 'net/ipv4/udp_offload.c')
-rw-r--r--  net/ipv4/udp_offload.c  61
1 file changed, 44 insertions(+), 17 deletions(-)
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 59035bc3008d..8ed460e3753c 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -228,29 +228,22 @@ unlock:
}
EXPORT_SYMBOL(udp_del_offload);
-static struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
+struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
+ struct udphdr *uh)
{
struct udp_offload_priv *uo_priv;
struct sk_buff *p, **pp = NULL;
- struct udphdr *uh, *uh2;
- unsigned int hlen, off;
+ struct udphdr *uh2;
+ unsigned int off = skb_gro_offset(skb);
int flush = 1;
if (NAPI_GRO_CB(skb)->udp_mark ||
- (!skb->encapsulation && skb->ip_summed != CHECKSUM_COMPLETE))
+ (!skb->encapsulation && !NAPI_GRO_CB(skb)->csum_valid))
goto out;
/* mark that this skb passed once through the udp gro layer */
NAPI_GRO_CB(skb)->udp_mark = 1;
-
- off = skb_gro_offset(skb);
- hlen = off + sizeof(*uh);
- uh = skb_gro_header_fast(skb, off);
- if (skb_gro_header_hard(skb, hlen)) {
- uh = skb_gro_header_slow(skb, hlen, off);
- if (unlikely(!uh))
- goto out;
- }
+ NAPI_GRO_CB(skb)->encapsulation++;
rcu_read_lock();
uo_priv = rcu_dereference(udp_offload_base);
@@ -269,7 +262,12 @@ unflush:
continue;
uh2 = (struct udphdr *)(p->data + off);
- if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
+
+ /* Match ports and require that the checksums are either both zero
+ * or both nonzero.
+ */
+ if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
+ (!uh->check ^ !uh2->check)) {
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
@@ -286,7 +284,24 @@ out:
return pp;
}
-static int udp_gro_complete(struct sk_buff *skb, int nhoff)
+static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
+ struct sk_buff *skb)
+{
+ struct udphdr *uh = udp_gro_udphdr(skb);
+
+ /* Don't bother verifying checksum if we're going to flush anyway. */
+ if (unlikely(!uh) ||
+ (!NAPI_GRO_CB(skb)->flush &&
+ skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
+ inet_gro_compute_pseudo))) {
+ NAPI_GRO_CB(skb)->flush = 1;
+ return NULL;
+ }
+
+ return udp_gro_receive(head, skb, uh);
+}
+
+int udp_gro_complete(struct sk_buff *skb, int nhoff)
{
struct udp_offload_priv *uo_priv;
__be16 newlen = htons(skb->len - nhoff);
@@ -311,12 +326,24 @@ static int udp_gro_complete(struct sk_buff *skb, int nhoff)
return err;
}
+int udp4_gro_complete(struct sk_buff *skb, int nhoff)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
+
+ if (uh->check)
+ uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
+ iph->daddr, 0);
+
+ return udp_gro_complete(skb, nhoff);
+}
+
static const struct net_offload udpv4_offload = {
.callbacks = {
.gso_send_check = udp4_ufo_send_check,
.gso_segment = udp4_ufo_fragment,
- .gro_receive = udp_gro_receive,
- .gro_complete = udp_gro_complete,
+ .gro_receive = udp4_gro_receive,
+ .gro_complete = udp4_gro_complete,
},
};
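The flow-matching hunk above only lets GRO coalesce UDP packets whose checksum fields agree on being zero or nonzero. A small stand-alone illustration of that predicate follows; the helper name is hypothetical and not part of the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the (!uh->check ^ !uh2->check) test above:
 * returns true when one checksum field is zero and the other is not,
 * i.e. the two packets must not be merged into the same GRO flow.
 */
static bool udp_csum_mode_mismatch(uint16_t check1, uint16_t check2)
{
        return !check1 ^ !check2;
}

int main(void)
{
        printf("%d\n", udp_csum_mode_mismatch(0, 0));           /* 0: both zero, may merge */
        printf("%d\n", udp_csum_mode_mismatch(0x1c4f, 0x2a10)); /* 0: both nonzero, may merge */
        printf("%d\n", udp_csum_mode_mismatch(0, 0x1c4f));      /* 1: mixed, do not merge */
        return 0;
}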