author     David S. Miller <davem@davemloft.net>   2017-06-27 21:43:57 +0200
committer  David S. Miller <davem@davemloft.net>   2017-06-27 21:43:57 +0200
commit     ed9208be8d8f3ce164892ba7ca484501c881de66 (patch)
tree       4d593cd01e8f7783935fae566b9e2cb4a29159a6
parent     tcp: fix null ptr deref in getsockopt(..., TCP_ULP, ...) (diff)
parent     ipv6: udp: leverage scratch area helpers (diff)
download   linux-ed9208be8d8f3ce164892ba7ca484501c881de66.tar.xz
           linux-ed9208be8d8f3ce164892ba7ca484501c881de66.zip
Merge branch 'udp-ipv6-use-scratch-helpers'
Paolo Abeni says:

====================
ipv6: udp: exploit dev_scratch helpers

When bringing in the recent cache optimization for the UDP protocol, I forgot to leverage the newly introduced scratch area helpers in the UDPv6 code path. As a result, the UDPv6 implementation suffers an unnecessary performance penalty compared to v4.

This series aims to bring UDPv6 back onto an equal footing with v4: the first patch moves the shared helpers to the common include file, while the second uses them in the UDPv6 code.

This gives a 5-8% performance improvement for a system under flood with small UDPv6 packets. The performance delta is smaller than the one reported in the original patch set because the UDPv6 code path already leveraged some of the optimizations.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/udp.h  |  61
-rw-r--r--  net/ipv4/udp.c     |  60
-rw-r--r--  net/ipv6/udp.c     |  14
3 files changed, 70 insertions(+), 65 deletions(-)
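
Before the diff itself, a short self-contained sketch may help illustrate the dev_scratch caching pattern this series extends to UDPv6. It is an illustration only: fake_skb, set_scratch() and scratch_len() are made-up stand-ins, and only struct udp_dev_scratch and the udp_skb_*() helpers in the diff below are real kernel names. The idea is to pack everything recvmsg() needs at dequeue time (length, truesize, linearity, checksum state) into the single word-sized scratch slot while the packet is enqueued, so the later read touches one hot location instead of several cold sk_buff cache lines.

/*
 * Standalone user-space sketch of the dev_scratch pattern (not kernel
 * code): cache the fields needed at dequeue time in one word-sized
 * scratch slot and read them back from that word alone.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for the handful of sk_buff fields the real helpers consult. */
struct fake_skb {
        unsigned long dev_scratch;      /* one machine word of scratch space */
        uint32_t truesize;
        uint32_t len;
        bool nonlinear;
        bool csum_unnecessary;
};

/* Mirrors struct udp_dev_scratch from the diff; 16 bits are enough for
 * len because the UDP header has already been validated and pulled. */
struct scratch {
        uint32_t truesize;
        uint16_t len;
        bool is_linear;
        bool csum_unnecessary;
};

/* The kernel only uses this layout when BITS_PER_LONG == 64; mirror that
 * guard by refusing to build where the struct does not fit in one word. */
_Static_assert(sizeof(struct scratch) <= sizeof(unsigned long),
               "scratch layout must fit in one machine word");

/* Enqueue side: fill the scratch word while the skb data is still hot. */
static void set_scratch(struct fake_skb *skb)
{
        struct scratch s = {
                .truesize         = skb->truesize,
                .len              = (uint16_t)skb->len,
                .is_linear        = !skb->nonlinear,
                .csum_unnecessary = skb->csum_unnecessary,
        };

        memcpy(&skb->dev_scratch, &s, sizeof(s));
}

/* Dequeue side: recover len without touching the other skb fields. */
static uint16_t scratch_len(const struct fake_skb *skb)
{
        struct scratch s;

        memcpy(&s, &skb->dev_scratch, sizeof(s));
        return s.len;
}

int main(void)
{
        struct fake_skb skb = {
                .truesize         = 2048,
                .len              = 1200,
                .nonlinear        = false,
                .csum_unnecessary = true,
        };

        set_scratch(&skb);
        assert(scratch_len(&skb) == 1200);
        return 0;
}

The kernel helpers in the diff take the same shortcut by casting &skb->dev_scratch to struct udp_dev_scratch * rather than copying through memcpy, and their #else branches fall back to reading the plain skb fields on 32-bit builds.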
diff --git a/include/net/udp.h b/include/net/udp.h
index 1468dbd0f09a..972ce4baab6b 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -302,6 +302,67 @@ struct sock *__udp6_lib_lookup(struct net *net,
struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport);
+/* UDP uses skb->dev_scratch to cache as much information as possible and avoid
+ * possibly multiple cache miss on dequeue()
+ */
+#if BITS_PER_LONG == 64
+
+/* truesize, len and the bit needed to compute skb_csum_unnecessary will be on
+ * cold cache lines at recvmsg time.
+ * skb->len can be stored on 16 bits since the udp header has been already
+ * validated and pulled.
+ */
+struct udp_dev_scratch {
+ u32 truesize;
+ u16 len;
+ bool is_linear;
+ bool csum_unnecessary;
+};
+
+static inline unsigned int udp_skb_len(struct sk_buff *skb)
+{
+ return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
+}
+
+static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+ return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
+}
+
+static inline bool udp_skb_is_linear(struct sk_buff *skb)
+{
+ return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
+}
+
+#else
+static inline unsigned int udp_skb_len(struct sk_buff *skb)
+{
+ return skb->len;
+}
+
+static inline bool udp_skb_csum_unnecessary(struct sk_buff *skb)
+{
+ return skb_csum_unnecessary(skb);
+}
+
+static inline bool udp_skb_is_linear(struct sk_buff *skb)
+{
+ return !skb_is_nonlinear(skb);
+}
+#endif
+
+static inline int copy_linear_skb(struct sk_buff *skb, int len, int off,
+ struct iov_iter *to)
+{
+ int n, copy = len - off;
+
+ n = copy_to_iter(skb->data + off, copy, to);
+ if (n == copy)
+ return 0;
+
+ return -EFAULT;
+}
+
/*
* SNMP statistics for UDP and UDP-Lite
*/
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 47c7aa0501af..86fad2a14ac4 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1163,24 +1163,7 @@ out:
return ret;
}
-/* Copy as much information as possible into skb->dev_scratch to avoid
- * possibly multiple cache miss on dequeue();
- */
#if BITS_PER_LONG == 64
-
-/* we can store multiple info here: truesize, len and the bit needed to
- * compute skb_csum_unnecessary will be on cold cache lines at recvmsg
- * time.
- * skb->len can be stored on 16 bits since the udp header has been already
- * validated and pulled.
- */
-struct udp_dev_scratch {
- u32 truesize;
- u16 len;
- bool is_linear;
- bool csum_unnecessary;
-};
-
static void udp_set_dev_scratch(struct sk_buff *skb)
{
struct udp_dev_scratch *scratch;
@@ -1197,22 +1180,6 @@ static int udp_skb_truesize(struct sk_buff *skb)
{
return ((struct udp_dev_scratch *)&skb->dev_scratch)->truesize;
}
-
-static unsigned int udp_skb_len(struct sk_buff *skb)
-{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->len;
-}
-
-static bool udp_skb_csum_unnecessary(struct sk_buff *skb)
-{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->csum_unnecessary;
-}
-
-static bool udp_skb_is_linear(struct sk_buff *skb)
-{
- return ((struct udp_dev_scratch *)&skb->dev_scratch)->is_linear;
-}
-
#else
static void udp_set_dev_scratch(struct sk_buff *skb)
{
@@ -1223,21 +1190,6 @@ static int udp_skb_truesize(struct sk_buff *skb)
{
return skb->dev_scratch;
}
-
-static unsigned int udp_skb_len(struct sk_buff *skb)
-{
- return skb->len;
-}
-
-static bool udp_skb_csum_unnecessary(struct sk_buff *skb)
-{
- return skb_csum_unnecessary(skb);
-}
-
-static bool udp_skb_is_linear(struct sk_buff *skb)
-{
- return !skb_is_nonlinear(skb);
-}
#endif
/* fully reclaim rmem/fwd memory allocated for skb */
@@ -1598,18 +1550,6 @@ busy_check:
}
EXPORT_SYMBOL_GPL(__skb_recv_udp);
-static int copy_linear_skb(struct sk_buff *skb, int len, int off,
- struct iov_iter *to)
-{
- int n, copy = len - off;
-
- n = copy_to_iter(skb->data + off, copy, to);
- if (n == copy)
- return 0;
-
- return -EFAULT;
-}
-
/*
* This should be easy, if there is something there we
* return it, otherwise we block.
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d1d728805729..450829dd6384 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -362,7 +362,7 @@ try_again:
if (!skb)
return err;
- ulen = skb->len;
+ ulen = udp_skb_len(skb);
copied = len;
if (copied > ulen - off)
copied = ulen - off;
@@ -379,14 +379,18 @@ try_again:
if (copied < ulen || peeking ||
(is_udplite && UDP_SKB_CB(skb)->partial_cov)) {
- checksum_valid = !udp_lib_checksum_complete(skb);
+ checksum_valid = udp_skb_csum_unnecessary(skb) ||
+ !__udp_lib_checksum_complete(skb);
if (!checksum_valid)
goto csum_copy_err;
}
- if (checksum_valid || skb_csum_unnecessary(skb))
- err = skb_copy_datagram_msg(skb, off, msg, copied);
- else {
+ if (checksum_valid || udp_skb_csum_unnecessary(skb)) {
+ if (udp_skb_is_linear(skb))
+ err = copy_linear_skb(skb, copied, off, &msg->msg_iter);
+ else
+ err = skb_copy_datagram_msg(skb, off, msg, copied);
+ } else {
err = skb_copy_and_csum_datagram_msg(skb, off, msg);
if (err == -EINVAL)
goto csum_copy_err;