author     David S. Miller <davem@davemloft.net>  2019-06-01 06:21:18 +0200
committer  David S. Miller <davem@davemloft.net>  2019-06-01 06:21:18 +0200
commit     0462eaacee493f7e2d87551a35d38be93ca723f8
tree       c2d454ff64156281c9b4ce071194cb9a47e5dd1a /net/ipv4
parent     Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirshe...
parent     selftests/bpf: measure RTT from xdp using xdping
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:
====================
pull-request: bpf-next 2019-05-31
The following pull-request contains BPF updates for your *net-next* tree.
Lots of exciting new features in the first PR of this development cycle!
The main changes are:
1) misc verifier improvements, from Alexei.
2) bpftool can now convert btf to valid C, from Andrii.
3) verifier can insert explicit ZEXT insn when requested by 32-bit JITs.
This feature greatly improves BPF speed on 32-bit architectures. From Jiong.
4) cgroups will now auto-detach bpf programs. This fixes the issue of
thousands of bpf programs getting stuck in dying cgroups. From Roman.
5) new bpf_send_signal() helper, from Yonghong (sketched after the message).
6) cgroup inet skb programs can signal CN to the stack, from Lawrence
(sketched after the message; see also the ip_output.c diff below).
7) miscellaneous cleanups, from many developers.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
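Item 5 in action: a minimal sketch of calling the new bpf_send_signal() helper from a tracing program. The kprobe attach point and the signal number are illustrative assumptions, not part of this merge, and bpf_helpers.h is assumed to come from libbpf.

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch of bpf_send_signal() (item 5 above).  The attach
 * point and signal number are illustrative, not from this merge.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/__x64_sys_nanosleep")
int send_sig_on_nanosleep(void *ctx)
{
	/* Deliver SIGUSR1 (10 on x86-64) to the current task. */
	bpf_send_signal(10);
	return 0;
}

char _license[] SEC("license") = "GPL";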
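Item 6 refers to the extended return codes for cgroup inet skb egress programs introduced in this series: 0 = drop, 1 = allow, 2 = drop and signal congestion notification (CN), 3 = allow and signal CN. A minimal sketch, with the 1400-byte threshold as an arbitrary illustration:

// SPDX-License-Identifier: GPL-2.0
/* Hedged sketch of a cgroup_skb/egress program using the extended
 * return codes: 0 = drop, 1 = allow, 2 = drop + CN, 3 = allow + CN.
 * The size threshold is an arbitrary illustration.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int egress_signal_cn(struct __sk_buff *skb)
{
	if (skb->len > 1400)
		return 3;	/* allow, but ask the stack to signal CN */
	return 1;		/* allow */
}

char _license[] SEC("license") = "GPL";

The diff below shows the consumer side: BPF_CGROUP_RUN_PROG_INET_EGRESS() now returns NET_XMIT_SUCCESS, NET_XMIT_CN, or an error, and ip_finish_output()/ip_mc_finish_output() act accordingly.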
Diffstat (limited to 'net/ipv4')
 net/ipv4/ip_output.c | 34 +++++++++++++++++++++++-----------
1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d8f2a22c6ff7..ceca5285d9b4 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -287,16 +287,9 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk,
 	return ret;
 }
 
-static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	unsigned int mtu;
-	int ret;
-
-	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
-	if (ret) {
-		kfree_skb(skb);
-		return ret;
-	}
 
 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
 	/* Policy lookup after SNAT yielded a new policy */
@@ -315,18 +308,37 @@ static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *sk
 	return ip_finish_output2(net, sk, skb);
 }
 
+static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	int ret;
+
+	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
+	switch (ret) {
+	case NET_XMIT_SUCCESS:
+		return __ip_finish_output(net, sk, skb);
+	case NET_XMIT_CN:
+		return __ip_finish_output(net, sk, skb) ? : ret;
+	default:
+		kfree_skb(skb);
+		return ret;
+	}
+}
+
 static int ip_mc_finish_output(struct net *net, struct sock *sk,
 			       struct sk_buff *skb)
 {
 	int ret;
 
 	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
-	if (ret) {
+	switch (ret) {
+	case NET_XMIT_SUCCESS:
+		return dev_loopback_xmit(net, sk, skb);
+	case NET_XMIT_CN:
+		return dev_loopback_xmit(net, sk, skb) ? : ret;
+	default:
 		kfree_skb(skb);
 		return ret;
 	}
-
-	return dev_loopback_xmit(net, sk, skb);
 }
 
 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
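A note on the "? :" in the NET_XMIT_CN branches: it is the GNU "elvis" extension, where expr ? : fallback evaluates expr once and yields it if non-zero, otherwise the fallback. So a real error from __ip_finish_output() or dev_loopback_xmit() takes precedence, while a 0 (success) is upgraded to NET_XMIT_CN. A standalone userspace sketch with hypothetical stand-in values (in the kernel, NET_XMIT_SUCCESS = 0 and NET_XMIT_CN = 2):

#include <stdio.h>

/* "a ? : b" is GNU C for "a ? a : b", with a evaluated only once. */
static int finish(int xmit_ret, int cn_ret)
{
	return xmit_ret ? : cn_ret;
}

int main(void)
{
	printf("%d\n", finish(0, 2));	/* 2: success upgraded to CN     */
	printf("%d\n", finish(-1, 2));	/* -1: a real error wins over CN */
	return 0;
}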