author     Wei Wang <weiwan@google.com>           2017-04-20 23:45:47 +0200
committer  David S. Miller <davem@davemloft.net>  2017-04-24 20:27:17 +0200
commit     46c2fa39877ed70415ee2b1acfb9129e956f6de4
tree       a10691f0b2436b49fb8c172ea4be7b16693aa934 /net/ipv4
parent     net/tcp_fastopen: Disable active side TFO in certain scenarios
net/tcp_fastopen: Add snmp counter for blackhole detection
This counter records the number of times the firewall blackhole issue is
detected and active TFO is disabled.
Signed-off-by: Wei Wang <weiwan@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
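
The new MIB entry is exported the same way as the other snmp4_net_list counters, i.e. as a "TCPFastOpenBlackhole" column on the TcpExt lines of /proc/net/netstat. As a rough illustration only (this helper is not part of the patch, and the name read_tfo_blackhole() is invented), a userspace reader could look the value up like this, assuming the usual netstat layout of a "TcpExt:" header line of field names followed by a "TcpExt:" line of values in the same column order:

/*
 * Hypothetical userspace helper (not part of this patch): look up the
 * TCPFastOpenBlackhole counter in /proc/net/netstat.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long read_tfo_blackhole(void)
{
	char names[8192], values[8192];
	long result = -1;
	FILE *fp = fopen("/proc/net/netstat", "r");

	if (!fp)
		return -1;

	/* Read pairs of lines: header line, then the matching value line. */
	while (fgets(names, sizeof(names), fp) &&
	       fgets(values, sizeof(values), fp)) {
		int col = 0, target = -1;
		char *tok;

		if (strncmp(names, "TcpExt:", 7))
			continue;

		/* Find the column index of the field we care about. */
		tok = strtok(names, " \n");		/* skip "TcpExt:" */
		while ((tok = strtok(NULL, " \n")) != NULL) {
			if (!strcmp(tok, "TCPFastOpenBlackhole")) {
				target = col;
				break;
			}
			col++;
		}
		if (target < 0)
			break;	/* field absent: kernel without this patch */

		/* Pull the value from the same column of the value line. */
		tok = strtok(values, " \n");		/* skip "TcpExt:" */
		for (col = 0; (tok = strtok(NULL, " \n")) != NULL; col++) {
			if (col == target) {
				result = strtol(tok, NULL, 10);
				break;
			}
		}
		break;
	}
	fclose(fp);
	return result;
}

int main(void)
{
	printf("TCPFastOpenBlackhole = %ld\n", read_tfo_blackhole());
	return 0;
}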
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/proc.c         | 1
-rw-r--r--   net/ipv4/tcp_fastopen.c | 5
-rw-r--r--   net/ipv4/tcp_input.c    | 4
3 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4ccbf464d1ac..fa44e752a9a3 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -281,6 +281,7 @@ static const struct snmp_mib snmp4_net_list[] = {
 	SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
 	SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
 	SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
+	SNMP_MIB_ITEM("TCPFastOpenBlackhole", LINUX_MIB_TCPFASTOPENBLACKHOLE),
 	SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
 	SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
 	SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING),
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index ff2d30ffc6f3..4af82b914dd4 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -410,10 +410,11 @@ static unsigned long tfo_active_disable_stamp __read_mostly;
 /* Disable active TFO and record current jiffies and
  * tfo_active_disable_times
  */
-void tcp_fastopen_active_disable(void)
+void tcp_fastopen_active_disable(struct sock *sk)
 {
 	atomic_inc(&tfo_active_disable_times);
 	tfo_active_disable_stamp = jiffies;
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE);
 }
 
 /* Reset tfo_active_disable_times to 0 */
@@ -469,7 +470,7 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
 	if (p && !rb_next(p)) {
 		skb = rb_entry(p, struct sk_buff, rbnode);
 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
-			tcp_fastopen_active_disable();
+			tcp_fastopen_active_disable(sk);
 			return;
 		}
 	}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 9f342a67dc74..5af2f04f8859 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5307,7 +5307,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 		 */
 		if (tp->syn_fastopen && !tp->data_segs_in &&
 		    sk->sk_state == TCP_ESTABLISHED)
-			tcp_fastopen_active_disable();
+			tcp_fastopen_active_disable(sk);
 		tcp_send_challenge_ack(sk, skb);
 	}
 	goto discard;
@@ -6061,7 +6061,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 	    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
 		/* Receive out of order FIN after close() */
 		if (tp->syn_fastopen && th->fin)
-			tcp_fastopen_active_disable();
+			tcp_fastopen_active_disable(sk);
 		tcp_done(sk);
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 		return 1;
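
A side effect of the signature change worth noting: taking the struct sock pointer is what lets tcp_fastopen_active_disable() charge the event to the right place, since NET_INC_STATS(sock_net(sk), ...) resolves the per-netns statistics for the socket that hit the blackhole, matching the existing LINUX_MIB_TCPABORTONDATA usage in tcp_input.c. A hypothetical future detection path would follow the same pattern as the three call sites updated above; the function and condition below are invented purely for illustration:

/* Sketch of a hypothetical extra call site (not in this patch): any code
 * that decides the TFO SYN+data was blackholed just passes its socket in,
 * and the MIB is bumped in that socket's network namespace.
 */
static void example_tfo_blackhole_seen(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Invented condition: TFO was attempted and no data ever arrived. */
	if (tp->syn_fastopen && !tp->data_segs_in)
		tcp_fastopen_active_disable(sk);  /* also counts TCPFastOpenBlackhole */
}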