Diffstat (limited to 'include/net/tcp.h')
 include/net/tcp.h | 26 +++++++++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 5b82d4d94834..207147b4c6b2 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -805,7 +805,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
+ skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
return true;
#endif
return false;
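
The new `skb &&` test is a defensive NULL check: this helper can be reached from socket-lookup paths that have no packet in hand, and `TCP_SKB_CB(skb)` would otherwise dereference a NULL pointer. A minimal, hypothetical illustration (the caller name is assumed, not part of this patch):

/* Hypothetical caller doing a lookup with no skb available; with the
 * added guard this safely evaluates to false instead of crashing in
 * TCP_SKB_CB(NULL).
 */
static bool lookup_without_skb(struct net *net)
{
	return inet_exact_dif_match(net, NULL);
}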
@@ -958,6 +958,7 @@ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
u32 tcp_reno_ssthresh(struct sock *sk);
+u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
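
The prototype alone does not convey the semantics: an undo hook reports the congestion window to restore when a loss event turns out to be spurious. As a hedged sketch, a Reno-style body (the authoritative version lives in net/ipv4/tcp_cong.c) would look roughly like:

/* Approximate Reno undo: revert the reduction applied at loss time by
 * returning the larger of the current cwnd and twice ssthresh.
 */
u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
}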
@@ -1220,6 +1221,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+int tcp_filter(struct sock *sk, struct sk_buff *skb);
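
tcp_filter() gives TCP its own entry point into socket-filter evaluation. A plausible implementation (hedged; the real definition is in net/ipv4/tcp_ipv4.c) runs the filter through sk_filter_trim_cap() so that a filter verdict can never trim the skb below the TCP header itself:

/* Approximate shape: cap any filter-driven trimming at the TCP header
 * length (th->doff * 4) so later header accesses stay valid.
 */
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}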
#undef STATE_TRACE
@@ -1514,11 +1516,26 @@ struct tcp_fastopen_context {
struct rcu_head rcu;
};
+/* Latencies incurred by various limits for a sender. They are
+ * chronograph-like stats that are mutually exclusive.
+ */
+enum tcp_chrono {
+ TCP_CHRONO_UNSPEC,
+ TCP_CHRONO_BUSY, /* Actively sending data (non-empty write queue) */
+ TCP_CHRONO_RWND_LIMITED, /* Stalled by insufficient receive window */
+ TCP_CHRONO_SNDBUF_LIMITED, /* Stalled by insufficient send buffer */
+ __TCP_CHRONO_MAX,
+};
+
+void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type);
+void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type);
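
These start/stop calls are meant to bracket the mutually exclusive sender states above, so at most one chronograph runs at a time. A hedged sketch of the intended call pattern (simplified; not the actual tcp_output.c logic):

/* Example accounting: when the sender stalls on the peer's receive
 * window, start the RWND_LIMITED clock; stop it once sending resumes.
 * Only one tcp_chrono state is timed at any moment.
 */
static void account_rwnd_stall(struct sock *sk, bool stalled)
{
	if (stalled)
		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
	else
		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
}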
+
/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
struct sk_buff *skb;
+ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
sk_wmem_free_skb(sk, skb);
sk_mem_reclaim(sk);
@@ -1577,8 +1594,10 @@ static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
- if (sk->sk_send_head == skb_unlinked)
+ if (sk->sk_send_head == skb_unlinked) {
sk->sk_send_head = NULL;
+ tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
+ }
if (tcp_sk(sk)->highest_sack == skb_unlinked)
tcp_sk(sk)->highest_sack = NULL;
}
@@ -1600,6 +1619,7 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
/* Queue it, remembering where we must start sending. */
if (sk->sk_send_head == NULL) {
sk->sk_send_head = skb;
+ tcp_chrono_start(sk, TCP_CHRONO_BUSY);
if (tcp_sk(sk)->highest_sack == NULL)
tcp_sk(sk)->highest_sack = skb;
@@ -1807,7 +1827,7 @@ struct tcp_request_sock_ops {
struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
const struct request_sock *req,
bool *strict);
- __u32 (*init_seq)(const struct sk_buff *skb);
+ __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
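
The added `u32 *tsoff` out-parameter lets the ISN generator hand back a per-connection timestamp offset along with the initial sequence number. An IPv4 implementation of this hook would plausibly look like the following (hedged; the actual wiring is in net/ipv4/tcp_ipv4.c):

/* Plausible IPv4 init_seq after this change: the secure ISN generator
 * also derives a per-connection timestamp offset via @tsoff.
 */
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source, tsoff);
}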