author     brakmo <brakmo@fb.com>                  2019-05-29 01:59:40 +0200
committer  Alexei Starovoitov <ast@kernel.org>     2019-06-01 01:41:29 +0200
commit     d58c6f7212f4eda59ca94cbfbaa785dde7675456 (patch)
tree       84c6e60b4f3ca303da138d79f55747c6222213eb /samples/bpf/hbm_kern.h
parent     bpf: Add cn support to hbm_out_kern.c (diff)
bpf: Add more stats to HBM
Adds more stats to HBM, including the average cwnd and rtt of all TCP
flows, the percentage of packets that are ECN CE marked, and the
distribution of return values.
Signed-off-by: Lawrence Brakmo <brakmo@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
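
The averages and percentages named above are not stored directly; the patch accumulates raw counters (sum_cwnd, sum_cwnd_cnt, sum_rtt, pkts_ecn_ce, returnValCount[]) in the queue_stats map and leaves the arithmetic to user space. Below is a minimal, illustrative sketch of that post-processing: the struct is a trimmed stand-in for hbm_queue_stats and print_hbm_stats() is a hypothetical helper, not the actual hbm.c code.

```c
#include <stdio.h>

/* Trimmed userspace mirror of the hbm_queue_stats fields used here,
 * not the real struct from hbm.h.
 */
struct hbm_stats_view {
	unsigned long long pkts_total;
	unsigned long long pkts_ecn_ce;
	unsigned long long sum_cwnd;
	unsigned long long sum_cwnd_cnt;
	unsigned long long sum_rtt;
	unsigned long long returnValCount[4];
};

/* Hypothetical helper: turn the accumulated sums into the averages and
 * percentages mentioned in the commit message.
 */
static void print_hbm_stats(const struct hbm_stats_view *qs)
{
	if (qs->sum_cwnd_cnt) {
		printf("avg cwnd: %llu segments\n",
		       qs->sum_cwnd / qs->sum_cwnd_cnt);
		/* rtt and cwnd samples come from the same get_tcp_info()
		 * calls, so reuse the same divisor as an approximation
		 */
		printf("avg rtt:  %llu us\n", qs->sum_rtt / qs->sum_cwnd_cnt);
	}
	if (qs->pkts_total)
		printf("ECN CE marked: %.1f%%\n",
		       100.0 * qs->pkts_ecn_ce / qs->pkts_total);
	printf("returns: drop=%llu allow=%llu drop+cwr=%llu allow+cwr=%llu\n",
	       qs->returnValCount[0], qs->returnValCount[1],
	       qs->returnValCount[2], qs->returnValCount[3]);
}

int main(void)
{
	/* example values only, for demonstration */
	struct hbm_stats_view qs = {
		.pkts_total = 1000, .pkts_ecn_ce = 120,
		.sum_cwnd = 20000, .sum_cwnd_cnt = 800,
		.sum_rtt = 40000000,
		.returnValCount = { 5, 900, 10, 85 },
	};

	print_hbm_stats(&qs);
	return 0;
}
```

In a real deployment the counters would be read from the queue_stats BPF map rather than filled in by hand; the division step is the same either way.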
Diffstat (limited to 'samples/bpf/hbm_kern.h')
-rw-r--r--   samples/bpf/hbm_kern.h | 66
1 file changed, 64 insertions(+), 2 deletions(-)
diff --git a/samples/bpf/hbm_kern.h b/samples/bpf/hbm_kern.h
index 41384be233b9..be19cf1d5cd5 100644
--- a/samples/bpf/hbm_kern.h
+++ b/samples/bpf/hbm_kern.h
@@ -65,17 +65,43 @@ struct bpf_map_def SEC("maps") queue_stats = {
 BPF_ANNOTATE_KV_PAIR(queue_stats, int, struct hbm_queue_stats);
 
 struct hbm_pkt_info {
+        int cwnd;
+        int rtt;
         bool is_ip;
         bool is_tcp;
         short ecn;
 };
 
+static int get_tcp_info(struct __sk_buff *skb, struct hbm_pkt_info *pkti)
+{
+        struct bpf_sock *sk;
+        struct bpf_tcp_sock *tp;
+
+        sk = skb->sk;
+        if (sk) {
+                sk = bpf_sk_fullsock(sk);
+                if (sk) {
+                        if (sk->protocol == IPPROTO_TCP) {
+                                tp = bpf_tcp_sock(sk);
+                                if (tp) {
+                                        pkti->cwnd = tp->snd_cwnd;
+                                        pkti->rtt = tp->srtt_us >> 3;
+                                        return 0;
+                                }
+                        }
+                }
+        }
+        return 1;
+}
+
 static __always_inline void hbm_get_pkt_info(struct __sk_buff *skb,
                                              struct hbm_pkt_info *pkti)
 {
         struct iphdr iph;
         struct ipv6hdr *ip6h;
 
+        pkti->cwnd = 0;
+        pkti->rtt = 0;
         bpf_skb_load_bytes(skb, 0, &iph, 12);
         if (iph.version == 6) {
                 ip6h = (struct ipv6hdr *)&iph;
@@ -91,6 +117,8 @@ static __always_inline void hbm_get_pkt_info(struct __sk_buff *skb,
                 pkti->is_tcp = false;
                 pkti->ecn = 0;
         }
+        if (pkti->is_tcp)
+                get_tcp_info(skb, pkti);
 }
 
 static __always_inline void hbm_init_vqueue(struct hbm_vqueue *qdp, int rate)
@@ -105,8 +133,14 @@ static __always_inline void hbm_update_stats(struct hbm_queue_stats *qsp,
                                              int len,
                                              unsigned long long curtime,
                                              bool congestion_flag,
-                                             bool drop_flag)
+                                             bool drop_flag,
+                                             bool cwr_flag,
+                                             bool ecn_ce_flag,
+                                             struct hbm_pkt_info *pkti,
+                                             int credit)
 {
+        int rv = ALLOW_PKT;
+
         if (qsp != NULL) {
                 // Following is needed for work conserving
                 __sync_add_and_fetch(&(qsp->bytes_total), len);
@@ -116,7 +150,7 @@ static __always_inline void hbm_update_stats(struct hbm_queue_stats *qsp,
                                 qsp->firstPacketTime = curtime;
                         qsp->lastPacketTime = curtime;
                         __sync_add_and_fetch(&(qsp->pkts_total), 1);
-                        if (congestion_flag || drop_flag) {
+                        if (congestion_flag) {
                                 __sync_add_and_fetch(&(qsp->pkts_marked), 1);
                                 __sync_add_and_fetch(&(qsp->bytes_marked), len);
                         }
@@ -125,6 +159,34 @@ static __always_inline void hbm_update_stats(struct hbm_queue_stats *qsp,
                                 __sync_add_and_fetch(&(qsp->pkts_dropped), 1);
                                 __sync_add_and_fetch(&(qsp->bytes_dropped), len);
                         }
+                        if (ecn_ce_flag)
+                                __sync_add_and_fetch(&(qsp->pkts_ecn_ce), 1);
+                        if (pkti->cwnd) {
+                                __sync_add_and_fetch(&(qsp->sum_cwnd),
+                                                     pkti->cwnd);
+                                __sync_add_and_fetch(&(qsp->sum_cwnd_cnt), 1);
+                        }
+                        if (pkti->rtt)
+                                __sync_add_and_fetch(&(qsp->sum_rtt),
+                                                     pkti->rtt);
+                        __sync_add_and_fetch(&(qsp->sum_credit), credit);
+
+                        if (drop_flag)
+                                rv = DROP_PKT;
+                        if (cwr_flag)
+                                rv |= 2;
+                        if (rv == DROP_PKT)
+                                __sync_add_and_fetch(&(qsp->returnValCount[0]),
+                                                     1);
+                        else if (rv == ALLOW_PKT)
+                                __sync_add_and_fetch(&(qsp->returnValCount[1]),
+                                                     1);
+                        else if (rv == 2)
+                                __sync_add_and_fetch(&(qsp->returnValCount[2]),
+                                                     1);
+                        else if (rv == 3)
+                                __sync_add_and_fetch(&(qsp->returnValCount[3]),
+                                                     1);
                 }
         }
 }
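
For clarity, the four returnValCount[] buckets tallied at the end of hbm_update_stats() come from a two-bit encoding: the drop/allow verdict in bit 0 and the cwr indication in bit 1. Below is a self-contained sketch of that encoding, assuming DROP_PKT is 0 and ALLOW_PKT is 1 as defined in hbm_kern.h; hbm_return_bucket() is an illustrative stand-alone mirror of the rv computation, not a function from the patch.

```c
#include <assert.h>
#include <stdbool.h>

#define DROP_PKT  0	/* values as in hbm_kern.h */
#define ALLOW_PKT 1

/* Mirrors the rv computation in hbm_update_stats(): start from ALLOW_PKT,
 * downgrade to DROP_PKT on drop, then fold cwr into bit 1. The result is
 * the index into returnValCount[].
 */
static int hbm_return_bucket(bool drop_flag, bool cwr_flag)
{
	int rv = ALLOW_PKT;

	if (drop_flag)
		rv = DROP_PKT;
	if (cwr_flag)
		rv |= 2;
	return rv;
}

int main(void)
{
	assert(hbm_return_bucket(true,  false) == 0);	/* returnValCount[0]: drop        */
	assert(hbm_return_bucket(false, false) == 1);	/* returnValCount[1]: allow       */
	assert(hbm_return_bucket(true,  true)  == 2);	/* returnValCount[2]: drop + cwr  */
	assert(hbm_return_bucket(false, true)  == 3);	/* returnValCount[3]: allow + cwr */
	return 0;
}
```

Because drop and allow occupy bit 0 and cwr occupies bit 1, the four counters together give the distribution of return values mentioned in the commit message.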