author | John Fastabend <john.fastabend@gmail.com> | 2014-09-28 20:52:56 +0200
committer | David S. Miller <davem@davemloft.net> | 2014-09-30 07:02:26 +0200
commit | 22e0f8b9322cb1a48b1357e8f4ae6f5a9eca8cfa (patch)
tree | 2c9ef18dca9d9a441d92ea57cf7f7a292f4ceb3f /include/net/gen_stats.h
parent | macvlan: add source mode (diff)
net: sched: make bstats per cpu and estimator RCU safe
In order to run qdiscs without locking, statistics and estimators need to be handled correctly.
To resolve bstats, make the statistics per-CPU. And because this is only needed for qdiscs that run without locks, which will not be the case for most qdiscs in the near future, per-CPU stats are created only when a qdisc sets the TCQ_F_CPUSTATS flag.
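For illustration, updating these per-CPU counters on the lockless datapath could look like the sketch below. The helper name cpu_bstats_update is hypothetical, not from this patch; the struct layout matches the gnet_stats_basic_cpu introduced here, and u64_stats_update_begin()/u64_stats_update_end() are the standard kernel primitives that keep the 64-bit byte counter consistent for readers on 32-bit systems.

#include <linux/u64_stats_sync.h>
#include <net/gen_stats.h>

/* Hypothetical helper (not part of this patch): bump the current CPU's
 * copy of the basic stats without taking the qdisc lock. */
static inline void cpu_bstats_update(struct gnet_stats_basic_cpu __percpu *cpu_bstats,
				     unsigned int pkt_len)
{
	struct gnet_stats_basic_cpu *b = this_cpu_ptr(cpu_bstats);

	u64_stats_update_begin(&b->syncp);
	b->bstats.bytes += pkt_len;
	b->bstats.packets++;
	u64_stats_update_end(&b->syncp);
}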
Next, because estimators use the bstats to calculate packets per second and bytes per second, the estimator code paths are updated to read from the per-CPU statistics.
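A sketch of that aggregation, in the spirit of the new __gnet_stats_copy_basic() (the name sum_cpu_bstats and the exact loop are illustrative, not this patch's code): each CPU's counters are read under its u64_stats_sync sequence and accumulated into a single gnet_stats_basic_packed that the estimator can sample.

#include <linux/u64_stats_sync.h>
#include <net/gen_stats.h>

/* Illustrative only: fold the per-CPU counters into one total so the
 * rate estimator can derive bytes/packets per second from it. */
static void sum_cpu_bstats(struct gnet_stats_basic_packed *sum,
			   struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		/* Retry if a writer on this CPU raced with us. */
		do {
			start = u64_stats_fetch_begin(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry(&bcpu->syncp, start));

		sum->bytes += bytes;
		sum->packets += packets;
	}
}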
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/gen_stats.h')
-rw-r--r-- | include/net/gen_stats.h | 11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ea4271dceff0..ce3c1281f2a0 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -6,6 +6,11 @@
 #include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>
 
+struct gnet_stats_basic_cpu {
+	struct gnet_stats_basic_packed bstats;
+	struct u64_stats_sync syncp;
+};
+
 struct gnet_dump {
 	spinlock_t *      lock;
 	struct sk_buff *  skb;
@@ -27,7 +32,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
 				 spinlock_t *lock, struct gnet_dump *d);
 
 int gnet_stats_copy_basic(struct gnet_dump *d,
+			  struct gnet_stats_basic_cpu __percpu *cpu,
 			  struct gnet_stats_basic_packed *b);
+void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+			     struct gnet_stats_basic_cpu __percpu *cpu,
+			     struct gnet_stats_basic_packed *b);
 int gnet_stats_copy_rate_est(struct gnet_dump *d,
 			     const struct gnet_stats_basic_packed *b,
 			     struct gnet_stats_rate_est64 *r);
@@ -37,11 +46,13 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
 int gnet_stats_finish_copy(struct gnet_dump *d);
 
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
 		      struct gnet_stats_rate_est64 *rate_est,
 		      spinlock_t *stats_lock, struct nlattr *opt);
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_rate_est64 *rate_est);
 int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
 			  struct gnet_stats_rate_est64 *rate_est,
 			  spinlock_t *stats_lock, struct nlattr *opt);
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
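For context, a caller-side sketch of how a qdisc might opt in, assuming the struct Qdisc fields (cpu_bstats, rate_est) and the TCQ_F_CPUSTATS flag added elsewhere in this commit; the helper name and error handling are illustrative only:

#include <net/sch_generic.h>
#include <net/gen_stats.h>

/* Hypothetical init helper (not in this patch): wire a qdisc up for
 * per-CPU stats and attach a rate estimator over them. */
static int sketch_init_cpu_stats(struct Qdisc *sch, spinlock_t *stats_lock,
				 struct nlattr *est_opt)
{
	int i;

	sch->cpu_bstats = alloc_percpu(struct gnet_stats_basic_cpu);
	if (!sch->cpu_bstats)
		return -ENOMEM;

	/* u64_stats_sync needs explicit init (a no-op on 64-bit). */
	for_each_possible_cpu(i)
		u64_stats_init(&per_cpu_ptr(sch->cpu_bstats, i)->syncp);

	sch->flags |= TCQ_F_CPUSTATS;

	/* cpu_bstats is the argument this patch adds; a qdisc without
	 * TCQ_F_CPUSTATS would pass NULL here and keep the old behavior. */
	return gen_new_estimator(&sch->bstats, sch->cpu_bstats,
				 &sch->rate_est, stats_lock, est_opt);
}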