author     Eric Dumazet <eric.dumazet@gmail.com>    2010-06-22 14:44:11 +0200
committer  David S. Miller <davem@davemloft.net>    2010-06-26 06:33:16 +0200
commit     5eaa0bd81f93225b6d1972b373ed00ca763052b2 (patch)
tree       44b8ddc198dfe097de65b5a13ffb9c9a65499fa1
parent     arp: RCU change in arp_solicit() (diff)
loopback: use u64_stats_sync infrastructure
Commit 6b10de38f0ef (loopback: Implement 64bit stats on 32bit arches)
introduced 64bit stats in the loopback driver, using a private seqcount
and private helpers.
David suggested introducing a generic infrastructure instead, which was
added in (net: Introduce u64_stats_sync infrastructure).
This patch reimplements the loopback 64bit stats on top of the
u64_stats_sync infrastructure.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
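For context, the generic helpers used below are essentially a thin wrapper
around a seqcount that exists only on 32-bit SMP builds; on 64-bit kernels
(and 32-bit UP) the update/fetch calls compile away. A simplified sketch,
assuming the helpers look roughly as they did when the infrastructure was
merged (abridged, not verbatim from include/linux/u64_stats_sync.h):

#include <linux/seqlock.h>

/* Abridged sketch: only 32-bit SMP pays for a seqcount. */
struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        seqcount_t      seq;
#endif
};

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        write_seqcount_begin(&syncp->seq);
#endif
}
/* u64_stats_update_end() mirrors the above with write_seqcount_end(). */

static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        return read_seqcount_begin(&syncp->seq);
#else
        return 0;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                                         unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        return read_seqcount_retry(&syncp->seq, start);
#else
        return false;
#endif
}

Because the struct is empty on 64-bit builds, embedding a u64_stats_sync in
struct pcpu_lstats costs nothing there, which is why the driver-private
#if/#else helpers removed by this patch are no longer needed.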
-rw-r--r--  drivers/net/loopback.c  62
1 file changed, 16 insertions(+), 46 deletions(-)
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 09334f8f148b..4dd0510d7a99 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -58,53 +58,15 @@
 #include <linux/tcp.h>
 #include <linux/percpu.h>
 #include <net/net_namespace.h>
+#include <linux/u64_stats_sync.h>

 struct pcpu_lstats {
-        u64 packets;
-        u64 bytes;
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-        seqcount_t seq;
-#endif
-        unsigned long drops;
+        u64                     packets;
+        u64                     bytes;
+        struct u64_stats_sync   syncp;
+        unsigned long           drops;
 };

-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-        write_seqcount_begin(&lstats->seq);
-}
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-        write_seqcount_end(&lstats->seq);
-}
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
-        u64 tpackets, tbytes;
-        unsigned int seq;
-
-        do {
-                seq = read_seqcount_begin(&lstats->seq);
-                tpackets = lstats->packets;
-                tbytes = lstats->bytes;
-        } while (read_seqcount_retry(&lstats->seq, seq));
-
-        *packets += tpackets;
-        *bytes += tbytes;
-}
-#else
-static void inline lstats_update_begin(struct pcpu_lstats *lstats)
-{
-}
-static void inline lstats_update_end(struct pcpu_lstats *lstats)
-{
-}
-static void inline lstats_fetch_and_add(u64 *packets, u64 *bytes, const struct pcpu_lstats *lstats)
-{
-        *packets += lstats->packets;
-        *bytes += lstats->bytes;
-}
-#endif
-
 /*
  * The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
@@ -126,10 +88,10 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
         len = skb->len;
         if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
-                lstats_update_begin(lb_stats);
+                u64_stats_update_begin(&lb_stats->syncp);
                 lb_stats->bytes += len;
                 lb_stats->packets++;
-                lstats_update_end(lb_stats);
+                u64_stats_update_end(&lb_stats->syncp);
         } else
                 lb_stats->drops++;
@@ -148,10 +110,18 @@ static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev)
         pcpu_lstats = (void __percpu __force *)dev->ml_priv;
         for_each_possible_cpu(i) {
                 const struct pcpu_lstats *lb_stats;
+                u64 tbytes, tpackets;
+                unsigned int start;

                 lb_stats = per_cpu_ptr(pcpu_lstats, i);
-                lstats_fetch_and_add(&packets, &bytes, lb_stats);
+                do {
+                        start = u64_stats_fetch_begin(&lb_stats->syncp);
+                        tbytes = lb_stats->bytes;
+                        tpackets = lb_stats->packets;
+                } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
                 drops += lb_stats->drops;
+                bytes += tbytes;
+                packets += tpackets;
         }
         stats->rx_packets = packets;
         stats->tx_packets = packets;
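The same recipe generalizes to any driver that keeps per-cpu 64-bit counters.
A condensed, hypothetical sketch follows (struct foo_stats, foo_stats_add()
and foo_fold_stats() are illustrative names, not part of this patch; writer
exclusion is assumed to come from the calling context, as it does for
loopback where the xmit path runs with BHs disabled):

#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct foo_stats {
        u64                     packets;
        u64                     bytes;
        struct u64_stats_sync   syncp;
};

/* writer side: per-cpu, already serialized by the caller (e.g. BH context) */
static void foo_stats_add(struct foo_stats __percpu *stats, unsigned int len)
{
        struct foo_stats *s = this_cpu_ptr(stats);

        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* reader side: fold a consistent snapshot from every cpu */
static void foo_fold_stats(struct foo_stats __percpu *stats,
                           struct rtnl_link_stats64 *out)
{
        int i;

        for_each_possible_cpu(i) {
                const struct foo_stats *s = per_cpu_ptr(stats, i);
                u64 tpackets, tbytes;
                unsigned int start;

                do {
                        start = u64_stats_fetch_begin(&s->syncp);
                        tpackets = s->packets;
                        tbytes   = s->bytes;
                } while (u64_stats_fetch_retry(&s->syncp, start));

                out->rx_packets += tpackets;
                out->rx_bytes   += tbytes;
        }
}

On 64-bit kernels u64_stats_fetch_retry() is always false, so the compiler
drops the retry loop and the reads are plain 64-bit loads; only 32-bit SMP
ever spins, and only while a writer is mid-update.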