summaryrefslogtreecommitdiffstats
path: root/net/sched/sch_hhf.c
diff options
context:
space:
mode:
author    John Fastabend <john.fastabend@gmail.com> 2014-09-28 20:53:29 +0200
committer David S. Miller <davem@davemloft.net> 2014-09-30 07:02:26 +0200
commit    25331d6ce42bcf4b34b6705fce4da15c3fabe62f (patch)
tree      1c2d92a220f90f155de62a435753f09ff0c9ce91 /net/sched/sch_hhf.c
parent    net: sched: make bstats per cpu and estimator RCU safe (diff)
downloadlinux-25331d6ce42bcf4b34b6705fce4da15c3fabe62f.tar.xz
linux-25331d6ce42bcf4b34b6705fce4da15c3fabe62f.zip
net: sched: implement qstat helper routines
This adds helpers to manipulate qstats logic and replaces locations that touch the counters directly. This simplifies future patches to push qstats onto per-cpu counters.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_hhf.c')
-rw-r--r--  net/sched/sch_hhf.c | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index d85b6812a7d4..15d3aabfe250 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -376,8 +376,8 @@ static unsigned int hhf_drop(struct Qdisc *sch)
struct sk_buff *skb = dequeue_head(bucket);
sch->q.qlen--;
- sch->qstats.drops++;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_drop(sch);
+ qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb);
}
@@ -395,7 +395,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
bucket = &q->buckets[idx];
bucket_add(bucket, skb);
- sch->qstats.backlog += qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_inc(sch, skb);
if (list_empty(&bucket->bucketchain)) {
unsigned int weight;
@@ -457,7 +457,7 @@ begin:
if (bucket->head) {
skb = dequeue_head(bucket);
sch->q.qlen--;
- sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_qstats_backlog_dec(sch, skb);
}
if (!skb) {