author     Jarek Poplawski <jarkao2@gmail.com>       2009-01-13 06:54:40 +0100
committer  David S. Miller <davem@davemloft.net>     2009-01-13 06:54:40 +0100
commit     a73be040650463eacb95f83d2e6673ac57b4fc59 (patch)
tree       ae3958ba9830448e18a0dedc82c8d7c031af2476 /net
parent     pkt_sched: sch_htb: Consider used jiffies in htb_do_events() (diff)
pkt_sched: sch_htb: Break all htb_do_events() after 2 jiffies
Currently htb_do_events() breaks event recounting for a level after 2 jiffies, but there is no reason to repeat this for the next levels and increase delays even more (with softirqs disabled). htb_dequeue_tree() can add to this too, btw. In such a case the q->now time is invalid anyway.

Thanks to Patrick McHardy for spotting an error in an earlier version of this patch.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
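In short, the change reads jiffies once per htb_dequeue() call and derives the 2-jiffy deadline from that single start value, so all levels share one budget instead of each htb_do_events() call granting itself a fresh one. A minimal userspace sketch of that pattern, using CLOCK_MONOTONIC in place of jiffies (process_level(), NLEVELS and the 2 ms budget are illustrative stand-ins, not names from the kernel source):

/*
 * Userspace analogue of the shared-budget pattern in this patch
 * (illustrative only; not taken from the kernel source).
 */
#include <stdio.h>
#include <time.h>

#define NLEVELS   8
#define BUDGET_NS (2LL * 1000 * 1000)   /* 2 ms total, shared by all levels */

static long long now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Handle pending events for one level, but stop as soon as the shared
 * deadline passes; the deadline comes from the caller rather than from a
 * fresh clock reading, which is the point of the patch. */
static void process_level(int level, long long stop_at)
{
        while (now_ns() < stop_at) {
                /* ... apply one pending event for this level ... */
                break;                  /* placeholder so the sketch terminates */
        }
        (void)level;
}

int main(void)
{
        /* Read the clock once, like start_at = jiffies in htb_dequeue(), so
         * every level is measured against the same deadline. */
        long long stop_at = now_ns() + BUDGET_NS;
        int level;

        for (level = 0; level < NLEVELS; level++)
                process_level(level, stop_at);

        return 0;
}

The only point of the sketch is that stop_at is computed once in the caller; whether a given level gets any time then depends on how much of the shared budget the earlier levels have already consumed.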
Diffstat (limited to 'net')
 net/sched/sch_htb.c | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 9ca8a26ba507..2f0f0b04d3fb 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -661,12 +661,13 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  * next pending event (0 for no event in pq).
  * Note: Applied are events whose have cl->pq_key <= q->now.
  */
-static psched_time_t htb_do_events(struct htb_sched *q, int level)
+static psched_time_t htb_do_events(struct htb_sched *q, int level,
+                                   unsigned long start)
 {
         /* don't run for longer than 2 jiffies; 2 is used instead of
            1 to simplify things when jiffy is going to be incremented
            too soon */
-        unsigned long stop_at = jiffies + 2;
+        unsigned long stop_at = start + 2;
         while (time_before(jiffies, stop_at)) {
                 struct htb_class *cl;
                 long diff;
@@ -845,6 +846,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
         struct htb_sched *q = qdisc_priv(sch);
         int level;
         psched_time_t next_event;
+        unsigned long start_at;

         /* try to dequeue direct packets as high prio (!) to minimize cpu work */
         skb = __skb_dequeue(&q->direct_queue);
@@ -857,6 +859,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
         if (!sch->q.qlen)
                 goto fin;
         q->now = psched_get_time();
+        start_at = jiffies;

         next_event = q->now + 5 * PSCHED_TICKS_PER_SEC;
@@ -866,7 +869,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
                 psched_time_t event;

                 if (q->now >= q->near_ev_cache[level]) {
-                        event = htb_do_events(q, level);
+                        event = htb_do_events(q, level, start_at);
                         if (!event)
                                 event = q->now + PSCHED_TICKS_PER_SEC;
                         q->near_ev_cache[level] = event;
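The in-code comment about using 2 jiffies instead of 1 can be made concrete with a little arithmetic: if the start timestamp is taken just before a tick, a deadline of start + 1 may leave almost no time at all, while start + 2 always leaves at least one full jiffy. A toy calculation (HZ and the elapsed fraction are made-up values, purely for illustration):

#include <stdio.h>

int main(void)
{
        const double hz = 1000.0;            /* assumed tick rate: 1 jiffy = 1 ms */
        const double jiffy_ms = 1000.0 / hz;
        double frac = 0.999;                 /* worst case: start taken 99.9% into the current jiffy */

        /* Time left until the deadline expires, in milliseconds. */
        double budget_plus_1 = (1.0 - frac) * jiffy_ms;   /* ~0.001 ms: essentially nothing */
        double budget_plus_2 = (2.0 - frac) * jiffy_ms;   /* ~1.001 ms: at least one full jiffy */

        printf("deadline start+1: %.3f ms of budget\n", budget_plus_1);
        printf("deadline start+2: %.3f ms of budget\n", budget_plus_2);
        return 0;
}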