Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/act_api.c      253
-rw-r--r--  net/sched/act_gact.c     142
-rw-r--r--  net/sched/act_ipt.c      179
-rw-r--r--  net/sched/act_mirred.c   159
-rw-r--r--  net/sched/act_pedit.c    166
-rw-r--r--  net/sched/act_police.c   508
-rw-r--r--  net/sched/act_simple.c   183
-rw-r--r--  net/sched/cls_fw.c        25
-rw-r--r--  net/sched/sch_htb.c     1363
-rw-r--r--  net/sched/sch_netem.c      4
10 files changed, 1566 insertions, 1416 deletions
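
The changes below retire the per-module hash tables and the pkt_act.h macro templates in favour of shared helpers in act_api.c that operate on a struct tcf_common embedded in each action, plus a per-module struct tcf_hashinfo descriptor. The real definitions live in include/net/act_api.h and are not part of this diff; a minimal sketch inferred from the accessors used below (field layout and the tcf_hash() body are assumptions):

	struct tcf_hashinfo {
		struct tcf_common	**htab;	/* bucket array of hmask + 1 chains */
		unsigned int		hmask;	/* table size minus one, a power of two */
		rwlock_t		*lock;	/* guards the htab chains */
	};

	/* presumably a simple mask, given how the *_TAB_MASK constants are used */
	static inline unsigned int tcf_hash(u32 index, unsigned int hmask)
	{
		return index & hmask;
	}
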
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index a2587b52e531..835070e9169c 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -33,16 +33,230 @@
#include <net/sch_generic.h>
#include <net/act_api.h>
-#if 0 /* control */
-#define DPRINTK(format, args...) printk(KERN_DEBUG format, ##args)
-#else
-#define DPRINTK(format, args...)
+void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+{
+ unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
+ struct tcf_common **p1p;
+
+ for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
+ if (*p1p == p) {
+ write_lock_bh(hinfo->lock);
+ *p1p = p->tcfc_next;
+ write_unlock_bh(hinfo->lock);
+#ifdef CONFIG_NET_ESTIMATOR
+ gen_kill_estimator(&p->tcfc_bstats,
+ &p->tcfc_rate_est);
#endif
-#if 0 /* data */
-#define D2PRINTK(format, args...) printk(KERN_DEBUG format, ##args)
-#else
-#define D2PRINTK(format, args...)
+ kfree(p);
+ return;
+ }
+ }
+ BUG_TRAP(0);
+}
+EXPORT_SYMBOL(tcf_hash_destroy);
+
+int tcf_hash_release(struct tcf_common *p, int bind,
+ struct tcf_hashinfo *hinfo)
+{
+ int ret = 0;
+
+ if (p) {
+ if (bind)
+ p->tcfc_bindcnt--;
+
+ p->tcfc_refcnt--;
+ if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
+ tcf_hash_destroy(p, hinfo);
+ ret = 1;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL(tcf_hash_release);
+
+static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tc_action *a, struct tcf_hashinfo *hinfo)
+{
+ struct tcf_common *p;
+ int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
+ struct rtattr *r;
+
+ read_lock(hinfo->lock);
+
+ s_i = cb->args[0];
+
+ for (i = 0; i < (hinfo->hmask + 1); i++) {
+ p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
+
+ for (; p; p = p->tcfc_next) {
+ index++;
+ if (index < s_i)
+ continue;
+ a->priv = p;
+ a->order = n_i;
+ r = (struct rtattr*) skb->tail;
+ RTA_PUT(skb, a->order, 0, NULL);
+ err = tcf_action_dump_1(skb, a, 0, 0);
+ if (err < 0) {
+ index--;
+ skb_trim(skb, (u8*)r - skb->data);
+ goto done;
+ }
+ r->rta_len = skb->tail - (u8*)r;
+ n_i++;
+ if (n_i >= TCA_ACT_MAX_PRIO)
+ goto done;
+ }
+ }
+done:
+ read_unlock(hinfo->lock);
+ if (n_i)
+ cb->args[0] += n_i;
+ return n_i;
+
+rtattr_failure:
+ skb_trim(skb, (u8*)r - skb->data);
+ goto done;
+}
+
+static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
+ struct tcf_hashinfo *hinfo)
+{
+ struct tcf_common *p, *s_p;
+ struct rtattr *r;
+ int i = 0, n_i = 0;
+
+ r = (struct rtattr*) skb->tail;
+ RTA_PUT(skb, a->order, 0, NULL);
+ RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
+ for (i = 0; i < (hinfo->hmask + 1); i++) {
+ p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
+
+ while (p != NULL) {
+ s_p = p->tcfc_next;
+ if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
+ module_put(a->ops->owner);
+ n_i++;
+ p = s_p;
+ }
+ }
+ RTA_PUT(skb, TCA_FCNT, 4, &n_i);
+ r->rta_len = skb->tail - (u8*)r;
+
+ return n_i;
+rtattr_failure:
+ skb_trim(skb, (u8*)r - skb->data);
+ return -EINVAL;
+}
+
+int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ int type, struct tc_action *a)
+{
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
+
+ if (type == RTM_DELACTION) {
+ return tcf_del_walker(skb, a, hinfo);
+ } else if (type == RTM_GETACTION) {
+ return tcf_dump_walker(skb, cb, a, hinfo);
+ } else {
+ printk("tcf_generic_walker: unknown action %d\n", type);
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL(tcf_generic_walker);
+
+struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
+{
+ struct tcf_common *p;
+
+ read_lock(hinfo->lock);
+ for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
+ p = p->tcfc_next) {
+ if (p->tcfc_index == index)
+ break;
+ }
+ read_unlock(hinfo->lock);
+
+ return p;
+}
+EXPORT_SYMBOL(tcf_hash_lookup);
+
+u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
+{
+ u32 val = *idx_gen;
+
+ do {
+ if (++val == 0)
+ val = 1;
+ } while (tcf_hash_lookup(val, hinfo));
+
+ return (*idx_gen = val);
+}
+EXPORT_SYMBOL(tcf_hash_new_index);
+
+int tcf_hash_search(struct tc_action *a, u32 index)
+{
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
+ struct tcf_common *p = tcf_hash_lookup(index, hinfo);
+
+ if (p) {
+ a->priv = p;
+ return 1;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(tcf_hash_search);
+
+struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
+ struct tcf_hashinfo *hinfo)
+{
+ struct tcf_common *p = NULL;
+ if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
+ if (bind) {
+ p->tcfc_bindcnt++;
+ p->tcfc_refcnt++;
+ }
+ a->priv = p;
+ }
+ return p;
+}
+EXPORT_SYMBOL(tcf_hash_check);
+
+struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int bind, u32 *idx_gen, struct tcf_hashinfo *hinfo)
+{
+ struct tcf_common *p = kzalloc(size, GFP_KERNEL);
+
+ if (unlikely(!p))
+ return p;
+ p->tcfc_refcnt = 1;
+ if (bind)
+ p->tcfc_bindcnt = 1;
+
+ spin_lock_init(&p->tcfc_lock);
+ p->tcfc_stats_lock = &p->tcfc_lock;
+ p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
+ p->tcfc_tm.install = jiffies;
+ p->tcfc_tm.lastuse = jiffies;
+#ifdef CONFIG_NET_ESTIMATOR
+ if (est)
+ gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
+ p->tcfc_stats_lock, est);
#endif
+ a->priv = (void *) p;
+ return p;
+}
+EXPORT_SYMBOL(tcf_hash_create);
+
+void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+{
+ unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
+
+ write_lock_bh(hinfo->lock);
+ p->tcfc_next = hinfo->htab[h];
+ hinfo->htab[h] = p;
+ write_unlock_bh(hinfo->lock);
+}
+EXPORT_SYMBOL(tcf_hash_insert);
static struct tc_action_ops *act_base = NULL;
static DEFINE_RWLOCK(act_mod_lock);
@@ -155,9 +369,6 @@ int tcf_action_exec(struct sk_buff *skb, struct tc_action *act,
if (skb->tc_verd & TC_NCLS) {
skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
- D2PRINTK("(%p)tcf_action_exec: cleared TC_NCLS in %s out %s\n",
- skb, skb->input_dev ? skb->input_dev->name : "xxx",
- skb->dev->name);
ret = TC_ACT_OK;
goto exec_done;
}
@@ -187,8 +398,6 @@ void tcf_action_destroy(struct tc_action *act, int bind)
for (a = act; a; a = act) {
if (a->ops && a->ops->cleanup) {
- DPRINTK("tcf_action_destroy destroying %p next %p\n",
- a, a->next);
if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
module_put(a->ops->owner);
act = act->next;
@@ -331,7 +540,6 @@ struct tc_action *tcf_action_init_1(struct rtattr *rta, struct rtattr *est,
if (*err != ACT_P_CREATED)
module_put(a_o->owner);
a->ops = a_o;
- DPRINTK("tcf_action_init_1: successfull %s\n", act_name);
*err = 0;
return a;
@@ -392,12 +600,12 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (compat_mode) {
if (a->type == TCA_OLD_COMPAT)
err = gnet_stats_start_copy_compat(skb, 0,
- TCA_STATS, TCA_XSTATS, h->stats_lock, &d);
+ TCA_STATS, TCA_XSTATS, h->tcf_stats_lock, &d);
else
return 0;
} else
err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
- h->stats_lock, &d);
+ h->tcf_stats_lock, &d);
if (err < 0)
goto errout;
@@ -406,11 +614,11 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (a->ops->get_stats(skb, a) < 0)
goto errout;
- if (gnet_stats_copy_basic(&d, &h->bstats) < 0 ||
+ if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
- gnet_stats_copy_rate_est(&d, &h->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
#endif
- gnet_stats_copy_queue(&d, &h->qstats) < 0)
+ gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
goto errout;
if (gnet_stats_finish_copy(&d) < 0)
@@ -459,7 +667,6 @@ static int
act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
{
struct sk_buff *skb;
- int err = 0;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
@@ -468,10 +675,8 @@ act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
kfree_skb(skb);
return -EINVAL;
}
- err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
- if (err > 0)
- err = 0;
- return err;
+
+ return rtnl_unicast(skb, pid);
}
static struct tc_action *
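
With the helpers exported above, every action module's init path collapses into the same check/create/insert sequence against its own tcf_hashinfo. A condensed sketch of the pattern the per-module diffs below follow (the foo names are placeholders, not real symbols):

	struct tcf_common *pc;

	pc = tcf_hash_check(parm->index, a, bind, &foo_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(struct tcf_foo),
				     bind, &foo_idx_gen, &foo_hash_info);
		if (unlikely(!pc))
			return -ENOMEM;
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_hash_release(pc, bind, &foo_hash_info);
		return -EEXIST;
	}
	/* ... set module-specific fields under the action's tcf_lock ... */
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &foo_hash_info);
	return ret;
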
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index e75a147ad60f..6cff56696a81 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -34,48 +34,43 @@
#include <linux/tc_act/tc_gact.h>
#include <net/tc_act/tc_gact.h>
-/* use generic hash table */
-#define MY_TAB_SIZE 16
-#define MY_TAB_MASK 15
-
-static u32 idx_gen;
-static struct tcf_gact *tcf_gact_ht[MY_TAB_SIZE];
+#define GACT_TAB_MASK 15
+static struct tcf_common *tcf_gact_ht[GACT_TAB_MASK + 1];
+static u32 gact_idx_gen;
static DEFINE_RWLOCK(gact_lock);
-/* ovewrride the defaults */
-#define tcf_st tcf_gact
-#define tc_st tc_gact
-#define tcf_t_lock gact_lock
-#define tcf_ht tcf_gact_ht
-
-#define CONFIG_NET_ACT_INIT 1
-#include <net/pkt_act.h>
+static struct tcf_hashinfo gact_hash_info = {
+ .htab = tcf_gact_ht,
+ .hmask = GACT_TAB_MASK,
+ .lock = &gact_lock,
+};
#ifdef CONFIG_GACT_PROB
-static int gact_net_rand(struct tcf_gact *p)
+static int gact_net_rand(struct tcf_gact *gact)
{
- if (net_random()%p->pval)
- return p->action;
- return p->paction;
+ if (net_random() % gact->tcfg_pval)
+ return gact->tcf_action;
+ return gact->tcfg_paction;
}
-static int gact_determ(struct tcf_gact *p)
+static int gact_determ(struct tcf_gact *gact)
{
- if (p->bstats.packets%p->pval)
- return p->action;
- return p->paction;
+ if (gact->tcf_bstats.packets % gact->tcfg_pval)
+ return gact->tcf_action;
+ return gact->tcfg_paction;
}
-typedef int (*g_rand)(struct tcf_gact *p);
+typedef int (*g_rand)(struct tcf_gact *gact);
static g_rand gact_rand[MAX_RAND]= { NULL, gact_net_rand, gact_determ };
-#endif
+#endif /* CONFIG_GACT_PROB */
static int tcf_gact_init(struct rtattr *rta, struct rtattr *est,
struct tc_action *a, int ovr, int bind)
{
struct rtattr *tb[TCA_GACT_MAX];
struct tc_gact *parm;
- struct tcf_gact *p;
+ struct tcf_gact *gact;
+ struct tcf_common *pc;
int ret = 0;
if (rta == NULL || rtattr_parse_nested(tb, TCA_GACT_MAX, rta) < 0)
@@ -94,105 +89,106 @@ static int tcf_gact_init(struct rtattr *rta, struct rtattr *est,
return -EOPNOTSUPP;
#endif
- p = tcf_hash_check(parm->index, a, ovr, bind);
- if (p == NULL) {
- p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind);
- if (p == NULL)
+ pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info);
+ if (!pc) {
+ pc = tcf_hash_create(parm->index, est, a, sizeof(*gact),
+ bind, &gact_idx_gen, &gact_hash_info);
+ if (unlikely(!pc))
return -ENOMEM;
ret = ACT_P_CREATED;
} else {
if (!ovr) {
- tcf_hash_release(p, bind);
+ tcf_hash_release(pc, bind, &gact_hash_info);
return -EEXIST;
}
}
- spin_lock_bh(&p->lock);
- p->action = parm->action;
+ gact = to_gact(pc);
+
+ spin_lock_bh(&gact->tcf_lock);
+ gact->tcf_action = parm->action;
#ifdef CONFIG_GACT_PROB
if (tb[TCA_GACT_PROB-1] != NULL) {
struct tc_gact_p *p_parm = RTA_DATA(tb[TCA_GACT_PROB-1]);
- p->paction = p_parm->paction;
- p->pval = p_parm->pval;
- p->ptype = p_parm->ptype;
+ gact->tcfg_paction = p_parm->paction;
+ gact->tcfg_pval = p_parm->pval;
+ gact->tcfg_ptype = p_parm->ptype;
}
#endif
- spin_unlock_bh(&p->lock);
+ spin_unlock_bh(&gact->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(p);
+ tcf_hash_insert(pc, &gact_hash_info);
return ret;
}
-static int
-tcf_gact_cleanup(struct tc_action *a, int bind)
+static int tcf_gact_cleanup(struct tc_action *a, int bind)
{
- struct tcf_gact *p = PRIV(a, gact);
+ struct tcf_gact *gact = a->priv;
- if (p != NULL)
- return tcf_hash_release(p, bind);
+ if (gact)
+ return tcf_hash_release(&gact->common, bind, &gact_hash_info);
return 0;
}
-static int
-tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
+static int tcf_gact(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{
- struct tcf_gact *p = PRIV(a, gact);
+ struct tcf_gact *gact = a->priv;
int action = TC_ACT_SHOT;
- spin_lock(&p->lock);
+ spin_lock(&gact->tcf_lock);
#ifdef CONFIG_GACT_PROB
- if (p->ptype && gact_rand[p->ptype] != NULL)
- action = gact_rand[p->ptype](p);
+ if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL)
+ action = gact_rand[gact->tcfg_ptype](gact);
else
- action = p->action;
+ action = gact->tcf_action;
#else
- action = p->action;
+ action = gact->tcf_action;
#endif
- p->bstats.bytes += skb->len;
- p->bstats.packets++;
+ gact->tcf_bstats.bytes += skb->len;
+ gact->tcf_bstats.packets++;
if (action == TC_ACT_SHOT)
- p->qstats.drops++;
- p->tm.lastuse = jiffies;
- spin_unlock(&p->lock);
+ gact->tcf_qstats.drops++;
+ gact->tcf_tm.lastuse = jiffies;
+ spin_unlock(&gact->tcf_lock);
return action;
}
-static int
-tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
unsigned char *b = skb->tail;
struct tc_gact opt;
- struct tcf_gact *p = PRIV(a, gact);
+ struct tcf_gact *gact = a->priv;
struct tcf_t t;
- opt.index = p->index;
- opt.refcnt = p->refcnt - ref;
- opt.bindcnt = p->bindcnt - bind;
- opt.action = p->action;
+ opt.index = gact->tcf_index;
+ opt.refcnt = gact->tcf_refcnt - ref;
+ opt.bindcnt = gact->tcf_bindcnt - bind;
+ opt.action = gact->tcf_action;
RTA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
#ifdef CONFIG_GACT_PROB
- if (p->ptype) {
+ if (gact->tcfg_ptype) {
struct tc_gact_p p_opt;
- p_opt.paction = p->paction;
- p_opt.pval = p->pval;
- p_opt.ptype = p->ptype;
+ p_opt.paction = gact->tcfg_paction;
+ p_opt.pval = gact->tcfg_pval;
+ p_opt.ptype = gact->tcfg_ptype;
RTA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
}
#endif
- t.install = jiffies_to_clock_t(jiffies - p->tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse);
- t.expires = jiffies_to_clock_t(p->tm.expires);
+ t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
RTA_PUT(skb, TCA_GACT_TM, sizeof(t), &t);
return skb->len;
- rtattr_failure:
+rtattr_failure:
skb_trim(skb, b - skb->data);
return -1;
}
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
+ .hinfo = &gact_hash_info,
.type = TCA_ACT_GACT,
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
@@ -208,8 +204,7 @@ MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
MODULE_DESCRIPTION("Generic Classifier actions");
MODULE_LICENSE("GPL");
-static int __init
-gact_init_module(void)
+static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
printk("GACT probability on\n");
@@ -219,8 +214,7 @@ gact_init_module(void)
return tcf_register_action(&act_gact_ops);
}
-static void __exit
-gact_cleanup_module(void)
+static void __exit gact_cleanup_module(void)
{
tcf_unregister_action(&act_gact_ops);
}
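
to_gact() used above is not defined in this diff; judging by the &gact->common usage and the direct struct tcf_gact *gact = a->priv assignment, it is presumably a container_of()-style cast over an embedded struct tcf_common that sits first in the structure, roughly:

	struct tcf_gact {
		struct tcf_common	common;	/* first, so a->priv casts directly */
	#ifdef CONFIG_GACT_PROB
		u16			tcfg_ptype;
		u32			tcfg_pval;
		int			tcfg_paction;
	#endif
	};
	#define to_gact(pc)	container_of(pc, struct tcf_gact, common)

The tcf_index/tcf_action/tcf_bstats accessors used throughout then simply alias the corresponding tcfc_* fields of the embedded common part.
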
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d799e01248c4..d8c9310da6e5 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -38,25 +38,19 @@
#include <linux/netfilter_ipv4/ip_tables.h>
-/* use generic hash table */
-#define MY_TAB_SIZE 16
-#define MY_TAB_MASK 15
-static u32 idx_gen;
-static struct tcf_ipt *tcf_ipt_ht[MY_TAB_SIZE];
-/* ipt hash table lock */
+#define IPT_TAB_MASK 15
+static struct tcf_common *tcf_ipt_ht[IPT_TAB_MASK + 1];
+static u32 ipt_idx_gen;
static DEFINE_RWLOCK(ipt_lock);
-/* ovewrride the defaults */
-#define tcf_st tcf_ipt
-#define tcf_t_lock ipt_lock
-#define tcf_ht tcf_ipt_ht
-
-#define CONFIG_NET_ACT_INIT
-#include <net/pkt_act.h>
+static struct tcf_hashinfo ipt_hash_info = {
+ .htab = tcf_ipt_ht,
+ .hmask = IPT_TAB_MASK,
+ .lock = &ipt_lock,
+};
-static int
-ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
+static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
{
struct ipt_target *target;
int ret = 0;
@@ -65,7 +59,6 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
if (!target)
return -ENOENT;
- DPRINTK("ipt_init_target: found %s\n", target->name);
t->u.kernel.target = target;
ret = xt_check_target(target, AF_INET, t->u.target_size - sizeof(*t),
@@ -76,10 +69,7 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
if (t->u.kernel.target->checkentry
&& !t->u.kernel.target->checkentry(table, NULL,
t->u.kernel.target, t->data,
- t->u.target_size - sizeof(*t),
hook)) {
- DPRINTK("ipt_init_target: check failed for `%s'.\n",
- t->u.kernel.target->name);
module_put(t->u.kernel.target->me);
ret = -EINVAL;
}
@@ -87,40 +77,37 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
return ret;
}
-static void
-ipt_destroy_target(struct ipt_entry_target *t)
+static void ipt_destroy_target(struct ipt_entry_target *t)
{
if (t->u.kernel.target->destroy)
- t->u.kernel.target->destroy(t->u.kernel.target, t->data,
- t->u.target_size - sizeof(*t));
+ t->u.kernel.target->destroy(t->u.kernel.target, t->data);
module_put(t->u.kernel.target->me);
}
-static int
-tcf_ipt_release(struct tcf_ipt *p, int bind)
+static int tcf_ipt_release(struct tcf_ipt *ipt, int bind)
{
int ret = 0;
- if (p) {
+ if (ipt) {
if (bind)
- p->bindcnt--;
- p->refcnt--;
- if (p->bindcnt <= 0 && p->refcnt <= 0) {
- ipt_destroy_target(p->t);
- kfree(p->tname);
- kfree(p->t);
- tcf_hash_destroy(p);
+ ipt->tcf_bindcnt--;
+ ipt->tcf_refcnt--;
+ if (ipt->tcf_bindcnt <= 0 && ipt->tcf_refcnt <= 0) {
+ ipt_destroy_target(ipt->tcfi_t);
+ kfree(ipt->tcfi_tname);
+ kfree(ipt->tcfi_t);
+ tcf_hash_destroy(&ipt->common, &ipt_hash_info);
ret = ACT_P_DELETED;
}
}
return ret;
}
-static int
-tcf_ipt_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
- int ovr, int bind)
+static int tcf_ipt_init(struct rtattr *rta, struct rtattr *est,
+ struct tc_action *a, int ovr, int bind)
{
struct rtattr *tb[TCA_IPT_MAX];
- struct tcf_ipt *p;
+ struct tcf_ipt *ipt;
+ struct tcf_common *pc;
struct ipt_entry_target *td, *t;
char *tname;
int ret = 0, err;
@@ -144,49 +131,51 @@ tcf_ipt_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
RTA_PAYLOAD(tb[TCA_IPT_INDEX-1]) >= sizeof(u32))
index = *(u32 *)RTA_DATA(tb[TCA_IPT_INDEX-1]);
- p = tcf_hash_check(index, a, ovr, bind);
- if (p == NULL) {
- p = tcf_hash_create(index, est, a, sizeof(*p), ovr, bind);
- if (p == NULL)
+ pc = tcf_hash_check(index, a, bind, &ipt_hash_info);
+ if (!pc) {
+ pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind,
+ &ipt_idx_gen, &ipt_hash_info);
+ if (unlikely(!pc))
return -ENOMEM;
ret = ACT_P_CREATED;
} else {
if (!ovr) {
- tcf_ipt_release(p, bind);
+ tcf_ipt_release(to_ipt(pc), bind);
return -EEXIST;
}
}
+ ipt = to_ipt(pc);
hook = *(u32 *)RTA_DATA(tb[TCA_IPT_HOOK-1]);
err = -ENOMEM;
tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
- if (tname == NULL)
+ if (unlikely(!tname))
goto err1;
if (tb[TCA_IPT_TABLE - 1] == NULL ||
rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ)
strcpy(tname, "mangle");
t = kmalloc(td->u.target_size, GFP_KERNEL);
- if (t == NULL)
+ if (unlikely(!t))
goto err2;
memcpy(t, td, td->u.target_size);
if ((err = ipt_init_target(t, tname, hook)) < 0)
goto err3;
- spin_lock_bh(&p->lock);
+ spin_lock_bh(&ipt->tcf_lock);
if (ret != ACT_P_CREATED) {
- ipt_destroy_target(p->t);
- kfree(p->tname);
- kfree(p->t);
+ ipt_destroy_target(ipt->tcfi_t);
+ kfree(ipt->tcfi_tname);
+ kfree(ipt->tcfi_t);
}
- p->tname = tname;
- p->t = t;
- p->hook = hook;
- spin_unlock_bh(&p->lock);
+ ipt->tcfi_tname = tname;
+ ipt->tcfi_t = t;
+ ipt->tcfi_hook = hook;
+ spin_unlock_bh(&ipt->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(p);
+ tcf_hash_insert(pc, &ipt_hash_info);
return ret;
err3:
@@ -194,33 +183,32 @@ err3:
err2:
kfree(tname);
err1:
- kfree(p);
+ kfree(pc);
return err;
}
-static int
-tcf_ipt_cleanup(struct tc_action *a, int bind)
+static int tcf_ipt_cleanup(struct tc_action *a, int bind)
{
- struct tcf_ipt *p = PRIV(a, ipt);
- return tcf_ipt_release(p, bind);
+ struct tcf_ipt *ipt = a->priv;
+ return tcf_ipt_release(ipt, bind);
}
-static int
-tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
+static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
+ struct tcf_result *res)
{
int ret = 0, result = 0;
- struct tcf_ipt *p = PRIV(a, ipt);
+ struct tcf_ipt *ipt = a->priv;
if (skb_cloned(skb)) {
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return TC_ACT_UNSPEC;
}
- spin_lock(&p->lock);
+ spin_lock(&ipt->tcf_lock);
- p->tm.lastuse = jiffies;
- p->bstats.bytes += skb->len;
- p->bstats.packets++;
+ ipt->tcf_tm.lastuse = jiffies;
+ ipt->tcf_bstats.bytes += skb->len;
+ ipt->tcf_bstats.packets++;
/* yes, we have to worry about both in and out dev
worry later - danger - this API seems to have changed
@@ -229,16 +217,17 @@ tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
/* iptables targets take a double skb pointer in case the skb
* needs to be replaced. We don't own the skb, so this must not
* happen. The pskb_expand_head above should make sure of this */
- ret = p->t->u.kernel.target->target(&skb, skb->dev, NULL, p->hook,
- p->t->u.kernel.target, p->t->data,
- NULL);
+ ret = ipt->tcfi_t->u.kernel.target->target(&skb, skb->dev, NULL,
+ ipt->tcfi_hook,
+ ipt->tcfi_t->u.kernel.target,
+ ipt->tcfi_t->data);
switch (ret) {
case NF_ACCEPT:
result = TC_ACT_OK;
break;
case NF_DROP:
result = TC_ACT_SHOT;
- p->qstats.drops++;
+ ipt->tcf_qstats.drops++;
break;
case IPT_CONTINUE:
result = TC_ACT_PIPE;
@@ -249,53 +238,46 @@ tcf_ipt(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
result = TC_POLICE_OK;
break;
}
- spin_unlock(&p->lock);
+ spin_unlock(&ipt->tcf_lock);
return result;
}
-static int
-tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
+ unsigned char *b = skb->tail;
+ struct tcf_ipt *ipt = a->priv;
struct ipt_entry_target *t;
struct tcf_t tm;
struct tc_cnt c;
- unsigned char *b = skb->tail;
- struct tcf_ipt *p = PRIV(a, ipt);
/* for simple targets kernel size == user size
** user name = target name
** for foolproof you need to not assume this
*/
- t = kmalloc(p->t->u.user.target_size, GFP_ATOMIC);
- if (t == NULL)
+ t = kmalloc(ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
+ if (unlikely(!t))
goto rtattr_failure;
- c.bindcnt = p->bindcnt - bind;
- c.refcnt = p->refcnt - ref;
- memcpy(t, p->t, p->t->u.user.target_size);
- strcpy(t->u.user.name, p->t->u.kernel.target->name);
-
- DPRINTK("\ttcf_ipt_dump tablename %s length %d\n", p->tname,
- strlen(p->tname));
- DPRINTK("\tdump target name %s size %d size user %d "
- "data[0] %x data[1] %x\n", p->t->u.kernel.target->name,
- p->t->u.target_size, p->t->u.user.target_size,
- p->t->data[0], p->t->data[1]);
- RTA_PUT(skb, TCA_IPT_TARG, p->t->u.user.target_size, t);
- RTA_PUT(skb, TCA_IPT_INDEX, 4, &p->index);
- RTA_PUT(skb, TCA_IPT_HOOK, 4, &p->hook);
+ c.bindcnt = ipt->tcf_bindcnt - bind;
+ c.refcnt = ipt->tcf_refcnt - ref;
+ memcpy(t, ipt->tcfi_t, ipt->tcfi_t->u.user.target_size);
+ strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
+
+ RTA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t);
+ RTA_PUT(skb, TCA_IPT_INDEX, 4, &ipt->tcf_index);
+ RTA_PUT(skb, TCA_IPT_HOOK, 4, &ipt->tcfi_hook);
RTA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c);
- RTA_PUT(skb, TCA_IPT_TABLE, IFNAMSIZ, p->tname);
- tm.install = jiffies_to_clock_t(jiffies - p->tm.install);
- tm.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse);
- tm.expires = jiffies_to_clock_t(p->tm.expires);
+ RTA_PUT(skb, TCA_IPT_TABLE, IFNAMSIZ, ipt->tcfi_tname);
+ tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
+ tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
+ tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
RTA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm);
kfree(t);
return skb->len;
- rtattr_failure:
+rtattr_failure:
skb_trim(skb, b - skb->data);
kfree(t);
return -1;
@@ -303,6 +285,7 @@ tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
static struct tc_action_ops act_ipt_ops = {
.kind = "ipt",
+ .hinfo = &ipt_hash_info,
.type = TCA_ACT_IPT,
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
@@ -318,14 +301,12 @@ MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
MODULE_DESCRIPTION("Iptables target actions");
MODULE_LICENSE("GPL");
-static int __init
-ipt_init_module(void)
+static int __init ipt_init_module(void)
{
return tcf_register_action(&act_ipt_ops);
}
-static void __exit
-ipt_cleanup_module(void)
+static void __exit ipt_cleanup_module(void)
{
tcf_unregister_action(&act_ipt_ops);
}
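
Independent of the hash-table conversion, the calls into the netfilter target hooks in this file also lose their targinfosize argument: the t->u.target_size - sizeof(*t) operand disappears from the checkentry, destroy and target invocations, apparently tracking a concurrent x_tables interface change. Condensed from the hunks above:

	/* before */
	t->u.kernel.target->destroy(t->u.kernel.target, t->data,
				    t->u.target_size - sizeof(*t));
	/* after */
	t->u.kernel.target->destroy(t->u.kernel.target, t->data);
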
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index fc562047ecc5..483897271f15 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -39,46 +39,39 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
-
-/* use generic hash table */
-#define MY_TAB_SIZE 8
-#define MY_TAB_MASK (MY_TAB_SIZE - 1)
-static u32 idx_gen;
-static struct tcf_mirred *tcf_mirred_ht[MY_TAB_SIZE];
+#define MIRRED_TAB_MASK 7
+static struct tcf_common *tcf_mirred_ht[MIRRED_TAB_MASK + 1];
+static u32 mirred_idx_gen;
static DEFINE_RWLOCK(mirred_lock);
-/* ovewrride the defaults */
-#define tcf_st tcf_mirred
-#define tc_st tc_mirred
-#define tcf_t_lock mirred_lock
-#define tcf_ht tcf_mirred_ht
-
-#define CONFIG_NET_ACT_INIT 1
-#include <net/pkt_act.h>
+static struct tcf_hashinfo mirred_hash_info = {
+ .htab = tcf_mirred_ht,
+ .hmask = MIRRED_TAB_MASK,
+ .lock = &mirred_lock,
+};
-static inline int
-tcf_mirred_release(struct tcf_mirred *p, int bind)
+static inline int tcf_mirred_release(struct tcf_mirred *m, int bind)
{
- if (p) {
+ if (m) {
if (bind)
- p->bindcnt--;
- p->refcnt--;
- if(!p->bindcnt && p->refcnt <= 0) {
- dev_put(p->dev);
- tcf_hash_destroy(p);
+ m->tcf_bindcnt--;
+ m->tcf_refcnt--;
+ if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
+ dev_put(m->tcfm_dev);
+ tcf_hash_destroy(&m->common, &mirred_hash_info);
return 1;
}
}
return 0;
}
-static int
-tcf_mirred_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
- int ovr, int bind)
+static int tcf_mirred_init(struct rtattr *rta, struct rtattr *est,
+ struct tc_action *a, int ovr, int bind)
{
struct rtattr *tb[TCA_MIRRED_MAX];
struct tc_mirred *parm;
- struct tcf_mirred *p;
+ struct tcf_mirred *m;
+ struct tcf_common *pc;
struct net_device *dev = NULL;
int ret = 0;
int ok_push = 0;
@@ -110,64 +103,62 @@ tcf_mirred_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
}
}
- p = tcf_hash_check(parm->index, a, ovr, bind);
- if (p == NULL) {
+ pc = tcf_hash_check(parm->index, a, bind, &mirred_hash_info);
+ if (!pc) {
if (!parm->ifindex)
return -EINVAL;
- p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind);
- if (p == NULL)
+ pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind,
+ &mirred_idx_gen, &mirred_hash_info);
+ if (unlikely(!pc))
return -ENOMEM;
ret = ACT_P_CREATED;
} else {
if (!ovr) {
- tcf_mirred_release(p, bind);
+ tcf_mirred_release(to_mirred(pc), bind);
return -EEXIST;
}
}
+ m = to_mirred(pc);
- spin_lock_bh(&p->lock);
- p->action = parm->action;
- p->eaction = parm->eaction;
+ spin_lock_bh(&m->tcf_lock);
+ m->tcf_action = parm->action;
+ m->tcfm_eaction = parm->eaction;
if (parm->ifindex) {
- p->ifindex = parm->ifindex;
+ m->tcfm_ifindex = parm->ifindex;
if (ret != ACT_P_CREATED)
- dev_put(p->dev);
- p->dev = dev;
+ dev_put(m->tcfm_dev);
+ m->tcfm_dev = dev;
dev_hold(dev);
- p->ok_push = ok_push;
+ m->tcfm_ok_push = ok_push;
}
- spin_unlock_bh(&p->lock);
+ spin_unlock_bh(&m->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(p);
+ tcf_hash_insert(pc, &mirred_hash_info);
- DPRINTK("tcf_mirred_init index %d action %d eaction %d device %s "
- "ifindex %d\n", parm->index, parm->action, parm->eaction,
- dev->name, parm->ifindex);
return ret;
}
-static int
-tcf_mirred_cleanup(struct tc_action *a, int bind)
+static int tcf_mirred_cleanup(struct tc_action *a, int bind)
{
- struct tcf_mirred *p = PRIV(a, mirred);
+ struct tcf_mirred *m = a->priv;
- if (p != NULL)
- return tcf_mirred_release(p, bind);
+ if (m)
+ return tcf_mirred_release(m, bind);
return 0;
}
-static int
-tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
+static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
+ struct tcf_result *res)
{
- struct tcf_mirred *p = PRIV(a, mirred);
+ struct tcf_mirred *m = a->priv;
struct net_device *dev;
struct sk_buff *skb2 = NULL;
u32 at = G_TC_AT(skb->tc_verd);
- spin_lock(&p->lock);
+ spin_lock(&m->tcf_lock);
- dev = p->dev;
- p->tm.lastuse = jiffies;
+ dev = m->tcfm_dev;
+ m->tcf_tm.lastuse = jiffies;
if (!(dev->flags&IFF_UP) ) {
if (net_ratelimit())
@@ -176,10 +167,10 @@ tcf_mirred(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
bad_mirred:
if (skb2 != NULL)
kfree_skb(skb2);
- p->qstats.overlimits++;
- p->bstats.bytes += skb->len;
- p->bstats.packets++;
- spin_unlock(&p->lock);
+ m->tcf_qstats.overlimits++;
+ m->tcf_bstats.bytes += skb->len;
+ m->tcf_bstats.packets++;
+ spin_unlock(&m->tcf_lock);
/* should we be asking for packet to be dropped?
* may make sense for redirect case only
*/
@@ -189,59 +180,59 @@ bad_mirred:
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2 == NULL)
goto bad_mirred;
- if (p->eaction != TCA_EGRESS_MIRROR && p->eaction != TCA_EGRESS_REDIR) {
+ if (m->tcfm_eaction != TCA_EGRESS_MIRROR &&
+ m->tcfm_eaction != TCA_EGRESS_REDIR) {
if (net_ratelimit())
- printk("tcf_mirred unknown action %d\n", p->eaction);
+ printk("tcf_mirred unknown action %d\n",
+ m->tcfm_eaction);
goto bad_mirred;
}
- p->bstats.bytes += skb2->len;
- p->bstats.packets++;
+ m->tcf_bstats.bytes += skb2->len;
+ m->tcf_bstats.packets++;
if (!(at & AT_EGRESS))
- if (p->ok_push)
+ if (m->tcfm_ok_push)
skb_push(skb2, skb2->dev->hard_header_len);
/* mirror is always swallowed */
- if (p->eaction != TCA_EGRESS_MIRROR)
+ if (m->tcfm_eaction != TCA_EGRESS_MIRROR)
skb2->tc_verd = SET_TC_FROM(skb2->tc_verd, at);
skb2->dev = dev;
skb2->input_dev = skb->dev;
dev_queue_xmit(skb2);
- spin_unlock(&p->lock);
- return p->action;
+ spin_unlock(&m->tcf_lock);
+ return m->tcf_action;
}
-static int
-tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
unsigned char *b = skb->tail;
+ struct tcf_mirred *m = a->priv;
struct tc_mirred opt;
- struct tcf_mirred *p = PRIV(a, mirred);
struct tcf_t t;
- opt.index = p->index;
- opt.action = p->action;
- opt.refcnt = p->refcnt - ref;
- opt.bindcnt = p->bindcnt - bind;
- opt.eaction = p->eaction;
- opt.ifindex = p->ifindex;
- DPRINTK("tcf_mirred_dump index %d action %d eaction %d ifindex %d\n",
- p->index, p->action, p->eaction, p->ifindex);
+ opt.index = m->tcf_index;
+ opt.action = m->tcf_action;
+ opt.refcnt = m->tcf_refcnt - ref;
+ opt.bindcnt = m->tcf_bindcnt - bind;
+ opt.eaction = m->tcfm_eaction;
+ opt.ifindex = m->tcfm_ifindex;
RTA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
- t.install = jiffies_to_clock_t(jiffies - p->tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse);
- t.expires = jiffies_to_clock_t(p->tm.expires);
+ t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
RTA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);
return skb->len;
- rtattr_failure:
+rtattr_failure:
skb_trim(skb, b - skb->data);
return -1;
}
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
+ .hinfo = &mirred_hash_info,
.type = TCA_ACT_MIRRED,
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
@@ -257,15 +248,13 @@ MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");
-static int __init
-mirred_init_module(void)
+static int __init mirred_init_module(void)
{
printk("Mirror/redirect action on\n");
return tcf_register_action(&act_mirred_ops);
}
-static void __exit
-mirred_cleanup_module(void)
+static void __exit mirred_cleanup_module(void)
{
tcf_unregister_action(&act_mirred_ops);
}
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index f257475e0e0c..8ac65c219b98 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -33,32 +33,25 @@
#include <linux/tc_act/tc_pedit.h>
#include <net/tc_act/tc_pedit.h>
-
-#define PEDIT_DEB 1
-
-/* use generic hash table */
-#define MY_TAB_SIZE 16
-#define MY_TAB_MASK 15
-static u32 idx_gen;
-static struct tcf_pedit *tcf_pedit_ht[MY_TAB_SIZE];
+#define PEDIT_TAB_MASK 15
+static struct tcf_common *tcf_pedit_ht[PEDIT_TAB_MASK + 1];
+static u32 pedit_idx_gen;
static DEFINE_RWLOCK(pedit_lock);
-#define tcf_st tcf_pedit
-#define tc_st tc_pedit
-#define tcf_t_lock pedit_lock
-#define tcf_ht tcf_pedit_ht
-
-#define CONFIG_NET_ACT_INIT 1
-#include <net/pkt_act.h>
+static struct tcf_hashinfo pedit_hash_info = {
+ .htab = tcf_pedit_ht,
+ .hmask = PEDIT_TAB_MASK,
+ .lock = &pedit_lock,
+};
-static int
-tcf_pedit_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
- int ovr, int bind)
+static int tcf_pedit_init(struct rtattr *rta, struct rtattr *est,
+ struct tc_action *a, int ovr, int bind)
{
struct rtattr *tb[TCA_PEDIT_MAX];
struct tc_pedit *parm;
int ret = 0;
struct tcf_pedit *p;
+ struct tcf_common *pc;
struct tc_pedit_key *keys = NULL;
int ksize;
@@ -73,54 +66,56 @@ tcf_pedit_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
if (RTA_PAYLOAD(tb[TCA_PEDIT_PARMS-1]) < sizeof(*parm) + ksize)
return -EINVAL;
- p = tcf_hash_check(parm->index, a, ovr, bind);
- if (p == NULL) {
+ pc = tcf_hash_check(parm->index, a, bind, &pedit_hash_info);
+ if (!pc) {
if (!parm->nkeys)
return -EINVAL;
- p = tcf_hash_create(parm->index, est, a, sizeof(*p), ovr, bind);
- if (p == NULL)
+ pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
+ &pedit_idx_gen, &pedit_hash_info);
+ if (unlikely(!pc))
return -ENOMEM;
+ p = to_pedit(pc);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
- kfree(p);
+ kfree(pc);
return -ENOMEM;
}
ret = ACT_P_CREATED;
} else {
+ p = to_pedit(pc);
if (!ovr) {
- tcf_hash_release(p, bind);
+ tcf_hash_release(pc, bind, &pedit_hash_info);
return -EEXIST;
}
- if (p->nkeys && p->nkeys != parm->nkeys) {
+ if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL)
return -ENOMEM;
}
}
- spin_lock_bh(&p->lock);
- p->flags = parm->flags;
- p->action = parm->action;
+ spin_lock_bh(&p->tcf_lock);
+ p->tcfp_flags = parm->flags;
+ p->tcf_action = parm->action;
if (keys) {
- kfree(p->keys);
- p->keys = keys;
- p->nkeys = parm->nkeys;
+ kfree(p->tcfp_keys);
+ p->tcfp_keys = keys;
+ p->tcfp_nkeys = parm->nkeys;
}
- memcpy(p->keys, parm->keys, ksize);
- spin_unlock_bh(&p->lock);
+ memcpy(p->tcfp_keys, parm->keys, ksize);
+ spin_unlock_bh(&p->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(p);
+ tcf_hash_insert(pc, &pedit_hash_info);
return ret;
}
-static int
-tcf_pedit_cleanup(struct tc_action *a, int bind)
+static int tcf_pedit_cleanup(struct tc_action *a, int bind)
{
- struct tcf_pedit *p = PRIV(a, pedit);
+ struct tcf_pedit *p = a->priv;
- if (p != NULL) {
- struct tc_pedit_key *keys = p->keys;
- if (tcf_hash_release(p, bind)) {
+ if (p) {
+ struct tc_pedit_key *keys = p->tcfp_keys;
+ if (tcf_hash_release(&p->common, bind, &pedit_hash_info)) {
kfree(keys);
return 1;
}
@@ -128,30 +123,30 @@ tcf_pedit_cleanup(struct tc_action *a, int bind)
return 0;
}
-static int
-tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
+static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
+ struct tcf_result *res)
{
- struct tcf_pedit *p = PRIV(a, pedit);
+ struct tcf_pedit *p = a->priv;
int i, munged = 0;
u8 *pptr;
if (!(skb->tc_verd & TC_OK2MUNGE)) {
/* should we set skb->cloned? */
if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
- return p->action;
+ return p->tcf_action;
}
}
pptr = skb->nh.raw;
- spin_lock(&p->lock);
+ spin_lock(&p->tcf_lock);
- p->tm.lastuse = jiffies;
+ p->tcf_tm.lastuse = jiffies;
- if (p->nkeys > 0) {
- struct tc_pedit_key *tkey = p->keys;
+ if (p->tcfp_nkeys > 0) {
+ struct tc_pedit_key *tkey = p->tcfp_keys;
- for (i = p->nkeys; i > 0; i--, tkey++) {
+ for (i = p->tcfp_nkeys; i > 0; i--, tkey++) {
u32 *ptr;
int offset = tkey->off;
@@ -169,7 +164,8 @@ tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
printk("offset must be on 32 bit boundaries\n");
goto bad;
}
- if (skb->len < 0 || (offset > 0 && offset > skb->len)) {
+ if (skb->len < 0 ||
+ (offset > 0 && offset > skb->len)) {
printk("offset %d cant exceed pkt length %d\n",
offset, skb->len);
goto bad;
@@ -185,63 +181,47 @@ tcf_pedit(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
skb->tc_verd = SET_TC_MUNGED(skb->tc_verd);
goto done;
} else {
- printk("pedit BUG: index %d\n",p->index);
+ printk("pedit BUG: index %d\n", p->tcf_index);
}
bad:
- p->qstats.overlimits++;
+ p->tcf_qstats.overlimits++;
done:
- p->bstats.bytes += skb->len;
- p->bstats.packets++;
- spin_unlock(&p->lock);
- return p->action;
+ p->tcf_bstats.bytes += skb->len;
+ p->tcf_bstats.packets++;
+ spin_unlock(&p->tcf_lock);
+ return p->tcf_action;
}
-static int
-tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,int bind, int ref)
+static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
{
unsigned char *b = skb->tail;
+ struct tcf_pedit *p = a->priv;
struct tc_pedit *opt;
- struct tcf_pedit *p = PRIV(a, pedit);
struct tcf_t t;
int s;
- s = sizeof(*opt) + p->nkeys * sizeof(struct tc_pedit_key);
+ s = sizeof(*opt) + p->tcfp_nkeys * sizeof(struct tc_pedit_key);
/* netlink spinlocks held above us - must use ATOMIC */
opt = kzalloc(s, GFP_ATOMIC);
- if (opt == NULL)
+ if (unlikely(!opt))
return -ENOBUFS;
- memcpy(opt->keys, p->keys, p->nkeys * sizeof(struct tc_pedit_key));
- opt->index = p->index;
- opt->nkeys = p->nkeys;
- opt->flags = p->flags;
- opt->action = p->action;
- opt->refcnt = p->refcnt - ref;
- opt->bindcnt = p->bindcnt - bind;
-
-
-#ifdef PEDIT_DEB
- {
- /* Debug - get rid of later */
- int i;
- struct tc_pedit_key *key = opt->keys;
-
- for (i=0; i<opt->nkeys; i++, key++) {
- printk( "\n key #%d",i);
- printk( " at %d: val %08x mask %08x",
- (unsigned int)key->off,
- (unsigned int)key->val,
- (unsigned int)key->mask);
- }
- }
-#endif
+ memcpy(opt->keys, p->tcfp_keys,
+ p->tcfp_nkeys * sizeof(struct tc_pedit_key));
+ opt->index = p->tcf_index;
+ opt->nkeys = p->tcfp_nkeys;
+ opt->flags = p->tcfp_flags;
+ opt->action = p->tcf_action;
+ opt->refcnt = p->tcf_refcnt - ref;
+ opt->bindcnt = p->tcf_bindcnt - bind;
RTA_PUT(skb, TCA_PEDIT_PARMS, s, opt);
- t.install = jiffies_to_clock_t(jiffies - p->tm.install);
- t.lastuse = jiffies_to_clock_t(jiffies - p->tm.lastuse);
- t.expires = jiffies_to_clock_t(p->tm.expires);
+ t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
RTA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t);
kfree(opt);
return skb->len;
@@ -252,9 +232,9 @@ rtattr_failure:
return -1;
}
-static
-struct tc_action_ops act_pedit_ops = {
+static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
+ .hinfo = &pedit_hash_info,
.type = TCA_ACT_PEDIT,
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
@@ -270,14 +250,12 @@ MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
MODULE_DESCRIPTION("Generic Packet Editor actions");
MODULE_LICENSE("GPL");
-static int __init
-pedit_init_module(void)
+static int __init pedit_init_module(void)
{
return tcf_register_action(&act_pedit_ops);
}
-static void __exit
-pedit_cleanup_module(void)
+static void __exit pedit_cleanup_module(void)
{
tcf_unregister_action(&act_pedit_ops);
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index da905d7b4b40..fed47b658837 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -32,43 +32,27 @@
#include <net/sock.h>
#include <net/act_api.h>
-#define L2T(p,L) ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log])
-#define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log])
-#define PRIV(a) ((struct tcf_police *) (a)->priv)
-
-/* use generic hash table */
-#define MY_TAB_SIZE 16
-#define MY_TAB_MASK 15
-static u32 idx_gen;
-static struct tcf_police *tcf_police_ht[MY_TAB_SIZE];
-/* Policer hash table lock */
-static DEFINE_RWLOCK(police_lock);
-
-/* Each policer is serialized by its individual spinlock */
+#define L2T(p,L) ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
+#define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])
-static __inline__ unsigned tcf_police_hash(u32 index)
-{
- return index&0xF;
-}
+#define POL_TAB_MASK 15
+static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
+static u32 police_idx_gen;
+static DEFINE_RWLOCK(police_lock);
-static __inline__ struct tcf_police * tcf_police_lookup(u32 index)
-{
- struct tcf_police *p;
+static struct tcf_hashinfo police_hash_info = {
+ .htab = tcf_police_ht,
+ .hmask = POL_TAB_MASK,
+ .lock = &police_lock,
+};
- read_lock(&police_lock);
- for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
- if (p->index == index)
- break;
- }
- read_unlock(&police_lock);
- return p;
-}
+/* Each policer is serialized by its individual spinlock */
#ifdef CONFIG_NET_CLS_ACT
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
int type, struct tc_action *a)
{
- struct tcf_police *p;
+ struct tcf_common *p;
int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct rtattr *r;
@@ -76,10 +60,10 @@ static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *c
s_i = cb->args[0];
- for (i = 0; i < MY_TAB_SIZE; i++) {
- p = tcf_police_ht[tcf_police_hash(i)];
+ for (i = 0; i < (POL_TAB_MASK + 1); i++) {
+ p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];
- for (; p; p = p->next) {
+ for (; p; p = p->tcfc_next) {
index++;
if (index < s_i)
continue;
@@ -110,48 +94,26 @@ rtattr_failure:
skb_trim(skb, (u8*)r - skb->data);
goto done;
}
-
-static inline int
-tcf_act_police_hash_search(struct tc_action *a, u32 index)
-{
- struct tcf_police *p = tcf_police_lookup(index);
-
- if (p != NULL) {
- a->priv = p;
- return 1;
- } else {
- return 0;
- }
-}
#endif
-static inline u32 tcf_police_new_index(void)
-{
- do {
- if (++idx_gen == 0)
- idx_gen = 1;
- } while (tcf_police_lookup(idx_gen));
-
- return idx_gen;
-}
-
void tcf_police_destroy(struct tcf_police *p)
{
- unsigned h = tcf_police_hash(p->index);
- struct tcf_police **p1p;
+ unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
+ struct tcf_common **p1p;
- for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
- if (*p1p == p) {
+ for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
+ if (*p1p == &p->common) {
write_lock_bh(&police_lock);
- *p1p = p->next;
+ *p1p = p->tcf_next;
write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
- gen_kill_estimator(&p->bstats, &p->rate_est);
+ gen_kill_estimator(&p->tcf_bstats,
+ &p->tcf_rate_est);
#endif
- if (p->R_tab)
- qdisc_put_rtab(p->R_tab);
- if (p->P_tab)
- qdisc_put_rtab(p->P_tab);
+ if (p->tcfp_R_tab)
+ qdisc_put_rtab(p->tcfp_R_tab);
+ if (p->tcfp_P_tab)
+ qdisc_put_rtab(p->tcfp_P_tab);
kfree(p);
return;
}
@@ -167,7 +129,7 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
int ret = 0, err;
struct rtattr *tb[TCA_POLICE_MAX];
struct tc_police *parm;
- struct tcf_police *p;
+ struct tcf_police *police;
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
@@ -185,27 +147,32 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
return -EINVAL;
- if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
- a->priv = p;
- if (bind) {
- p->bindcnt += 1;
- p->refcnt += 1;
+ if (parm->index) {
+ struct tcf_common *pc;
+
+ pc = tcf_hash_lookup(parm->index, &police_hash_info);
+ if (pc != NULL) {
+ a->priv = pc;
+ police = to_police(pc);
+ if (bind) {
+ police->tcf_bindcnt += 1;
+ police->tcf_refcnt += 1;
+ }
+ if (ovr)
+ goto override;
+ return ret;
}
- if (ovr)
- goto override;
- return ret;
}
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL)
+ police = kzalloc(sizeof(*police), GFP_KERNEL);
+ if (police == NULL)
return -ENOMEM;
-
ret = ACT_P_CREATED;
- p->refcnt = 1;
- spin_lock_init(&p->lock);
- p->stats_lock = &p->lock;
+ police->tcf_refcnt = 1;
+ spin_lock_init(&police->tcf_lock);
+ police->tcf_stats_lock = &police->tcf_lock;
if (bind)
- p->bindcnt = 1;
+ police->tcf_bindcnt = 1;
override:
if (parm->rate.rate) {
err = -ENOMEM;
@@ -215,67 +182,71 @@ override:
if (parm->peakrate.rate) {
P_tab = qdisc_get_rtab(&parm->peakrate,
tb[TCA_POLICE_PEAKRATE-1]);
- if (p->P_tab == NULL) {
+ if (P_tab == NULL) {
qdisc_put_rtab(R_tab);
goto failure;
}
}
}
/* No failure allowed after this point */
- spin_lock_bh(&p->lock);
+ spin_lock_bh(&police->tcf_lock);
if (R_tab != NULL) {
- qdisc_put_rtab(p->R_tab);
- p->R_tab = R_tab;
+ qdisc_put_rtab(police->tcfp_R_tab);
+ police->tcfp_R_tab = R_tab;
}
if (P_tab != NULL) {
- qdisc_put_rtab(p->P_tab);
- p->P_tab = P_tab;
+ qdisc_put_rtab(police->tcfp_P_tab);
+ police->tcfp_P_tab = P_tab;
}
if (tb[TCA_POLICE_RESULT-1])
- p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
- p->toks = p->burst = parm->burst;
- p->mtu = parm->mtu;
- if (p->mtu == 0) {
- p->mtu = ~0;
- if (p->R_tab)
- p->mtu = 255<<p->R_tab->rate.cell_log;
+ police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
+ police->tcfp_toks = police->tcfp_burst = parm->burst;
+ police->tcfp_mtu = parm->mtu;
+ if (police->tcfp_mtu == 0) {
+ police->tcfp_mtu = ~0;
+ if (police->tcfp_R_tab)
+ police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
}
- if (p->P_tab)
- p->ptoks = L2T_P(p, p->mtu);
- p->action = parm->action;
+ if (police->tcfp_P_tab)
+ police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
+ police->tcf_action = parm->action;
#ifdef CONFIG_NET_ESTIMATOR
if (tb[TCA_POLICE_AVRATE-1])
- p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
+ police->tcfp_ewma_rate =
+ *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
if (est)
- gen_replace_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
+ gen_replace_estimator(&police->tcf_bstats,
+ &police->tcf_rate_est,
+ police->tcf_stats_lock, est);
#endif
- spin_unlock_bh(&p->lock);
+ spin_unlock_bh(&police->tcf_lock);
if (ret != ACT_P_CREATED)
return ret;
- PSCHED_GET_TIME(p->t_c);
- p->index = parm->index ? : tcf_police_new_index();
- h = tcf_police_hash(p->index);
+ PSCHED_GET_TIME(police->tcfp_t_c);
+ police->tcf_index = parm->index ? parm->index :
+ tcf_hash_new_index(&police_idx_gen, &police_hash_info);
+ h = tcf_hash(police->tcf_index, POL_TAB_MASK);
write_lock_bh(&police_lock);
- p->next = tcf_police_ht[h];
- tcf_police_ht[h] = p;
+ police->tcf_next = tcf_police_ht[h];
+ tcf_police_ht[h] = &police->common;
write_unlock_bh(&police_lock);
- a->priv = p;
+ a->priv = police;
return ret;
failure:
if (ret == ACT_P_CREATED)
- kfree(p);
+ kfree(police);
return err;
}
static int tcf_act_police_cleanup(struct tc_action *a, int bind)
{
- struct tcf_police *p = PRIV(a);
+ struct tcf_police *p = a->priv;
if (p != NULL)
return tcf_police_release(p, bind);
@@ -285,86 +256,87 @@ static int tcf_act_police_cleanup(struct tc_action *a, int bind)
static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
struct tcf_result *res)
{
+ struct tcf_police *police = a->priv;
psched_time_t now;
- struct tcf_police *p = PRIV(a);
long toks;
long ptoks = 0;
- spin_lock(&p->lock);
+ spin_lock(&police->tcf_lock);
- p->bstats.bytes += skb->len;
- p->bstats.packets++;
+ police->tcf_bstats.bytes += skb->len;
+ police->tcf_bstats.packets++;
#ifdef CONFIG_NET_ESTIMATOR
- if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
- p->qstats.overlimits++;
- spin_unlock(&p->lock);
- return p->action;
+ if (police->tcfp_ewma_rate &&
+ police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
+ police->tcf_qstats.overlimits++;
+ spin_unlock(&police->tcf_lock);
+ return police->tcf_action;
}
#endif
- if (skb->len <= p->mtu) {
- if (p->R_tab == NULL) {
- spin_unlock(&p->lock);
- return p->result;
+ if (skb->len <= police->tcfp_mtu) {
+ if (police->tcfp_R_tab == NULL) {
+ spin_unlock(&police->tcf_lock);
+ return police->tcfp_result;
}
PSCHED_GET_TIME(now);
- toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);
-
- if (p->P_tab) {
- ptoks = toks + p->ptoks;
- if (ptoks > (long)L2T_P(p, p->mtu))
- ptoks = (long)L2T_P(p, p->mtu);
- ptoks -= L2T_P(p, skb->len);
+ toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
+ police->tcfp_burst);
+ if (police->tcfp_P_tab) {
+ ptoks = toks + police->tcfp_ptoks;
+ if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
+ ptoks = (long)L2T_P(police, police->tcfp_mtu);
+ ptoks -= L2T_P(police, skb->len);
}
- toks += p->toks;
- if (toks > (long)p->burst)
- toks = p->burst;
- toks -= L2T(p, skb->len);
-
+ toks += police->tcfp_toks;
+ if (toks > (long)police->tcfp_burst)
+ toks = police->tcfp_burst;
+ toks -= L2T(police, skb->len);
if ((toks|ptoks) >= 0) {
- p->t_c = now;
- p->toks = toks;
- p->ptoks = ptoks;
- spin_unlock(&p->lock);
- return p->result;
+ police->tcfp_t_c = now;
+ police->tcfp_toks = toks;
+ police->tcfp_ptoks = ptoks;
+ spin_unlock(&police->tcf_lock);
+ return police->tcfp_result;
}
}
- p->qstats.overlimits++;
- spin_unlock(&p->lock);
- return p->action;
+ police->tcf_qstats.overlimits++;
+ spin_unlock(&police->tcf_lock);
+ return police->tcf_action;
}
static int
tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
unsigned char *b = skb->tail;
+ struct tcf_police *police = a->priv;
struct tc_police opt;
- struct tcf_police *p = PRIV(a);
-
- opt.index = p->index;
- opt.action = p->action;
- opt.mtu = p->mtu;
- opt.burst = p->burst;
- opt.refcnt = p->refcnt - ref;
- opt.bindcnt = p->bindcnt - bind;
- if (p->R_tab)
- opt.rate = p->R_tab->rate;
+
+ opt.index = police->tcf_index;
+ opt.action = police->tcf_action;
+ opt.mtu = police->tcfp_mtu;
+ opt.burst = police->tcfp_burst;
+ opt.refcnt = police->tcf_refcnt - ref;
+ opt.bindcnt = police->tcf_bindcnt - bind;
+ if (police->tcfp_R_tab)
+ opt.rate = police->tcfp_R_tab->rate;
else
memset(&opt.rate, 0, sizeof(opt.rate));
- if (p->P_tab)
- opt.peakrate = p->P_tab->rate;
+ if (police->tcfp_P_tab)
+ opt.peakrate = police->tcfp_P_tab->rate;
else
memset(&opt.peakrate, 0, sizeof(opt.peakrate));
RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
- if (p->result)
- RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
+ if (police->tcfp_result)
+ RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
+ &police->tcfp_result);
#ifdef CONFIG_NET_ESTIMATOR
- if (p->ewma_rate)
- RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
+ if (police->tcfp_ewma_rate)
+ RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
#endif
return skb->len;
@@ -379,13 +351,14 @@ MODULE_LICENSE("GPL");
static struct tc_action_ops act_police_ops = {
.kind = "police",
+ .hinfo = &police_hash_info,
.type = TCA_ID_POLICE,
.capab = TCA_CAP_NONE,
.owner = THIS_MODULE,
.act = tcf_act_police,
.dump = tcf_act_police_dump,
.cleanup = tcf_act_police_cleanup,
- .lookup = tcf_act_police_hash_search,
+ .lookup = tcf_hash_search,
.init = tcf_act_police_locate,
.walk = tcf_act_police_walker
};
@@ -407,10 +380,39 @@ module_exit(police_cleanup_module);
#else /* CONFIG_NET_CLS_ACT */
-struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
+static struct tcf_common *tcf_police_lookup(u32 index)
{
- unsigned h;
- struct tcf_police *p;
+ struct tcf_hashinfo *hinfo = &police_hash_info;
+ struct tcf_common *p;
+
+ read_lock(hinfo->lock);
+ for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
+ p = p->tcfc_next) {
+ if (p->tcfc_index == index)
+ break;
+ }
+ read_unlock(hinfo->lock);
+
+ return p;
+}
+
+static u32 tcf_police_new_index(void)
+{
+ u32 *idx_gen = &police_idx_gen;
+ u32 val = *idx_gen;
+
+ do {
+ if (++val == 0)
+ val = 1;
+ } while (tcf_police_lookup(val));
+
+ return (*idx_gen = val);
+}
+
+struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
+{
+ unsigned int h;
+ struct tcf_police *police;
struct rtattr *tb[TCA_POLICE_MAX];
struct tc_police *parm;
@@ -423,149 +425,158 @@ struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
- if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
- p->refcnt++;
- return p;
- }
+ if (parm->index) {
+ struct tcf_common *pc;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL)
+ pc = tcf_police_lookup(parm->index);
+ if (pc) {
+ police = to_police(pc);
+ police->tcf_refcnt++;
+ return police;
+ }
+ }
+ police = kzalloc(sizeof(*police), GFP_KERNEL);
+ if (unlikely(!police))
return NULL;
- p->refcnt = 1;
- spin_lock_init(&p->lock);
- p->stats_lock = &p->lock;
+ police->tcf_refcnt = 1;
+ spin_lock_init(&police->tcf_lock);
+ police->tcf_stats_lock = &police->tcf_lock;
if (parm->rate.rate) {
- p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
- if (p->R_tab == NULL)
+ police->tcfp_R_tab =
+ qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
+ if (police->tcfp_R_tab == NULL)
goto failure;
if (parm->peakrate.rate) {
- p->P_tab = qdisc_get_rtab(&parm->peakrate,
- tb[TCA_POLICE_PEAKRATE-1]);
- if (p->P_tab == NULL)
+ police->tcfp_P_tab =
+ qdisc_get_rtab(&parm->peakrate,
+ tb[TCA_POLICE_PEAKRATE-1]);
+ if (police->tcfp_P_tab == NULL)
goto failure;
}
}
if (tb[TCA_POLICE_RESULT-1]) {
if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
goto failure;
- p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
+ police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
}
#ifdef CONFIG_NET_ESTIMATOR
if (tb[TCA_POLICE_AVRATE-1]) {
if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
goto failure;
- p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
+ police->tcfp_ewma_rate =
+ *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
}
#endif
- p->toks = p->burst = parm->burst;
- p->mtu = parm->mtu;
- if (p->mtu == 0) {
- p->mtu = ~0;
- if (p->R_tab)
- p->mtu = 255<<p->R_tab->rate.cell_log;
+ police->tcfp_toks = police->tcfp_burst = parm->burst;
+ police->tcfp_mtu = parm->mtu;
+ if (police->tcfp_mtu == 0) {
+ police->tcfp_mtu = ~0;
+ if (police->tcfp_R_tab)
+ police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
}
- if (p->P_tab)
- p->ptoks = L2T_P(p, p->mtu);
- PSCHED_GET_TIME(p->t_c);
- p->index = parm->index ? : tcf_police_new_index();
- p->action = parm->action;
+ if (police->tcfp_P_tab)
+ police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
+ PSCHED_GET_TIME(police->tcfp_t_c);
+ police->tcf_index = parm->index ? parm->index :
+ tcf_police_new_index();
+ police->tcf_action = parm->action;
#ifdef CONFIG_NET_ESTIMATOR
if (est)
- gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
+ gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
+ police->tcf_stats_lock, est);
#endif
- h = tcf_police_hash(p->index);
+ h = tcf_hash(police->tcf_index, POL_TAB_MASK);
write_lock_bh(&police_lock);
- p->next = tcf_police_ht[h];
- tcf_police_ht[h] = p;
+ police->tcf_next = tcf_police_ht[h];
+ tcf_police_ht[h] = &police->common;
write_unlock_bh(&police_lock);
- return p;
+ return police;
failure:
- if (p->R_tab)
- qdisc_put_rtab(p->R_tab);
- kfree(p);
+ if (police->tcfp_R_tab)
+ qdisc_put_rtab(police->tcfp_R_tab);
+ kfree(police);
return NULL;
}
-int tcf_police(struct sk_buff *skb, struct tcf_police *p)
+int tcf_police(struct sk_buff *skb, struct tcf_police *police)
{
psched_time_t now;
long toks;
long ptoks = 0;
- spin_lock(&p->lock);
+ spin_lock(&police->tcf_lock);
- p->bstats.bytes += skb->len;
- p->bstats.packets++;
+ police->tcf_bstats.bytes += skb->len;
+ police->tcf_bstats.packets++;
#ifdef CONFIG_NET_ESTIMATOR
- if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
- p->qstats.overlimits++;
- spin_unlock(&p->lock);
- return p->action;
+ if (police->tcfp_ewma_rate &&
+ police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
+ police->tcf_qstats.overlimits++;
+ spin_unlock(&police->tcf_lock);
+ return police->tcf_action;
}
#endif
-
- if (skb->len <= p->mtu) {
- if (p->R_tab == NULL) {
- spin_unlock(&p->lock);
- return p->result;
+ if (skb->len <= police->tcfp_mtu) {
+ if (police->tcfp_R_tab == NULL) {
+ spin_unlock(&police->tcf_lock);
+ return police->tcfp_result;
}
PSCHED_GET_TIME(now);
-
- toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);
-
- if (p->P_tab) {
- ptoks = toks + p->ptoks;
- if (ptoks > (long)L2T_P(p, p->mtu))
- ptoks = (long)L2T_P(p, p->mtu);
- ptoks -= L2T_P(p, skb->len);
+ toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
+ police->tcfp_burst);
+ if (police->tcfp_P_tab) {
+ ptoks = toks + police->tcfp_ptoks;
+ if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
+ ptoks = (long)L2T_P(police, police->tcfp_mtu);
+ ptoks -= L2T_P(police, skb->len);
}
- toks += p->toks;
- if (toks > (long)p->burst)
- toks = p->burst;
- toks -= L2T(p, skb->len);
-
+ toks += police->tcfp_toks;
+ if (toks > (long)police->tcfp_burst)
+ toks = police->tcfp_burst;
+ toks -= L2T(police, skb->len);
if ((toks|ptoks) >= 0) {
- p->t_c = now;
- p->toks = toks;
- p->ptoks = ptoks;
- spin_unlock(&p->lock);
- return p->result;
+ police->tcfp_t_c = now;
+ police->tcfp_toks = toks;
+ police->tcfp_ptoks = ptoks;
+ spin_unlock(&police->tcf_lock);
+ return police->tcfp_result;
}
}
- p->qstats.overlimits++;
- spin_unlock(&p->lock);
- return p->action;
+ police->tcf_qstats.overlimits++;
+ spin_unlock(&police->tcf_lock);
+ return police->tcf_action;
}
EXPORT_SYMBOL(tcf_police);
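
The conforming-packet test above is a classic dual token bucket (rate plus optional peak rate). A minimal userspace sketch of the same arithmetic, with illustrative names rather than the kernel API, and simplified in that the kernel additionally caps the elapsed-time credit against the burst and the peak bucket against L2T_P of the MTU:

	struct tb {
		long toks, ptoks;	/* spare rate/peak tokens */
		long burst, pburst;	/* bucket depths in token units */
		long long t_c;		/* last conforming checkpoint */
	};

	/* Returns 1 and charges the buckets if a packet costing
	 * cost/pcost tokens conforms; returns 0 and leaves the
	 * state untouched otherwise. */
	static int tb_conform(struct tb *b, long long now, long cost, long pcost)
	{
		long toks = (long)(now - b->t_c) + b->toks;
		long ptoks = (long)(now - b->t_c) + b->ptoks;

		if (toks > b->burst)
			toks = b->burst;
		if (ptoks > b->pburst)
			ptoks = b->pburst;
		toks -= cost;
		ptoks -= pcost;
		if ((toks | ptoks) >= 0) {	/* both stayed non-negative */
			b->t_c = now;
			b->toks = toks;
			b->ptoks = ptoks;
			return 1;
		}
		return 0;
	}

The (toks | ptoks) >= 0 trick works because the OR of two longs has its sign bit set exactly when at least one operand is negative.
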
-int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
+int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
{
- unsigned char *b = skb->tail;
+ unsigned char *b = skb->tail;
struct tc_police opt;
- opt.index = p->index;
- opt.action = p->action;
- opt.mtu = p->mtu;
- opt.burst = p->burst;
- if (p->R_tab)
- opt.rate = p->R_tab->rate;
+ opt.index = police->tcf_index;
+ opt.action = police->tcf_action;
+ opt.mtu = police->tcfp_mtu;
+ opt.burst = police->tcfp_burst;
+ if (police->tcfp_R_tab)
+ opt.rate = police->tcfp_R_tab->rate;
else
memset(&opt.rate, 0, sizeof(opt.rate));
- if (p->P_tab)
- opt.peakrate = p->P_tab->rate;
+ if (police->tcfp_P_tab)
+ opt.peakrate = police->tcfp_P_tab->rate;
else
memset(&opt.peakrate, 0, sizeof(opt.peakrate));
RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
- if (p->result)
- RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
+ if (police->tcfp_result)
+ RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
+ &police->tcfp_result);
#ifdef CONFIG_NET_ESTIMATOR
- if (p->ewma_rate)
- RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
+ if (police->tcfp_ewma_rate)
+ RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
#endif
return skb->len;
@@ -574,19 +585,20 @@ rtattr_failure:
return -1;
}
-int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p)
+int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
{
struct gnet_dump d;
if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
- TCA_XSTATS, p->stats_lock, &d) < 0)
+ TCA_XSTATS, police->tcf_stats_lock,
+ &d) < 0)
goto errout;
- if (gnet_stats_copy_basic(&d, &p->bstats) < 0 ||
+ if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
- gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 ||
+ gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
#endif
- gnet_stats_copy_queue(&d, &p->qstats) < 0)
+ gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
goto errout;
if (gnet_stats_finish_copy(&d) < 0)
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 17105c82537f..901571a67707 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -20,54 +20,175 @@
#define TCA_ACT_SIMP 22
-/* XXX: Hide all these common elements under some macro
- * probably
-*/
#include <linux/tc_act/tc_defact.h>
#include <net/tc_act/tc_defact.h>
-/* use generic hash table with 8 buckets */
-#define MY_TAB_SIZE 8
-#define MY_TAB_MASK (MY_TAB_SIZE - 1)
-static u32 idx_gen;
-static struct tcf_defact *tcf_simp_ht[MY_TAB_SIZE];
+#define SIMP_TAB_MASK 7
+static struct tcf_common *tcf_simp_ht[SIMP_TAB_MASK + 1];
+static u32 simp_idx_gen;
static DEFINE_RWLOCK(simp_lock);
-/* override the defaults */
-#define tcf_st tcf_defact
-#define tc_st tc_defact
-#define tcf_t_lock simp_lock
-#define tcf_ht tcf_simp_ht
-
-#define CONFIG_NET_ACT_INIT 1
-#include <net/pkt_act.h>
-#include <net/act_generic.h>
+static struct tcf_hashinfo simp_hash_info = {
+ .htab = tcf_simp_ht,
+ .hmask = SIMP_TAB_MASK,
+ .lock = &simp_lock,
+};
static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result *res)
{
- struct tcf_defact *p = PRIV(a, defact);
+ struct tcf_defact *d = a->priv;
- spin_lock(&p->lock);
- p->tm.lastuse = jiffies;
- p->bstats.bytes += skb->len;
- p->bstats.packets++;
+ spin_lock(&d->tcf_lock);
+ d->tcf_tm.lastuse = jiffies;
+ d->tcf_bstats.bytes += skb->len;
+ d->tcf_bstats.packets++;
	/* print policy string followed by _ then packet count
	 * Example: if this was the 3rd packet and the string was "hello"
	 * then it would look like "hello_3" (without quotes)
	 */
- printk("simple: %s_%d\n", (char *)p->defdata, p->bstats.packets);
- spin_unlock(&p->lock);
- return p->action;
+ printk("simple: %s_%d\n",
+ (char *)d->tcfd_defdata, d->tcf_bstats.packets);
+ spin_unlock(&d->tcf_lock);
+ return d->tcf_action;
+}
+
+static int tcf_simp_release(struct tcf_defact *d, int bind)
+{
+ int ret = 0;
+ if (d) {
+ if (bind)
+ d->tcf_bindcnt--;
+ d->tcf_refcnt--;
+ if (d->tcf_bindcnt <= 0 && d->tcf_refcnt <= 0) {
+ kfree(d->tcfd_defdata);
+ tcf_hash_destroy(&d->common, &simp_hash_info);
+ ret = 1;
+ }
+ }
+ return ret;
+}
+
+static int alloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata)
+{
+ d->tcfd_defdata = kmalloc(datalen, GFP_KERNEL);
+ if (unlikely(!d->tcfd_defdata))
+ return -ENOMEM;
+ d->tcfd_datalen = datalen;
+ memcpy(d->tcfd_defdata, defdata, datalen);
+ return 0;
+}
+
+static int realloc_defdata(struct tcf_defact *d, u32 datalen, void *defdata)
+{
+ kfree(d->tcfd_defdata);
+ return alloc_defdata(d, datalen, defdata);
+}
+
+static int tcf_simp_init(struct rtattr *rta, struct rtattr *est,
+ struct tc_action *a, int ovr, int bind)
+{
+ struct rtattr *tb[TCA_DEF_MAX];
+ struct tc_defact *parm;
+ struct tcf_defact *d;
+ struct tcf_common *pc;
+ void *defdata;
+ u32 datalen = 0;
+ int ret = 0;
+
+ if (rta == NULL || rtattr_parse_nested(tb, TCA_DEF_MAX, rta) < 0)
+ return -EINVAL;
+
+ if (tb[TCA_DEF_PARMS - 1] == NULL ||
+ RTA_PAYLOAD(tb[TCA_DEF_PARMS - 1]) < sizeof(*parm))
+ return -EINVAL;
+
+ parm = RTA_DATA(tb[TCA_DEF_PARMS - 1]);
+ defdata = RTA_DATA(tb[TCA_DEF_DATA - 1]);
+ if (defdata == NULL)
+ return -EINVAL;
+
+ datalen = RTA_PAYLOAD(tb[TCA_DEF_DATA - 1]);
+ if (datalen <= 0)
+ return -EINVAL;
+
+ pc = tcf_hash_check(parm->index, a, bind, &simp_hash_info);
+ if (!pc) {
+ pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind,
+ &simp_idx_gen, &simp_hash_info);
+ if (unlikely(!pc))
+ return -ENOMEM;
+
+ d = to_defact(pc);
+ ret = alloc_defdata(d, datalen, defdata);
+ if (ret < 0) {
+ kfree(pc);
+ return ret;
+ }
+ ret = ACT_P_CREATED;
+ } else {
+ d = to_defact(pc);
+ if (!ovr) {
+ tcf_simp_release(d, bind);
+ return -EEXIST;
+ }
+ realloc_defdata(d, datalen, defdata);
+ }
+
+ spin_lock_bh(&d->tcf_lock);
+ d->tcf_action = parm->action;
+ spin_unlock_bh(&d->tcf_lock);
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(pc, &simp_hash_info);
+ return ret;
+}
+
+static inline int tcf_simp_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_defact *d = a->priv;
+
+ if (d)
+ return tcf_simp_release(d, bind);
+ return 0;
+}
+
+static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ unsigned char *b = skb->tail;
+ struct tcf_defact *d = a->priv;
+ struct tc_defact opt;
+ struct tcf_t t;
+
+ opt.index = d->tcf_index;
+ opt.refcnt = d->tcf_refcnt - ref;
+ opt.bindcnt = d->tcf_bindcnt - bind;
+ opt.action = d->tcf_action;
+ RTA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
+ RTA_PUT(skb, TCA_DEF_DATA, d->tcfd_datalen, d->tcfd_defdata);
+ t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
+ t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
+ t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
+ RTA_PUT(skb, TCA_DEF_TM, sizeof(t), &t);
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
}
static struct tc_action_ops act_simp_ops = {
- .kind = "simple",
- .type = TCA_ACT_SIMP,
- .capab = TCA_CAP_NONE,
- .owner = THIS_MODULE,
- .act = tcf_simp,
- tca_use_default_ops
+ .kind = "simple",
+ .hinfo = &simp_hash_info,
+ .type = TCA_ACT_SIMP,
+ .capab = TCA_CAP_NONE,
+ .owner = THIS_MODULE,
+ .act = tcf_simp,
+ .dump = tcf_simp_dump,
+ .cleanup = tcf_simp_cleanup,
+ .init = tcf_simp_init,
+ .walk = tcf_generic_walker,
};
MODULE_AUTHOR("Jamal Hadi Salim(2005)");
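
For orientation, tcf_simp_init() above follows the create-or-reuse shape that this series moves every action to: look the index up in the shared hash, create on a miss, and refuse to overwrite an existing entry unless override was requested. A condensed sketch of that control flow (illustrative only; error handling and the defdata management are trimmed, and the helpers are the act_api ones used above):

	static int example_action_init(u32 index, struct rtattr *est,
				       struct tc_action *a, int ovr, int bind)
	{
		struct tcf_common *pc;
		struct tcf_defact *d;

		pc = tcf_hash_check(index, a, bind, &simp_hash_info);
		if (!pc) {
			pc = tcf_hash_create(index, est, a, sizeof(*d), bind,
					     &simp_idx_gen, &simp_hash_info);
			if (!pc)
				return -ENOMEM;
			d = to_defact(pc);
			/* ... fill in action-specific state here ... */
			tcf_hash_insert(pc, &simp_hash_info);
			return ACT_P_CREATED;
		}
		d = to_defact(pc);
		if (!ovr) {
			tcf_simp_release(d, bind);	/* drop the ref we took */
			return -EEXIST;
		}
		/* ... update action-specific state in place ... */
		return 0;
	}
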
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index e6973d9b686d..e54acc6bcccd 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -50,6 +50,7 @@
struct fw_head
{
struct fw_filter *ht[HTSIZE];
+ u32 mask;
};
struct fw_filter
@@ -101,7 +102,7 @@ static int fw_classify(struct sk_buff *skb, struct tcf_proto *tp,
struct fw_filter *f;
int r;
#ifdef CONFIG_NETFILTER
- u32 id = skb->nfmark;
+ u32 id = skb->nfmark & head->mask;
#else
u32 id = 0;
#endif
@@ -209,7 +210,9 @@ static int
fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f,
struct rtattr **tb, struct rtattr **tca, unsigned long base)
{
+ struct fw_head *head = (struct fw_head *)tp->root;
struct tcf_exts e;
+ u32 mask;
int err;
err = tcf_exts_validate(tp, tb, tca[TCA_RATE-1], &e, &fw_ext_map);
@@ -232,6 +235,15 @@ fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f,
}
#endif /* CONFIG_NET_CLS_IND */
+ if (tb[TCA_FW_MASK-1]) {
+ if (RTA_PAYLOAD(tb[TCA_FW_MASK-1]) != sizeof(u32))
+ goto errout;
+ mask = *(u32*)RTA_DATA(tb[TCA_FW_MASK-1]);
+ if (mask != head->mask)
+ goto errout;
+ } else if (head->mask != 0xFFFFFFFF)
+ goto errout;
+
tcf_exts_change(tp, &f->exts, &e);
return 0;
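
The effect of the mask is easiest to see with concrete numbers. A standalone illustration (not part of the patch) of the id = skb->nfmark & head->mask fold from fw_classify() above; marks that agree in the masked bits select the same filter id:

	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 0xFF;	/* example TCA_FW_MASK value */
		unsigned int marks[] = { 0x0101, 0x0201, 0x0102 };
		int i;

		/* 0x0101 and 0x0201 both map to id 0x01; 0x0102 maps to 0x02 */
		for (i = 0; i < 3; i++)
			printf("nfmark 0x%04x -> id 0x%02x\n",
			       marks[i], marks[i] & mask);
		return 0;
	}
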
@@ -267,9 +279,17 @@ static int fw_change(struct tcf_proto *tp, unsigned long base,
return -EINVAL;
if (head == NULL) {
+ u32 mask = 0xFFFFFFFF;
+ if (tb[TCA_FW_MASK-1]) {
+ if (RTA_PAYLOAD(tb[TCA_FW_MASK-1]) != sizeof(u32))
+ return -EINVAL;
+ mask = *(u32*)RTA_DATA(tb[TCA_FW_MASK-1]);
+ }
+
head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
if (head == NULL)
return -ENOBUFS;
+ head->mask = mask;
tcf_tree_lock(tp);
tp->root = head;
@@ -330,6 +350,7 @@ static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
static int fw_dump(struct tcf_proto *tp, unsigned long fh,
struct sk_buff *skb, struct tcmsg *t)
{
+ struct fw_head *head = (struct fw_head *)tp->root;
struct fw_filter *f = (struct fw_filter*)fh;
unsigned char *b = skb->tail;
struct rtattr *rta;
@@ -351,6 +372,8 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
if (strlen(f->indev))
RTA_PUT(skb, TCA_FW_INDEV, IFNAMSIZ, f->indev);
#endif /* CONFIG_NET_CLS_IND */
+ if (head->mask != 0xFFFFFFFF)
+ RTA_PUT(skb, TCA_FW_MASK, 4, &head->mask);
if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
goto rtattr_failure;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 880a3394a51f..bb3ddd4784b1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1,4 +1,4 @@
-/* vim: ts=8 sw=8
+/*
* net/sched/sch_htb.c Hierarchical token bucket, feed tree version
*
* This program is free software; you can redistribute it and/or
@@ -68,218 +68,165 @@
one less than their parent.
*/
-#define HTB_HSIZE 16 /* classid hash size */
-#define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
-#undef HTB_DEBUG /* compile debugging support (activated by tc tool) */
-#define HTB_RATECM 1 /* whether to use rate computer */
-#define HTB_HYSTERESIS 1/* whether to use mode hysteresis for speedup */
-#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
-#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
-#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
+#define HTB_HSIZE 16 /* classid hash size */
+#define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
+#define HTB_RATECM 1 /* whether to use rate computer */
+#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
+#define HTB_VER 0x30011 /* major must be matched with number supplied by TC as version */
#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif
-/* debugging support; S is subsystem, these are defined:
- 0 - netlink messages
- 1 - enqueue
- 2 - drop & requeue
- 3 - dequeue main
- 4 - dequeue one prio DRR part
- 5 - dequeue class accounting
- 6 - class overlimit status computation
- 7 - hint tree
- 8 - event queue
- 10 - rate estimator
- 11 - classifier
- 12 - fast dequeue cache
-
- L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
- q->debug uint32 contains 16 2-bit fields one for subsystem starting
- from LSB
- */
-#ifdef HTB_DEBUG
-#define HTB_DBG_COND(S,L) (((q->debug>>(2*S))&3) >= L)
-#define HTB_DBG(S,L,FMT,ARG...) if (HTB_DBG_COND(S,L)) \
- printk(KERN_DEBUG FMT,##ARG)
-#define HTB_CHCL(cl) BUG_TRAP((cl)->magic == HTB_CMAGIC)
-#define HTB_PASSQ q,
-#define HTB_ARGQ struct htb_sched *q,
-#define static
-#undef __inline__
-#define __inline__
-#undef inline
-#define inline
-#define HTB_CMAGIC 0xFEFAFEF1
-#define htb_safe_rb_erase(N,R) do { BUG_TRAP((N)->rb_color != -1); \
- if ((N)->rb_color == -1) break; \
- rb_erase(N,R); \
- (N)->rb_color = -1; } while (0)
-#else
-#define HTB_DBG_COND(S,L) (0)
-#define HTB_DBG(S,L,FMT,ARG...)
-#define HTB_PASSQ
-#define HTB_ARGQ
-#define HTB_CHCL(cl)
-#define htb_safe_rb_erase(N,R) rb_erase(N,R)
-#endif
-
-
/* used internally to keep the status of a single class */
enum htb_cmode {
- HTB_CANT_SEND, /* class can't send and can't borrow */
- HTB_MAY_BORROW, /* class can't send but may borrow */
- HTB_CAN_SEND /* class can send */
+ HTB_CANT_SEND, /* class can't send and can't borrow */
+ HTB_MAY_BORROW, /* class can't send but may borrow */
+ HTB_CAN_SEND /* class can send */
};
/* interior & leaf nodes; props specific to leaves are marked L: */
-struct htb_class
-{
-#ifdef HTB_DEBUG
- unsigned magic;
-#endif
- /* general class parameters */
- u32 classid;
- struct gnet_stats_basic bstats;
- struct gnet_stats_queue qstats;
- struct gnet_stats_rate_est rate_est;
- struct tc_htb_xstats xstats;/* our special stats */
- int refcnt; /* usage count of this class */
+struct htb_class {
+ /* general class parameters */
+ u32 classid;
+ struct gnet_stats_basic bstats;
+ struct gnet_stats_queue qstats;
+ struct gnet_stats_rate_est rate_est;
+ struct tc_htb_xstats xstats; /* our special stats */
+ int refcnt; /* usage count of this class */
#ifdef HTB_RATECM
- /* rate measurement counters */
- unsigned long rate_bytes,sum_bytes;
- unsigned long rate_packets,sum_packets;
+ /* rate measurement counters */
+ unsigned long rate_bytes, sum_bytes;
+ unsigned long rate_packets, sum_packets;
#endif
- /* topology */
- int level; /* our level (see above) */
- struct htb_class *parent; /* parent class */
- struct list_head hlist; /* classid hash list item */
- struct list_head sibling; /* sibling list item */
- struct list_head children; /* children list */
-
- union {
- struct htb_class_leaf {
- struct Qdisc *q;
- int prio;
- int aprio;
- int quantum;
- int deficit[TC_HTB_MAXDEPTH];
- struct list_head drop_list;
- } leaf;
- struct htb_class_inner {
- struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
- struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
- /* When class changes from state 1->2 and disconnects from
- parent's feed then we lost ptr value and start from the
- first child again. Here we store classid of the
- last valid ptr (used when ptr is NULL). */
- u32 last_ptr_id[TC_HTB_NUMPRIO];
- } inner;
- } un;
- struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
- struct rb_node pq_node; /* node for event queue */
- unsigned long pq_key; /* the same type as jiffies global */
-
- int prio_activity; /* for which prios are we active */
- enum htb_cmode cmode; /* current mode of the class */
-
- /* class attached filters */
- struct tcf_proto *filter_list;
- int filter_cnt;
-
- int warned; /* only one warning about non work conserving .. */
-
- /* token bucket parameters */
- struct qdisc_rate_table *rate; /* rate table of the class itself */
- struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
- long buffer,cbuffer; /* token bucket depth/rate */
- psched_tdiff_t mbuffer; /* max wait time */
- long tokens,ctokens; /* current number of tokens */
- psched_time_t t_c; /* checkpoint time */
+ /* topology */
+ int level; /* our level (see above) */
+ struct htb_class *parent; /* parent class */
+ struct hlist_node hlist; /* classid hash list item */
+ struct list_head sibling; /* sibling list item */
+ struct list_head children; /* children list */
+
+ union {
+ struct htb_class_leaf {
+ struct Qdisc *q;
+ int prio;
+ int aprio;
+ int quantum;
+ int deficit[TC_HTB_MAXDEPTH];
+ struct list_head drop_list;
+ } leaf;
+ struct htb_class_inner {
+ struct rb_root feed[TC_HTB_NUMPRIO]; /* feed trees */
+ struct rb_node *ptr[TC_HTB_NUMPRIO]; /* current class ptr */
+			/* When a class changes from state 1->2 and disconnects
+			   from the parent's feed we lose the ptr value and start
+			   from the first child again. Here we store the classid
+			   of the last valid ptr (used when ptr is NULL). */
+ u32 last_ptr_id[TC_HTB_NUMPRIO];
+ } inner;
+ } un;
+ struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
+ struct rb_node pq_node; /* node for event queue */
+ unsigned long pq_key; /* the same type as jiffies global */
+
+ int prio_activity; /* for which prios are we active */
+ enum htb_cmode cmode; /* current mode of the class */
+
+ /* class attached filters */
+ struct tcf_proto *filter_list;
+ int filter_cnt;
+
+ int warned; /* only one warning about non work conserving .. */
+
+ /* token bucket parameters */
+ struct qdisc_rate_table *rate; /* rate table of the class itself */
+ struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
+ long buffer, cbuffer; /* token bucket depth/rate */
+ psched_tdiff_t mbuffer; /* max wait time */
+ long tokens, ctokens; /* current number of tokens */
+ psched_time_t t_c; /* checkpoint time */
};
/* TODO: maybe compute rate when size is too large .. or drop ? */
-static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
- int size)
-{
- int slot = size >> rate->rate.cell_log;
- if (slot > 255) {
- cl->xstats.giants++;
- slot = 255;
- }
- return rate->data[slot];
+static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
+ int size)
+{
+ int slot = size >> rate->rate.cell_log;
+ if (slot > 255) {
+ cl->xstats.giants++;
+ slot = 255;
+ }
+ return rate->data[slot];
}
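
A hand-worked instance of the lookup above (illustrative numbers, not from the patch): with cell_log = 3, a 1000-byte packet indexes slot 1000 >> 3 = 125, comfortably inside the 256-entry rate table, while a 3000-byte packet would index slot 375, so it saturates to slot 255 and is counted in xstats.giants.
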
-struct htb_sched
-{
- struct list_head root; /* root classes list */
- struct list_head hash[HTB_HSIZE]; /* hashed by classid */
- struct list_head drops[TC_HTB_NUMPRIO]; /* active leaves (for drops) */
-
- /* self list - roots of self generating tree */
- struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
- int row_mask[TC_HTB_MAXDEPTH];
- struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
- u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
-
- /* self wait list - roots of wait PQs per row */
- struct rb_root wait_pq[TC_HTB_MAXDEPTH];
-
- /* time of nearest event per level (row) */
- unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
-
- /* cached value of jiffies in dequeue */
- unsigned long jiffies;
-
- /* whether we hit non-work conserving class during this dequeue; we use */
- int nwc_hit; /* this to disable mindelay complaint in dequeue */
-
- int defcls; /* class where unclassified flows go to */
- u32 debug; /* subsystem debug levels */
-
- /* filters for qdisc itself */
- struct tcf_proto *filter_list;
- int filter_cnt;
-
- int rate2quantum; /* quant = rate / rate2quantum */
- psched_time_t now; /* cached dequeue time */
- struct timer_list timer; /* send delay timer */
+struct htb_sched {
+ struct list_head root; /* root classes list */
+ struct hlist_head hash[HTB_HSIZE]; /* hashed by classid */
+ struct list_head drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
+
+ /* self list - roots of self generating tree */
+ struct rb_root row[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
+ int row_mask[TC_HTB_MAXDEPTH];
+ struct rb_node *ptr[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
+ u32 last_ptr_id[TC_HTB_MAXDEPTH][TC_HTB_NUMPRIO];
+
+ /* self wait list - roots of wait PQs per row */
+ struct rb_root wait_pq[TC_HTB_MAXDEPTH];
+
+ /* time of nearest event per level (row) */
+ unsigned long near_ev_cache[TC_HTB_MAXDEPTH];
+
+ /* cached value of jiffies in dequeue */
+ unsigned long jiffies;
+
+	/* whether we hit a non-work-conserving class during this dequeue;
+	   used to suppress the mindelay complaint in dequeue */
+	int nwc_hit;
+
+ int defcls; /* class where unclassified flows go to */
+
+ /* filters for qdisc itself */
+ struct tcf_proto *filter_list;
+ int filter_cnt;
+
+ int rate2quantum; /* quant = rate / rate2quantum */
+ psched_time_t now; /* cached dequeue time */
+ struct timer_list timer; /* send delay timer */
#ifdef HTB_RATECM
- struct timer_list rttim; /* rate computer timer */
- int recmp_bucket; /* which hash bucket to recompute next */
+ struct timer_list rttim; /* rate computer timer */
+ int recmp_bucket; /* which hash bucket to recompute next */
#endif
-
- /* non shaped skbs; let them go directly thru */
- struct sk_buff_head direct_queue;
- int direct_qlen; /* max qlen of above */
- long direct_pkts;
+	/* non-shaped skbs; let them go directly through */
+ struct sk_buff_head direct_queue;
+ int direct_qlen; /* max qlen of above */
+
+ long direct_pkts;
};
/* compute hash of size HTB_HSIZE for given handle */
-static __inline__ int htb_hash(u32 h)
+static inline int htb_hash(u32 h)
{
#if HTB_HSIZE != 16
- #error "Declare new hash for your HTB_HSIZE"
+#error "Declare new hash for your HTB_HSIZE"
#endif
- h ^= h>>8; /* stolen from cbq_hash */
- h ^= h>>4;
- return h & 0xf;
+ h ^= h >> 8; /* stolen from cbq_hash */
+ h ^= h >> 4;
+ return h & 0xf;
}
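
Hand-working the fold (an illustrative value, not from the patch): for h = 0x1234, h ^= h >> 8 gives 0x1226, h ^= h >> 4 gives 0x1304, and the low nibble selects bucket 4.
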
/* find class in global hash table using given handle */
-static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
+static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
- struct list_head *p;
- if (TC_H_MAJ(handle) != sch->handle)
+ struct hlist_node *p;
+ struct htb_class *cl;
+
+ if (TC_H_MAJ(handle) != sch->handle)
return NULL;
-
- list_for_each (p,q->hash+htb_hash(handle)) {
- struct htb_class *cl = list_entry(p,struct htb_class,hlist);
+
+ hlist_for_each_entry(cl, p, q->hash + htb_hash(handle), hlist) {
if (cl->classid == handle)
return cl;
}
@@ -304,7 +251,8 @@ static inline u32 htb_classid(struct htb_class *cl)
return (cl && cl != HTB_DIRECT) ? cl->classid : TC_H_UNSPEC;
}
-static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
+static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
+ int *qerr)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl;
@@ -316,8 +264,8 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
	   note that nfmark can be used too, by attaching an fw filter with no
	   rules in it */
if (skb->priority == sch->handle)
- return HTB_DIRECT; /* X:0 (direct flow) selected */
- if ((cl = htb_find(skb->priority,sch)) != NULL && cl->level == 0)
+ return HTB_DIRECT; /* X:0 (direct flow) selected */
+ if ((cl = htb_find(skb->priority, sch)) != NULL && cl->level == 0)
return cl;
*qerr = NET_XMIT_BYPASS;
@@ -326,7 +274,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
- case TC_ACT_STOLEN:
+ case TC_ACT_STOLEN:
*qerr = NET_XMIT_SUCCESS;
case TC_ACT_SHOT:
return NULL;
@@ -335,97 +283,44 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch, in
if (result == TC_POLICE_SHOT)
return HTB_DIRECT;
#endif
- if ((cl = (void*)res.class) == NULL) {
+ if ((cl = (void *)res.class) == NULL) {
if (res.classid == sch->handle)
- return HTB_DIRECT; /* X:0 (direct flow) */
- if ((cl = htb_find(res.classid,sch)) == NULL)
- break; /* filter selected invalid classid */
+ return HTB_DIRECT; /* X:0 (direct flow) */
+ if ((cl = htb_find(res.classid, sch)) == NULL)
+ break; /* filter selected invalid classid */
}
if (!cl->level)
- return cl; /* we hit leaf; return it */
+ return cl; /* we hit leaf; return it */
/* we have got inner class; apply inner filter chain */
tcf = cl->filter_list;
}
/* classification failed; try to use default class */
- cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
+ cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
if (!cl || cl->level)
- return HTB_DIRECT; /* bad default .. this is safe bet */
+ return HTB_DIRECT; /* bad default .. this is safe bet */
return cl;
}
-#ifdef HTB_DEBUG
-static void htb_next_rb_node(struct rb_node **n);
-#define HTB_DUMTREE(root,memb) if(root) { \
- struct rb_node *n = (root)->rb_node; \
- while (n->rb_left) n = n->rb_left; \
- while (n) { \
- struct htb_class *cl = rb_entry(n, struct htb_class, memb); \
- printk(" %x",cl->classid); htb_next_rb_node (&n); \
- } }
-
-static void htb_debug_dump (struct htb_sched *q)
-{
- int i,p;
- printk(KERN_DEBUG "htb*g j=%lu lj=%lu\n",jiffies,q->jiffies);
- /* rows */
- for (i=TC_HTB_MAXDEPTH-1;i>=0;i--) {
- printk(KERN_DEBUG "htb*r%d m=%x",i,q->row_mask[i]);
- for (p=0;p<TC_HTB_NUMPRIO;p++) {
- if (!q->row[i][p].rb_node) continue;
- printk(" p%d:",p);
- HTB_DUMTREE(q->row[i]+p,node[p]);
- }
- printk("\n");
- }
- /* classes */
- for (i = 0; i < HTB_HSIZE; i++) {
- struct list_head *l;
- list_for_each (l,q->hash+i) {
- struct htb_class *cl = list_entry(l,struct htb_class,hlist);
- long diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
- printk(KERN_DEBUG "htb*c%x m=%d t=%ld c=%ld pq=%lu df=%ld ql=%d "
- "pa=%x f:",
- cl->classid,cl->cmode,cl->tokens,cl->ctokens,
- cl->pq_node.rb_color==-1?0:cl->pq_key,diff,
- cl->level?0:cl->un.leaf.q->q.qlen,cl->prio_activity);
- if (cl->level)
- for (p=0;p<TC_HTB_NUMPRIO;p++) {
- if (!cl->un.inner.feed[p].rb_node) continue;
- printk(" p%d a=%x:",p,cl->un.inner.ptr[p]?rb_entry(cl->un.inner.ptr[p], struct htb_class,node[p])->classid:0);
- HTB_DUMTREE(cl->un.inner.feed+p,node[p]);
- }
- printk("\n");
- }
- }
-}
-#endif
/**
* htb_add_to_id_tree - adds class to the round robin list
*
* Routine adds class to the list (actually tree) sorted by classid.
* Make sure that class is not already on such list for given prio.
*/
-static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
- struct htb_class *cl,int prio)
+static void htb_add_to_id_tree(struct rb_root *root,
+ struct htb_class *cl, int prio)
{
struct rb_node **p = &root->rb_node, *parent = NULL;
- HTB_DBG(7,3,"htb_add_id_tree cl=%X prio=%d\n",cl->classid,prio);
-#ifdef HTB_DEBUG
- if (cl->node[prio].rb_color != -1) { BUG_TRAP(0); return; }
- HTB_CHCL(cl);
- if (*p) {
- struct htb_class *x = rb_entry(*p,struct htb_class,node[prio]);
- HTB_CHCL(x);
- }
-#endif
+
while (*p) {
- struct htb_class *c; parent = *p;
+ struct htb_class *c;
+ parent = *p;
c = rb_entry(parent, struct htb_class, node[prio]);
- HTB_CHCL(c);
+
if (cl->classid > c->classid)
p = &parent->rb_right;
- else
+ else
p = &parent->rb_left;
}
rb_link_node(&cl->node[prio], parent, p);
@@ -439,17 +334,11 @@ static void htb_add_to_id_tree (HTB_ARGQ struct rb_root *root,
* change its mode in cl->pq_key microseconds. Make sure that class is not
* already in the queue.
*/
-static void htb_add_to_wait_tree (struct htb_sched *q,
- struct htb_class *cl,long delay,int debug_hint)
+static void htb_add_to_wait_tree(struct htb_sched *q,
+ struct htb_class *cl, long delay)
{
struct rb_node **p = &q->wait_pq[cl->level].rb_node, *parent = NULL;
- HTB_DBG(7,3,"htb_add_wt cl=%X key=%lu\n",cl->classid,cl->pq_key);
-#ifdef HTB_DEBUG
- if (cl->pq_node.rb_color != -1) { BUG_TRAP(0); return; }
- HTB_CHCL(cl);
- if ((delay <= 0 || delay > cl->mbuffer) && net_ratelimit())
- printk(KERN_ERR "HTB: suspicious delay in wait_tree d=%ld cl=%X h=%d\n",delay,cl->classid,debug_hint);
-#endif
+
cl->pq_key = q->jiffies + PSCHED_US2JIFFIE(delay);
if (cl->pq_key == q->jiffies)
cl->pq_key++;
@@ -457,13 +346,14 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
/* update the nearest event cache */
if (time_after(q->near_ev_cache[cl->level], cl->pq_key))
q->near_ev_cache[cl->level] = cl->pq_key;
-
+
while (*p) {
- struct htb_class *c; parent = *p;
+ struct htb_class *c;
+ parent = *p;
c = rb_entry(parent, struct htb_class, pq_node);
if (time_after_eq(cl->pq_key, c->pq_key))
p = &parent->rb_right;
- else
+ else
p = &parent->rb_left;
}
rb_link_node(&cl->pq_node, parent, p);
@@ -476,7 +366,7 @@ static void htb_add_to_wait_tree (struct htb_sched *q,
* When we are past last key we return NULL.
* Average complexity is 2 steps per call.
*/
-static void htb_next_rb_node(struct rb_node **n)
+static inline void htb_next_rb_node(struct rb_node **n)
{
*n = rb_next(*n);
}
@@ -487,42 +377,51 @@ static void htb_next_rb_node(struct rb_node **n)
* The class is added to row at priorities marked in mask.
* It does nothing if mask == 0.
*/
-static inline void htb_add_class_to_row(struct htb_sched *q,
- struct htb_class *cl,int mask)
+static inline void htb_add_class_to_row(struct htb_sched *q,
+ struct htb_class *cl, int mask)
{
- HTB_DBG(7,2,"htb_addrow cl=%X mask=%X rmask=%X\n",
- cl->classid,mask,q->row_mask[cl->level]);
- HTB_CHCL(cl);
q->row_mask[cl->level] |= mask;
while (mask) {
int prio = ffz(~mask);
mask &= ~(1 << prio);
- htb_add_to_id_tree(HTB_PASSQ q->row[cl->level]+prio,cl,prio);
+ htb_add_to_id_tree(q->row[cl->level] + prio, cl, prio);
+ }
+}
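
The while (mask) loop relies on the ffz(~mask) idiom for iterating over set bits: ffz returns the first zero bit of its argument, so ffz(~mask) is the first set bit of mask. Worked through for mask = 0b0101 (hand-computed): the first round yields prio 0 and clears that bit, the second yields prio 2, and the loop ends with mask = 0.
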
+
+/* If this triggers, it is a bug in this code, but it need not be fatal */
+static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
+{
+ if (RB_EMPTY_NODE(rb)) {
+ WARN_ON(1);
+ } else {
+ rb_erase(rb, root);
+ RB_CLEAR_NODE(rb);
}
}
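
This works because RB_CLEAR_NODE marks a detached node (its parent pointer is made to point at itself) and RB_EMPTY_NODE tests exactly that, so erasing a node twice trips the WARN_ON instead of silently corrupting the tree. A minimal illustration (hypothetical root, not from the patch):

	struct rb_root some_root = RB_ROOT;
	struct rb_node n;

	RB_CLEAR_NODE(&n);			/* "linked in no tree" */
	htb_safe_rb_erase(&n, &some_root);	/* WARN_ON(1); tree intact */
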
+
/**
* htb_remove_class_from_row - removes class from its row
*
* The class is removed from row at priorities marked in mask.
* It does nothing if mask == 0.
*/
-static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
- struct htb_class *cl,int mask)
+static inline void htb_remove_class_from_row(struct htb_sched *q,
+ struct htb_class *cl, int mask)
{
int m = 0;
- HTB_CHCL(cl);
+
while (mask) {
int prio = ffz(~mask);
+
mask &= ~(1 << prio);
- if (q->ptr[cl->level][prio] == cl->node+prio)
- htb_next_rb_node(q->ptr[cl->level]+prio);
- htb_safe_rb_erase(cl->node + prio,q->row[cl->level]+prio);
- if (!q->row[cl->level][prio].rb_node)
+ if (q->ptr[cl->level][prio] == cl->node + prio)
+ htb_next_rb_node(q->ptr[cl->level] + prio);
+
+ htb_safe_rb_erase(cl->node + prio, q->row[cl->level] + prio);
+ if (!q->row[cl->level][prio].rb_node)
m |= 1 << prio;
}
- HTB_DBG(7,2,"htb_delrow cl=%X mask=%X rmask=%X maskdel=%X\n",
- cl->classid,mask,q->row_mask[cl->level],m);
q->row_mask[cl->level] &= ~m;
}
@@ -533,34 +432,31 @@ static __inline__ void htb_remove_class_from_row(struct htb_sched *q,
* for priorities it is participating on. cl->cmode must be new
* (activated) mode. It does nothing if cl->prio_activity == 0.
*/
-static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
+static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
struct htb_class *p = cl->parent;
- long m,mask = cl->prio_activity;
- HTB_DBG(7,2,"htb_act_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
- HTB_CHCL(cl);
+ long m, mask = cl->prio_activity;
while (cl->cmode == HTB_MAY_BORROW && p && mask) {
- HTB_CHCL(p);
- m = mask; while (m) {
+ m = mask;
+ while (m) {
int prio = ffz(~m);
m &= ~(1 << prio);
-
+
if (p->un.inner.feed[prio].rb_node)
				/* parent already has its feed in use, so reset
				   the bit in mask as the parent is already ok */
mask &= ~(1 << prio);
-
- htb_add_to_id_tree(HTB_PASSQ p->un.inner.feed+prio,cl,prio);
+
+ htb_add_to_id_tree(p->un.inner.feed + prio, cl, prio);
}
- HTB_DBG(7,3,"htb_act_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
- p->classid,p->prio_activity,mask,p->cmode);
p->prio_activity |= mask;
- cl = p; p = cl->parent;
- HTB_CHCL(cl);
+ cl = p;
+ p = cl->parent;
+
}
if (cl->cmode == HTB_CAN_SEND && mask)
- htb_add_class_to_row(q,cl,mask);
+ htb_add_class_to_row(q, cl, mask);
}
/**
@@ -573,39 +469,52 @@ static void htb_activate_prios(struct htb_sched *q,struct htb_class *cl)
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
struct htb_class *p = cl->parent;
- long m,mask = cl->prio_activity;
- HTB_DBG(7,2,"htb_deact_prios cl=%X mask=%lX cmode=%d\n",cl->classid,mask,cl->cmode);
- HTB_CHCL(cl);
+ long m, mask = cl->prio_activity;
while (cl->cmode == HTB_MAY_BORROW && p && mask) {
- m = mask; mask = 0;
+ m = mask;
+ mask = 0;
while (m) {
int prio = ffz(~m);
m &= ~(1 << prio);
-
- if (p->un.inner.ptr[prio] == cl->node+prio) {
+
+ if (p->un.inner.ptr[prio] == cl->node + prio) {
/* we are removing child which is pointed to from
parent feed - forget the pointer but remember
classid */
p->un.inner.last_ptr_id[prio] = cl->classid;
p->un.inner.ptr[prio] = NULL;
}
-
- htb_safe_rb_erase(cl->node + prio,p->un.inner.feed + prio);
-
- if (!p->un.inner.feed[prio].rb_node)
+
+ htb_safe_rb_erase(cl->node + prio, p->un.inner.feed + prio);
+
+ if (!p->un.inner.feed[prio].rb_node)
mask |= 1 << prio;
}
- HTB_DBG(7,3,"htb_deact_pr_aft p=%X pact=%X mask=%lX pmode=%d\n",
- p->classid,p->prio_activity,mask,p->cmode);
+
p->prio_activity &= ~mask;
- cl = p; p = cl->parent;
- HTB_CHCL(cl);
+ cl = p;
+ p = cl->parent;
+
}
- if (cl->cmode == HTB_CAN_SEND && mask)
- htb_remove_class_from_row(q,cl,mask);
+ if (cl->cmode == HTB_CAN_SEND && mask)
+ htb_remove_class_from_row(q, cl, mask);
}
+#if HTB_HYSTERESIS
+static inline long htb_lowater(const struct htb_class *cl)
+{
+ return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
+}
+static inline long htb_hiwater(const struct htb_class *cl)
+{
+ return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
+}
+#else
+#define htb_lowater(cl) (0)
+#define htb_hiwater(cl) (0)
+#endif
+
/**
* htb_class_mode - computes and returns current class mode
*
@@ -617,28 +526,21 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
* 0 .. -cl->{c,}buffer range. It is meant to limit number of
* mode transitions per time unit. The speed gain is about 1/6.
*/
-static __inline__ enum htb_cmode
-htb_class_mode(struct htb_class *cl,long *diff)
+static inline enum htb_cmode
+htb_class_mode(struct htb_class *cl, long *diff)
{
- long toks;
+ long toks;
- if ((toks = (cl->ctokens + *diff)) < (
-#if HTB_HYSTERESIS
- cl->cmode != HTB_CANT_SEND ? -cl->cbuffer :
-#endif
- 0)) {
- *diff = -toks;
- return HTB_CANT_SEND;
- }
- if ((toks = (cl->tokens + *diff)) >= (
-#if HTB_HYSTERESIS
- cl->cmode == HTB_CAN_SEND ? -cl->buffer :
-#endif
- 0))
- return HTB_CAN_SEND;
+ if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
+ *diff = -toks;
+ return HTB_CANT_SEND;
+ }
+
+ if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
+ return HTB_CAN_SEND;
- *diff = -toks;
- return HTB_MAY_BORROW;
+ *diff = -toks;
+ return HTB_MAY_BORROW;
}
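
Concretely (a hand-worked case, ignoring hysteresis so both watermarks are 0): with ctokens = 3000, tokens = -500 and *diff = 0, the first test passes (3000 >= 0, so the class is not HTB_CANT_SEND), the second fails (-500 < 0), and the function returns HTB_MAY_BORROW with *diff set to 500, the rate-token deficit the class must accumulate before it may send on its own.
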
/**
@@ -650,24 +552,21 @@ htb_class_mode(struct htb_class *cl,long *diff)
* be different from old one and cl->pq_key has to be valid if changing
* to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
*/
-static void
+static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
-{
- enum htb_cmode new_mode = htb_class_mode(cl,diff);
-
- HTB_CHCL(cl);
- HTB_DBG(7,1,"htb_chging_clmode %d->%d cl=%X\n",cl->cmode,new_mode,cl->classid);
+{
+ enum htb_cmode new_mode = htb_class_mode(cl, diff);
if (new_mode == cl->cmode)
- return;
-
- if (cl->prio_activity) { /* not necessary: speed optimization */
- if (cl->cmode != HTB_CANT_SEND)
- htb_deactivate_prios(q,cl);
+ return;
+
+ if (cl->prio_activity) { /* not necessary: speed optimization */
+ if (cl->cmode != HTB_CANT_SEND)
+ htb_deactivate_prios(q, cl);
cl->cmode = new_mode;
- if (new_mode != HTB_CANT_SEND)
- htb_activate_prios(q,cl);
- } else
+ if (new_mode != HTB_CANT_SEND)
+ htb_activate_prios(q, cl);
+ } else
cl->cmode = new_mode;
}
@@ -678,14 +577,15 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, long *diff)
* for the prio. It can be called on already active leaf safely.
* It also adds leaf into droplist.
*/
-static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
+static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
BUG_TRAP(!cl->level && cl->un.leaf.q && cl->un.leaf.q->q.qlen);
- HTB_CHCL(cl);
+
if (!cl->prio_activity) {
cl->prio_activity = 1 << (cl->un.leaf.aprio = cl->un.leaf.prio);
- htb_activate_prios(q,cl);
- list_add_tail(&cl->un.leaf.drop_list,q->drops+cl->un.leaf.aprio);
+ htb_activate_prios(q, cl);
+ list_add_tail(&cl->un.leaf.drop_list,
+ q->drops + cl->un.leaf.aprio);
}
}
@@ -695,120 +595,120 @@ static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
 * Make sure that leaf is active. In other words it can't be called
* with non-active leaf. It also removes class from the drop list.
*/
-static __inline__ void
-htb_deactivate(struct htb_sched *q,struct htb_class *cl)
+static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
BUG_TRAP(cl->prio_activity);
- HTB_CHCL(cl);
- htb_deactivate_prios(q,cl);
+
+ htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
list_del_init(&cl->un.leaf.drop_list);
}
static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
- int ret;
- struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl = htb_classify(skb,sch,&ret);
-
- if (cl == HTB_DIRECT) {
- /* enqueue to helper queue */
- if (q->direct_queue.qlen < q->direct_qlen) {
- __skb_queue_tail(&q->direct_queue, skb);
- q->direct_pkts++;
- } else {
- kfree_skb(skb);
- sch->qstats.drops++;
- return NET_XMIT_DROP;
- }
+ int ret;
+ struct htb_sched *q = qdisc_priv(sch);
+ struct htb_class *cl = htb_classify(skb, sch, &ret);
+
+ if (cl == HTB_DIRECT) {
+ /* enqueue to helper queue */
+ if (q->direct_queue.qlen < q->direct_qlen) {
+ __skb_queue_tail(&q->direct_queue, skb);
+ q->direct_pkts++;
+ } else {
+ kfree_skb(skb);
+ sch->qstats.drops++;
+ return NET_XMIT_DROP;
+ }
#ifdef CONFIG_NET_CLS_ACT
- } else if (!cl) {
- if (ret == NET_XMIT_BYPASS)
- sch->qstats.drops++;
- kfree_skb (skb);
- return ret;
+ } else if (!cl) {
+ if (ret == NET_XMIT_BYPASS)
+ sch->qstats.drops++;
+ kfree_skb(skb);
+ return ret;
#endif
- } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
- cl->qstats.drops++;
- return NET_XMIT_DROP;
- } else {
- cl->bstats.packets++; cl->bstats.bytes += skb->len;
- htb_activate (q,cl);
- }
-
- sch->q.qlen++;
- sch->bstats.packets++; sch->bstats.bytes += skb->len;
- HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
- return NET_XMIT_SUCCESS;
+ } else if (cl->un.leaf.q->enqueue(skb, cl->un.leaf.q) !=
+ NET_XMIT_SUCCESS) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ return NET_XMIT_DROP;
+ } else {
+ cl->bstats.packets++;
+ cl->bstats.bytes += skb->len;
+ htb_activate(q, cl);
+ }
+
+ sch->q.qlen++;
+ sch->bstats.packets++;
+ sch->bstats.bytes += skb->len;
+ return NET_XMIT_SUCCESS;
}
/* TODO: requeuing a packet charges it to policers again !! */
static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
- struct htb_sched *q = qdisc_priv(sch);
- int ret = NET_XMIT_SUCCESS;
- struct htb_class *cl = htb_classify(skb,sch, &ret);
- struct sk_buff *tskb;
-
- if (cl == HTB_DIRECT || !cl) {
- /* enqueue to helper queue */
- if (q->direct_queue.qlen < q->direct_qlen && cl) {
- __skb_queue_head(&q->direct_queue, skb);
- } else {
- __skb_queue_head(&q->direct_queue, skb);
- tskb = __skb_dequeue_tail(&q->direct_queue);
- kfree_skb (tskb);
- sch->qstats.drops++;
- return NET_XMIT_CN;
- }
- } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
- sch->qstats.drops++;
- cl->qstats.drops++;
- return NET_XMIT_DROP;
- } else
- htb_activate (q,cl);
-
- sch->q.qlen++;
- sch->qstats.requeues++;
- HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",(cl && cl != HTB_DIRECT)?cl->classid:0,skb);
- return NET_XMIT_SUCCESS;
+ struct htb_sched *q = qdisc_priv(sch);
+ int ret = NET_XMIT_SUCCESS;
+ struct htb_class *cl = htb_classify(skb, sch, &ret);
+ struct sk_buff *tskb;
+
+ if (cl == HTB_DIRECT || !cl) {
+ /* enqueue to helper queue */
+ if (q->direct_queue.qlen < q->direct_qlen && cl) {
+ __skb_queue_head(&q->direct_queue, skb);
+ } else {
+ __skb_queue_head(&q->direct_queue, skb);
+ tskb = __skb_dequeue_tail(&q->direct_queue);
+ kfree_skb(tskb);
+ sch->qstats.drops++;
+ return NET_XMIT_CN;
+ }
+ } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+ NET_XMIT_SUCCESS) {
+ sch->qstats.drops++;
+ cl->qstats.drops++;
+ return NET_XMIT_DROP;
+ } else
+ htb_activate(q, cl);
+
+ sch->q.qlen++;
+ sch->qstats.requeues++;
+ return NET_XMIT_SUCCESS;
}
static void htb_timer(unsigned long arg)
{
- struct Qdisc *sch = (struct Qdisc*)arg;
- sch->flags &= ~TCQ_F_THROTTLED;
- wmb();
- netif_schedule(sch->dev);
+ struct Qdisc *sch = (struct Qdisc *)arg;
+ sch->flags &= ~TCQ_F_THROTTLED;
+ wmb();
+ netif_schedule(sch->dev);
}
#ifdef HTB_RATECM
#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
static void htb_rate_timer(unsigned long arg)
{
- struct Qdisc *sch = (struct Qdisc*)arg;
+ struct Qdisc *sch = (struct Qdisc *)arg;
struct htb_sched *q = qdisc_priv(sch);
- struct list_head *p;
+ struct hlist_node *p;
+ struct htb_class *cl;
+
/* lock queue so that we can muck with it */
- HTB_QLOCK(sch);
- HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);
+ spin_lock_bh(&sch->dev->queue_lock);
q->rttim.expires = jiffies + HZ;
add_timer(&q->rttim);
/* scan and recompute one bucket at time */
- if (++q->recmp_bucket >= HTB_HSIZE)
+ if (++q->recmp_bucket >= HTB_HSIZE)
q->recmp_bucket = 0;
- list_for_each (p,q->hash+q->recmp_bucket) {
- struct htb_class *cl = list_entry(p,struct htb_class,hlist);
- HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",
- cl->classid,cl->sum_bytes,cl->sum_packets);
- RT_GEN (cl->sum_bytes,cl->rate_bytes);
- RT_GEN (cl->sum_packets,cl->rate_packets);
+
+	hlist_for_each_entry(cl, p, q->hash + q->recmp_bucket, hlist) {
+ RT_GEN(cl->sum_bytes, cl->rate_bytes);
+ RT_GEN(cl->sum_packets, cl->rate_packets);
}
- HTB_QUNLOCK(sch);
+ spin_unlock_bh(&sch->dev->queue_lock);
}
#endif
@@ -823,12 +723,11 @@ static void htb_rate_timer(unsigned long arg)
 * CAN_SEND) because we can use a more precise clock than the event queue here.
* In such case we remove class from event queue first.
*/
-static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
- int level,int bytes)
-{
- long toks,diff;
+static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
+ int level, int bytes)
+{
+ long toks, diff;
enum htb_cmode old_mode;
- HTB_DBG(5,1,"htb_chrg_cl cl=%X lev=%d len=%d\n",cl->classid,level,bytes);
#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
if (toks > cl->B) toks = cl->B; \
@@ -837,47 +736,31 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
cl->T = toks
while (cl) {
- HTB_CHCL(cl);
- diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
-#ifdef HTB_DEBUG
- if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
- if (net_ratelimit())
- printk(KERN_ERR "HTB: bad diff in charge, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
- cl->classid, diff,
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
- q->now.tv_sec * 1000000ULL + q->now.tv_usec,
- cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
-#else
- (unsigned long long) q->now,
- (unsigned long long) cl->t_c,
-#endif
- q->jiffies);
- diff = 1000;
- }
-#endif
+ diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
if (cl->level >= level) {
- if (cl->level == level) cl->xstats.lends++;
- HTB_ACCNT (tokens,buffer,rate);
+ if (cl->level == level)
+ cl->xstats.lends++;
+ HTB_ACCNT(tokens, buffer, rate);
} else {
cl->xstats.borrows++;
- cl->tokens += diff; /* we moved t_c; update tokens */
+ cl->tokens += diff; /* we moved t_c; update tokens */
}
- HTB_ACCNT (ctokens,cbuffer,ceil);
+ HTB_ACCNT(ctokens, cbuffer, ceil);
cl->t_c = q->now;
- HTB_DBG(5,2,"htb_chrg_clp cl=%X diff=%ld tok=%ld ctok=%ld\n",cl->classid,diff,cl->tokens,cl->ctokens);
- old_mode = cl->cmode; diff = 0;
- htb_change_class_mode(q,cl,&diff);
+ old_mode = cl->cmode;
+ diff = 0;
+ htb_change_class_mode(q, cl, &diff);
if (old_mode != cl->cmode) {
if (old_mode != HTB_CAN_SEND)
- htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
+ htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
if (cl->cmode != HTB_CAN_SEND)
- htb_add_to_wait_tree (q,cl,diff,1);
+ htb_add_to_wait_tree(q, cl, diff);
}
-
#ifdef HTB_RATECM
/* update rate counters */
- cl->sum_bytes += bytes; cl->sum_packets++;
+ cl->sum_bytes += bytes;
+ cl->sum_packets++;
#endif
/* update byte stats except for leaves which are already updated */
@@ -896,60 +779,46 @@ static void htb_charge_class(struct htb_sched *q,struct htb_class *cl,
* next pending event (0 for no event in pq).
 * Note: Applied are events with cl->pq_key <= jiffies.
*/
-static long htb_do_events(struct htb_sched *q,int level)
+static long htb_do_events(struct htb_sched *q, int level)
{
int i;
- HTB_DBG(8,1,"htb_do_events l=%d root=%p rmask=%X\n",
- level,q->wait_pq[level].rb_node,q->row_mask[level]);
+
for (i = 0; i < 500; i++) {
struct htb_class *cl;
long diff;
struct rb_node *p = q->wait_pq[level].rb_node;
- if (!p) return 0;
- while (p->rb_left) p = p->rb_left;
+ if (!p)
+ return 0;
+ while (p->rb_left)
+ p = p->rb_left;
cl = rb_entry(p, struct htb_class, pq_node);
if (time_after(cl->pq_key, q->jiffies)) {
- HTB_DBG(8,3,"htb_do_ev_ret delay=%ld\n",cl->pq_key - q->jiffies);
return cl->pq_key - q->jiffies;
}
- htb_safe_rb_erase(p,q->wait_pq+level);
- diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer);
-#ifdef HTB_DEBUG
- if (diff > cl->mbuffer || diff < 0 || PSCHED_TLESS(q->now, cl->t_c)) {
- if (net_ratelimit())
- printk(KERN_ERR "HTB: bad diff in events, cl=%X diff=%lX now=%Lu then=%Lu j=%lu\n",
- cl->classid, diff,
-#ifdef CONFIG_NET_SCH_CLK_GETTIMEOFDAY
- q->now.tv_sec * 1000000ULL + q->now.tv_usec,
- cl->t_c.tv_sec * 1000000ULL + cl->t_c.tv_usec,
-#else
- (unsigned long long) q->now,
- (unsigned long long) cl->t_c,
-#endif
- q->jiffies);
- diff = 1000;
- }
-#endif
- htb_change_class_mode(q,cl,&diff);
+ htb_safe_rb_erase(p, q->wait_pq + level);
+ diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32) cl->mbuffer);
+ htb_change_class_mode(q, cl, &diff);
if (cl->cmode != HTB_CAN_SEND)
- htb_add_to_wait_tree (q,cl,diff,2);
+ htb_add_to_wait_tree(q, cl, diff);
}
if (net_ratelimit())
printk(KERN_WARNING "htb: too many events !\n");
- return HZ/10;
+ return HZ / 10;
}
/* Returns class->node+prio from id-tree where the class's id is >= id. NULL
   if no such one exists. */
-static struct rb_node *
-htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
+static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
+ u32 id)
{
struct rb_node *r = NULL;
while (n) {
- struct htb_class *cl = rb_entry(n,struct htb_class,node[prio]);
- if (id == cl->classid) return n;
-
+ struct htb_class *cl =
+ rb_entry(n, struct htb_class, node[prio]);
+ if (id == cl->classid)
+ return n;
+
if (id > cl->classid) {
n = n->rb_right;
} else {
@@ -965,49 +834,49 @@ htb_id_find_next_upper(int prio,struct rb_node *n,u32 id)
*
* Find leaf where current feed pointers points to.
*/
-static struct htb_class *
-htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32 *pid)
+static struct htb_class *htb_lookup_leaf(struct rb_root *tree, int prio,
+					 struct rb_node **pptr, u32 *pid)
{
int i;
struct {
struct rb_node *root;
struct rb_node **pptr;
u32 *pid;
- } stk[TC_HTB_MAXDEPTH],*sp = stk;
-
+ } stk[TC_HTB_MAXDEPTH], *sp = stk;
+
BUG_TRAP(tree->rb_node);
sp->root = tree->rb_node;
sp->pptr = pptr;
sp->pid = pid;
for (i = 0; i < 65535; i++) {
- HTB_DBG(4,2,"htb_lleaf ptr=%p pid=%X\n",*sp->pptr,*sp->pid);
-
- if (!*sp->pptr && *sp->pid) {
+ if (!*sp->pptr && *sp->pid) {
/* ptr was invalidated but id is valid - try to recover
the original or next ptr */
- *sp->pptr = htb_id_find_next_upper(prio,sp->root,*sp->pid);
+ *sp->pptr =
+ htb_id_find_next_upper(prio, sp->root, *sp->pid);
}
- *sp->pid = 0; /* ptr is valid now so that remove this hint as it
- can become out of date quickly */
- if (!*sp->pptr) { /* we are at right end; rewind & go up */
+		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
+				   can become out of date quickly */
+ if (!*sp->pptr) { /* we are at right end; rewind & go up */
*sp->pptr = sp->root;
- while ((*sp->pptr)->rb_left)
+ while ((*sp->pptr)->rb_left)
*sp->pptr = (*sp->pptr)->rb_left;
if (sp > stk) {
sp--;
- BUG_TRAP(*sp->pptr); if(!*sp->pptr) return NULL;
- htb_next_rb_node (sp->pptr);
+ BUG_TRAP(*sp->pptr);
+ if (!*sp->pptr)
+ return NULL;
+ htb_next_rb_node(sp->pptr);
}
} else {
struct htb_class *cl;
- cl = rb_entry(*sp->pptr,struct htb_class,node[prio]);
- HTB_CHCL(cl);
- if (!cl->level)
+ cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
+ if (!cl->level)
return cl;
(++sp)->root = cl->un.inner.feed[prio].rb_node;
- sp->pptr = cl->un.inner.ptr+prio;
- sp->pid = cl->un.inner.last_ptr_id+prio;
+ sp->pptr = cl->un.inner.ptr + prio;
+ sp->pid = cl->un.inner.last_ptr_id + prio;
}
}
BUG_TRAP(0);
@@ -1016,21 +885,21 @@ htb_lookup_leaf(HTB_ARGQ struct rb_root *tree,int prio,struct rb_node **pptr,u32
/* dequeues packet at given priority and level; call only if
you are sure that there is active class at prio/level */
-static struct sk_buff *
-htb_dequeue_tree(struct htb_sched *q,int prio,int level)
+static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, int prio,
+ int level)
{
struct sk_buff *skb = NULL;
- struct htb_class *cl,*start;
+ struct htb_class *cl, *start;
/* look initial class up in the row */
- start = cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,
- q->ptr[level]+prio,q->last_ptr_id[level]+prio);
-
+ start = cl = htb_lookup_leaf(q->row[level] + prio, prio,
+ q->ptr[level] + prio,
+ q->last_ptr_id[level] + prio);
+
do {
next:
- BUG_TRAP(cl);
- if (!cl) return NULL;
- HTB_DBG(4,1,"htb_deq_tr prio=%d lev=%d cl=%X defic=%d\n",
- prio,level,cl->classid,cl->un.leaf.deficit[level]);
+ BUG_TRAP(cl);
+ if (!cl)
+ return NULL;
/* class can be empty - it is unlikely but can be true if leaf
qdisc drops packets in enqueue routine or if someone used
@@ -1038,64 +907,69 @@ next:
simply deactivate and skip such class */
if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
struct htb_class *next;
- htb_deactivate(q,cl);
+ htb_deactivate(q, cl);
/* row/level might become empty */
if ((q->row_mask[level] & (1 << prio)) == 0)
- return NULL;
-
- next = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,
- prio,q->ptr[level]+prio,q->last_ptr_id[level]+prio);
+ return NULL;
+
+ next = htb_lookup_leaf(q->row[level] + prio,
+ prio, q->ptr[level] + prio,
+ q->last_ptr_id[level] + prio);
- if (cl == start) /* fix start if we just deleted it */
+ if (cl == start) /* fix start if we just deleted it */
start = next;
cl = next;
goto next;
}
-
- if (likely((skb = cl->un.leaf.q->dequeue(cl->un.leaf.q)) != NULL))
+
+ skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
+ if (likely(skb != NULL))
break;
if (!cl->warned) {
- printk(KERN_WARNING "htb: class %X isn't work conserving ?!\n",cl->classid);
+ printk(KERN_WARNING
+ "htb: class %X isn't work conserving ?!\n",
+ cl->classid);
cl->warned = 1;
}
q->nwc_hit++;
- htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
- cl = htb_lookup_leaf (HTB_PASSQ q->row[level]+prio,prio,q->ptr[level]+prio,
- q->last_ptr_id[level]+prio);
+		htb_next_rb_node((level ? cl->parent->un.inner.ptr :
+				  q->ptr[0]) + prio);
+ cl = htb_lookup_leaf(q->row[level] + prio, prio,
+ q->ptr[level] + prio,
+ q->last_ptr_id[level] + prio);
} while (cl != start);
if (likely(skb != NULL)) {
if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
- HTB_DBG(4,2,"htb_next_cl oldptr=%p quant_add=%d\n",
- level?cl->parent->un.inner.ptr[prio]:q->ptr[0][prio],cl->un.leaf.quantum);
cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
- htb_next_rb_node((level?cl->parent->un.inner.ptr:q->ptr[0])+prio);
+			htb_next_rb_node((level ? cl->parent->un.inner.ptr :
+					  q->ptr[0]) + prio);
}
		/* this used to be after charge_class but this constellation
		   gives us slightly better performance */
if (!cl->un.leaf.q->q.qlen)
- htb_deactivate (q,cl);
- htb_charge_class (q,cl,level,skb->len);
+ htb_deactivate(q, cl);
+ htb_charge_class(q, cl, level, skb->len);
}
return skb;
}
-static void htb_delay_by(struct Qdisc *sch,long delay)
+static void htb_delay_by(struct Qdisc *sch, long delay)
{
struct htb_sched *q = qdisc_priv(sch);
- if (delay <= 0) delay = 1;
- if (unlikely(delay > 5*HZ)) {
+ if (delay <= 0)
+ delay = 1;
+ if (unlikely(delay > 5 * HZ)) {
if (net_ratelimit())
printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
- delay = 5*HZ;
+ delay = 5 * HZ;
}
	/* why don't we use jiffies here? because expires can be in the past */
mod_timer(&q->timer, q->jiffies + delay);
sch->flags |= TCQ_F_THROTTLED;
sch->qstats.overlimits++;
- HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
}
static struct sk_buff *htb_dequeue(struct Qdisc *sch)
@@ -1104,22 +978,19 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
struct htb_sched *q = qdisc_priv(sch);
int level;
long min_delay;
-#ifdef HTB_DEBUG
- int evs_used = 0;
-#endif
q->jiffies = jiffies;
- HTB_DBG(3,1,"htb_deq dircnt=%d qlen=%d\n",skb_queue_len(&q->direct_queue),
- sch->q.qlen);
/* try to dequeue direct packets as high prio (!) to minimize cpu work */
- if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
+ skb = __skb_dequeue(&q->direct_queue);
+ if (skb != NULL) {
sch->flags &= ~TCQ_F_THROTTLED;
sch->q.qlen--;
return skb;
}
- if (!sch->q.qlen) goto fin;
+ if (!sch->q.qlen)
+ goto fin;
PSCHED_GET_TIME(q->now);
min_delay = LONG_MAX;
@@ -1129,21 +1000,19 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
int m;
long delay;
if (time_after_eq(q->jiffies, q->near_ev_cache[level])) {
- delay = htb_do_events(q,level);
- q->near_ev_cache[level] = q->jiffies + (delay ? delay : HZ);
-#ifdef HTB_DEBUG
- evs_used++;
-#endif
+ delay = htb_do_events(q, level);
+ q->near_ev_cache[level] =
+ q->jiffies + (delay ? delay : HZ);
} else
- delay = q->near_ev_cache[level] - q->jiffies;
-
- if (delay && min_delay > delay)
+ delay = q->near_ev_cache[level] - q->jiffies;
+
+ if (delay && min_delay > delay)
min_delay = delay;
m = ~q->row_mask[level];
while (m != (int)(-1)) {
- int prio = ffz (m);
+ int prio = ffz(m);
m |= 1 << prio;
- skb = htb_dequeue_tree(q,prio,level);
+ skb = htb_dequeue_tree(q, prio, level);
if (likely(skb != NULL)) {
sch->q.qlen--;
sch->flags &= ~TCQ_F_THROTTLED;
@@ -1151,40 +1020,28 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
}
}
}
-#ifdef HTB_DEBUG
- if (!q->nwc_hit && min_delay >= 10*HZ && net_ratelimit()) {
- if (min_delay == LONG_MAX) {
- printk(KERN_ERR "HTB: dequeue bug (%d,%lu,%lu), report it please !\n",
- evs_used,q->jiffies,jiffies);
- htb_debug_dump(q);
- } else
- printk(KERN_WARNING "HTB: mindelay=%ld, some class has "
- "too small rate\n",min_delay);
- }
-#endif
- htb_delay_by (sch,min_delay > 5*HZ ? 5*HZ : min_delay);
+ htb_delay_by(sch, min_delay > 5 * HZ ? 5 * HZ : min_delay);
fin:
- HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p\n",sch->dev->name,q->jiffies,skb);
return skb;
}
/* try to drop from each class (by prio) until one succeed */
-static unsigned int htb_drop(struct Qdisc* sch)
+static unsigned int htb_drop(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
int prio;
for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
struct list_head *p;
- list_for_each (p,q->drops+prio) {
+ list_for_each(p, q->drops + prio) {
struct htb_class *cl = list_entry(p, struct htb_class,
un.leaf.drop_list);
unsigned int len;
- if (cl->un.leaf.q->ops->drop &&
- (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
+ if (cl->un.leaf.q->ops->drop &&
+ (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
sch->q.qlen--;
if (!cl->un.leaf.q->q.qlen)
- htb_deactivate (q,cl);
+ htb_deactivate(q, cl);
return len;
}
}
@@ -1194,29 +1051,25 @@ static unsigned int htb_drop(struct Qdisc* sch)
/* reset all classes */
/* always called under BH & queue lock */
-static void htb_reset(struct Qdisc* sch)
+static void htb_reset(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
int i;
- HTB_DBG(0,1,"htb_reset sch=%p, handle=%X\n",sch,sch->handle);
for (i = 0; i < HTB_HSIZE; i++) {
- struct list_head *p;
- list_for_each (p,q->hash+i) {
- struct htb_class *cl = list_entry(p,struct htb_class,hlist);
+ struct hlist_node *p;
+ struct htb_class *cl;
+
+ hlist_for_each_entry(cl, p, q->hash + i, hlist) {
if (cl->level)
- memset(&cl->un.inner,0,sizeof(cl->un.inner));
+ memset(&cl->un.inner, 0, sizeof(cl->un.inner));
else {
- if (cl->un.leaf.q)
+ if (cl->un.leaf.q)
qdisc_reset(cl->un.leaf.q);
INIT_LIST_HEAD(&cl->un.leaf.drop_list);
}
cl->prio_activity = 0;
cl->cmode = HTB_CAN_SEND;
-#ifdef HTB_DEBUG
- cl->pq_node.rb_color = -1;
- memset(cl->node,255,sizeof(cl->node));
-#endif
}
}
@@ -1224,12 +1077,12 @@ static void htb_reset(struct Qdisc* sch)
del_timer(&q->timer);
__skb_queue_purge(&q->direct_queue);
sch->q.qlen = 0;
- memset(q->row,0,sizeof(q->row));
- memset(q->row_mask,0,sizeof(q->row_mask));
- memset(q->wait_pq,0,sizeof(q->wait_pq));
- memset(q->ptr,0,sizeof(q->ptr));
+ memset(q->row, 0, sizeof(q->row));
+ memset(q->row_mask, 0, sizeof(q->row_mask));
+ memset(q->wait_pq, 0, sizeof(q->wait_pq));
+ memset(q->ptr, 0, sizeof(q->ptr));
for (i = 0; i < TC_HTB_NUMPRIO; i++)
- INIT_LIST_HEAD(q->drops+i);
+ INIT_LIST_HEAD(q->drops + i);
}
static int htb_init(struct Qdisc *sch, struct rtattr *opt)
@@ -1238,36 +1091,31 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
struct rtattr *tb[TCA_HTB_INIT];
struct tc_htb_glob *gopt;
int i;
-#ifdef HTB_DEBUG
- printk(KERN_INFO "HTB init, kernel part version %d.%d\n",
- HTB_VER >> 16,HTB_VER & 0xffff);
-#endif
if (!opt || rtattr_parse_nested(tb, TCA_HTB_INIT, opt) ||
- tb[TCA_HTB_INIT-1] == NULL ||
- RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt)) {
+ tb[TCA_HTB_INIT - 1] == NULL ||
+ RTA_PAYLOAD(tb[TCA_HTB_INIT - 1]) < sizeof(*gopt)) {
printk(KERN_ERR "HTB: hey probably you have bad tc tool ?\n");
return -EINVAL;
}
- gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
+ gopt = RTA_DATA(tb[TCA_HTB_INIT - 1]);
if (gopt->version != HTB_VER >> 16) {
- printk(KERN_ERR "HTB: need tc/htb version %d (minor is %d), you have %d\n",
- HTB_VER >> 16,HTB_VER & 0xffff,gopt->version);
+ printk(KERN_ERR
+ "HTB: need tc/htb version %d (minor is %d), you have %d\n",
+ HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
return -EINVAL;
}
- q->debug = gopt->debug;
- HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);
INIT_LIST_HEAD(&q->root);
for (i = 0; i < HTB_HSIZE; i++)
- INIT_LIST_HEAD(q->hash+i);
+ INIT_HLIST_HEAD(q->hash + i);
for (i = 0; i < TC_HTB_NUMPRIO; i++)
- INIT_LIST_HEAD(q->drops+i);
+ INIT_LIST_HEAD(q->drops + i);
init_timer(&q->timer);
skb_queue_head_init(&q->direct_queue);
q->direct_qlen = sch->dev->tx_queue_len;
- if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
+ if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
q->direct_qlen = 2;
q->timer.function = htb_timer;
q->timer.data = (unsigned long)sch;
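
The timer setup kept above follows the 2.6-era idiom: the callback is a
plain function taking the unsigned long stashed in ->data. A sketch of
that idiom (fragment only; the qdisc's real callback is htb_timer, whose
body is not shown in this hunk):

static void example_timer_cb(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;	/* recover context */

	sch->flags &= ~TCQ_F_THROTTLED;	/* stop claiming "nothing to send" */
	netif_schedule(sch->dev);	/* ask the device to dequeue again */
}
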
@@ -1289,80 +1137,72 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct htb_sched *q = qdisc_priv(sch);
- unsigned char *b = skb->tail;
+ unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_htb_glob gopt;
- HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
- HTB_QLOCK(sch);
+ spin_lock_bh(&sch->dev->queue_lock);
gopt.direct_pkts = q->direct_pkts;
-#ifdef HTB_DEBUG
- if (HTB_DBG_COND(0,2))
- htb_debug_dump(q);
-#endif
gopt.version = HTB_VER;
gopt.rate2quantum = q->rate2quantum;
gopt.defcls = q->defcls;
- gopt.debug = q->debug;
- rta = (struct rtattr*)b;
+ gopt.debug = 0;
+ rta = (struct rtattr *)b;
RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
rta->rta_len = skb->tail - b;
- HTB_QUNLOCK(sch);
+ spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
rtattr_failure:
- HTB_QUNLOCK(sch);
+ spin_unlock_bh(&sch->dev->queue_lock);
skb_trim(skb, skb->tail - skb->data);
return -1;
}
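
htb_dump() above now takes dev->queue_lock directly in place of the
removed HTB_QLOCK() wrapper, and relies on RTA_PUT()'s hidden goto for
unwinding. A kernel-style sketch of that dump pattern (assumes this
tree's rtnetlink macros; not a drop-in function):

static int example_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	unsigned char *b = skb->tail;
	struct rtattr *rta;

	spin_lock_bh(&sch->dev->queue_lock);	/* freeze qdisc state */
	rta = (struct rtattr *)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);	/* jumps on overflow */
	/* ... further RTA_PUT() payload attributes here ... */
	rta->rta_len = skb->tail - b;		/* backfill nested length */
	spin_unlock_bh(&sch->dev->queue_lock);
	return skb->len;

rtattr_failure:					/* RTA_PUT's goto target */
	spin_unlock_bh(&sch->dev->queue_lock);
	skb_trim(skb, b - skb->data);		/* drop partial attributes */
	return -1;
}
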
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
- struct sk_buff *skb, struct tcmsg *tcm)
+ struct sk_buff *skb, struct tcmsg *tcm)
{
-#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
-#endif
- struct htb_class *cl = (struct htb_class*)arg;
- unsigned char *b = skb->tail;
+ struct htb_class *cl = (struct htb_class *)arg;
+ unsigned char *b = skb->tail;
struct rtattr *rta;
struct tc_htb_opt opt;
- HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);
-
- HTB_QLOCK(sch);
+ spin_lock_bh(&sch->dev->queue_lock);
tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
tcm->tcm_handle = cl->classid;
if (!cl->level && cl->un.leaf.q)
tcm->tcm_info = cl->un.leaf.q->handle;
- rta = (struct rtattr*)b;
+ rta = (struct rtattr *)b;
RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
- memset (&opt,0,sizeof(opt));
+ memset(&opt, 0, sizeof(opt));
- opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
- opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
- opt.quantum = cl->un.leaf.quantum; opt.prio = cl->un.leaf.prio;
- opt.level = cl->level;
+ opt.rate = cl->rate->rate;
+ opt.buffer = cl->buffer;
+ opt.ceil = cl->ceil->rate;
+ opt.cbuffer = cl->cbuffer;
+ opt.quantum = cl->un.leaf.quantum;
+ opt.prio = cl->un.leaf.prio;
+ opt.level = cl->level;
RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
rta->rta_len = skb->tail - b;
- HTB_QUNLOCK(sch);
+ spin_unlock_bh(&sch->dev->queue_lock);
return skb->len;
rtattr_failure:
- HTB_QUNLOCK(sch);
+ spin_unlock_bh(&sch->dev->queue_lock);
skb_trim(skb, b - skb->data);
return -1;
}
static int
-htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
- struct gnet_dump *d)
+htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
- struct htb_class *cl = (struct htb_class*)arg;
+ struct htb_class *cl = (struct htb_class *)arg;
#ifdef HTB_RATECM
- cl->rate_est.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
- cl->rate_est.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
+ cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
+ cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
#endif
if (!cl->level && cl->un.leaf.q)
@@ -1379,21 +1219,22 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg,
}
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
- struct Qdisc **old)
+ struct Qdisc **old)
{
- struct htb_class *cl = (struct htb_class*)arg;
+ struct htb_class *cl = (struct htb_class *)arg;
if (cl && !cl->level) {
- if (new == NULL && (new = qdisc_create_dflt(sch->dev,
- &pfifo_qdisc_ops)) == NULL)
- return -ENOBUFS;
+ if (new == NULL && (new = qdisc_create_dflt(sch->dev,
+ &pfifo_qdisc_ops))
+ == NULL)
+ return -ENOBUFS;
sch_tree_lock(sch);
if ((*old = xchg(&cl->un.leaf.q, new)) != NULL) {
if (cl->prio_activity)
- htb_deactivate (qdisc_priv(sch),cl);
+ htb_deactivate(qdisc_priv(sch), cl);
			/* TODO: is this correct? Why doesn't CBQ do it? */
- sch->q.qlen -= (*old)->q.qlen;
+ sch->q.qlen -= (*old)->q.qlen;
qdisc_reset(*old);
}
sch_tree_unlock(sch);
@@ -1402,20 +1243,16 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
return -ENOENT;
}
-static struct Qdisc * htb_leaf(struct Qdisc *sch, unsigned long arg)
+static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
- struct htb_class *cl = (struct htb_class*)arg;
+ struct htb_class *cl = (struct htb_class *)arg;
return (cl && !cl->level) ? cl->un.leaf.q : NULL;
}
static unsigned long htb_get(struct Qdisc *sch, u32 classid)
{
-#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
-#endif
- struct htb_class *cl = htb_find(classid,sch);
- HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
- if (cl)
+ struct htb_class *cl = htb_find(classid, sch);
+ if (cl)
cl->refcnt++;
return (unsigned long)cl;
}
@@ -1430,10 +1267,9 @@ static void htb_destroy_filters(struct tcf_proto **fl)
}
}
-static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
+static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
struct htb_sched *q = qdisc_priv(sch);
- HTB_DBG(0,1,"htb_destrycls clid=%X ref=%d\n", cl?cl->classid:0,cl?cl->refcnt:0);
if (!cl->level) {
BUG_TRAP(cl->un.leaf.q);
sch->q.qlen -= cl->un.leaf.q->q.qlen;
@@ -1441,45 +1277,45 @@ static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
}
qdisc_put_rtab(cl->rate);
qdisc_put_rtab(cl->ceil);
-
- htb_destroy_filters (&cl->filter_list);
-
- while (!list_empty(&cl->children))
- htb_destroy_class (sch,list_entry(cl->children.next,
- struct htb_class,sibling));
+
+ htb_destroy_filters(&cl->filter_list);
+
+ while (!list_empty(&cl->children))
+ htb_destroy_class(sch, list_entry(cl->children.next,
+ struct htb_class, sibling));
/* note: this delete may happen twice (see htb_delete) */
- list_del(&cl->hlist);
+ if (!hlist_unhashed(&cl->hlist))
+ hlist_del(&cl->hlist);
list_del(&cl->sibling);
-
+
if (cl->prio_activity)
- htb_deactivate (q,cl);
-
+ htb_deactivate(q, cl);
+
if (cl->cmode != HTB_CAN_SEND)
- htb_safe_rb_erase(&cl->pq_node,q->wait_pq+cl->level);
-
+ htb_safe_rb_erase(&cl->pq_node, q->wait_pq + cl->level);
+
kfree(cl);
}
/* always called under BH & queue lock */
-static void htb_destroy(struct Qdisc* sch)
+static void htb_destroy(struct Qdisc *sch)
{
struct htb_sched *q = qdisc_priv(sch);
- HTB_DBG(0,1,"htb_destroy q=%p\n",q);
- del_timer_sync (&q->timer);
+ del_timer_sync(&q->timer);
#ifdef HTB_RATECM
- del_timer_sync (&q->rttim);
+ del_timer_sync(&q->rttim);
#endif
/* This line used to be after htb_destroy_class call below
and surprisingly it worked in 2.4. But it must precede it
	   because filters need their target class alive to be able to call
unbind_filter on it (without Oops). */
htb_destroy_filters(&q->filter_list);
-
- while (!list_empty(&q->root))
- htb_destroy_class (sch,list_entry(q->root.next,
- struct htb_class,sibling));
+
+ while (!list_empty(&q->root))
+ htb_destroy_class(sch, list_entry(q->root.next,
+ struct htb_class, sibling));
__skb_queue_purge(&q->direct_queue);
}
@@ -1487,24 +1323,25 @@ static void htb_destroy(struct Qdisc* sch)
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl = (struct htb_class*)arg;
- HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
+ struct htb_class *cl = (struct htb_class *)arg;
	// TODO: why don't we allow deleting a subtree? references? does the
	// tc subsys guarantee us that in htb_destroy it holds no class
	// refs, so that we can remove children safely there?
if (!list_empty(&cl->children) || cl->filter_cnt)
return -EBUSY;
-
+
sch_tree_lock(sch);
-
+
/* delete from hash and active; remainder in destroy_class */
- list_del_init(&cl->hlist);
+ if (!hlist_unhashed(&cl->hlist))
+ hlist_del(&cl->hlist);
+
if (cl->prio_activity)
- htb_deactivate (q,cl);
+ htb_deactivate(q, cl);
if (--cl->refcnt == 0)
- htb_destroy_class(sch,cl);
+ htb_destroy_class(sch, cl);
sch_tree_unlock(sch);
return 0;
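
htb_delete() above unlinks the class from the hash only when it is still
hashed, because htb_destroy_class() repeats the same unlink later (the
"may happen twice" note in the destroy path). A standalone model of why
the guard makes the second unlink a no-op; the delete here re-initializes
the node, in the spirit of the kernel's hlist_del_init(), rather than
poisoning it:

#include <stdio.h>
#include <stddef.h>

struct hnode { struct hnode *next, **pprev; };
struct hhead { struct hnode *first; };

static int unhashed(const struct hnode *n) { return !n->pprev; }

static void add_head(struct hnode *n, struct hhead *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

static void del(struct hnode *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;
	n->pprev = NULL;		/* node now reads as unhashed */
}

int main(void)
{
	struct hhead head = { NULL };
	struct hnode cl = { NULL, NULL };

	add_head(&cl, &head);

	if (!unhashed(&cl))	/* delete path unlinks first ... */
		del(&cl);
	if (!unhashed(&cl))	/* ... destroy path tries again, safely */
		del(&cl);

	printf("bucket empty: %s\n", head.first ? "no" : "yes");
	return 0;
}
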
@@ -1512,45 +1349,46 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
static void htb_put(struct Qdisc *sch, unsigned long arg)
{
-#ifdef HTB_DEBUG
- struct htb_sched *q = qdisc_priv(sch);
-#endif
- struct htb_class *cl = (struct htb_class*)arg;
- HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
+ struct htb_class *cl = (struct htb_class *)arg;
if (--cl->refcnt == 0)
- htb_destroy_class(sch,cl);
+ htb_destroy_class(sch, cl);
}
-static int htb_change_class(struct Qdisc *sch, u32 classid,
- u32 parentid, struct rtattr **tca, unsigned long *arg)
+static int htb_change_class(struct Qdisc *sch, u32 classid,
+ u32 parentid, struct rtattr **tca,
+ unsigned long *arg)
{
int err = -EINVAL;
struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl = (struct htb_class*)*arg,*parent;
- struct rtattr *opt = tca[TCA_OPTIONS-1];
+ struct htb_class *cl = (struct htb_class *)*arg, *parent;
+ struct rtattr *opt = tca[TCA_OPTIONS - 1];
struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct rtattr *tb[TCA_HTB_RTAB];
struct tc_htb_opt *hopt;
/* extract all subattrs from opt attr */
if (!opt || rtattr_parse_nested(tb, TCA_HTB_RTAB, opt) ||
- tb[TCA_HTB_PARMS-1] == NULL ||
- RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
+ tb[TCA_HTB_PARMS - 1] == NULL ||
+ RTA_PAYLOAD(tb[TCA_HTB_PARMS - 1]) < sizeof(*hopt))
goto failure;
-
- parent = parentid == TC_H_ROOT ? NULL : htb_find (parentid,sch);
- hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
- HTB_DBG(0,1,"htb_chg cl=%p(%X), clid=%X, parid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,classid,parentid,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
- rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
- ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
- if (!rtab || !ctab) goto failure;
+ parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
+
+ hopt = RTA_DATA(tb[TCA_HTB_PARMS - 1]);
+
+ rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB - 1]);
+ ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB - 1]);
+ if (!rtab || !ctab)
+ goto failure;
- if (!cl) { /* new class */
+ if (!cl) { /* new class */
struct Qdisc *new_q;
+ int prio;
+
/* check for valid classid */
- if (!classid || TC_H_MAJ(classid^sch->handle) || htb_find(classid,sch))
+ if (!classid || TC_H_MAJ(classid ^ sch->handle)
+ || htb_find(classid, sch))
goto failure;
/* check maximal depth */
@@ -1561,15 +1399,16 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
err = -ENOBUFS;
if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
goto failure;
-
+
cl->refcnt = 1;
INIT_LIST_HEAD(&cl->sibling);
- INIT_LIST_HEAD(&cl->hlist);
+ INIT_HLIST_NODE(&cl->hlist);
INIT_LIST_HEAD(&cl->children);
INIT_LIST_HEAD(&cl->un.leaf.drop_list);
-#ifdef HTB_DEBUG
- cl->magic = HTB_CMAGIC;
-#endif
+ RB_CLEAR_NODE(&cl->pq_node);
+
+ for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
+ RB_CLEAR_NODE(&cl->node[prio]);
/* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
	   which can't be used inside of sch_tree_lock
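
Clearing the rbtree nodes above replaces the old HTB_DEBUG color magic
with the stock rbtree marker: RB_CLEAR_NODE() points the node's parent
link at itself, so "never inserted" is distinguishable from "linked". A
kernel-style sketch of the companion test (assuming RB_EMPTY_NODE() is
available in this tree, as in mainline rbtree.h of this era):

static void erase_if_linked(struct rb_node *n, struct rb_root *root)
{
	if (!RB_EMPTY_NODE(n)) {	/* currently in a tree? */
		rb_erase(n, root);
		RB_CLEAR_NODE(n);	/* mark unlinked again */
	}
}
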
@@ -1579,53 +1418,53 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (parent && !parent->level) {
/* turn parent into inner node */
sch->q.qlen -= parent->un.leaf.q->q.qlen;
- qdisc_destroy (parent->un.leaf.q);
- if (parent->prio_activity)
- htb_deactivate (q,parent);
+ qdisc_destroy(parent->un.leaf.q);
+ if (parent->prio_activity)
+ htb_deactivate(q, parent);
/* remove from evt list because of level change */
if (parent->cmode != HTB_CAN_SEND) {
- htb_safe_rb_erase(&parent->pq_node,q->wait_pq /*+0*/);
+ htb_safe_rb_erase(&parent->pq_node, q->wait_pq);
parent->cmode = HTB_CAN_SEND;
}
parent->level = (parent->parent ? parent->parent->level
- : TC_HTB_MAXDEPTH) - 1;
- memset (&parent->un.inner,0,sizeof(parent->un.inner));
+ : TC_HTB_MAXDEPTH) - 1;
+ memset(&parent->un.inner, 0, sizeof(parent->un.inner));
}
/* leaf (we) needs elementary qdisc */
cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
- cl->classid = classid; cl->parent = parent;
+ cl->classid = classid;
+ cl->parent = parent;
/* set class to be in HTB_CAN_SEND state */
cl->tokens = hopt->buffer;
cl->ctokens = hopt->cbuffer;
- cl->mbuffer = PSCHED_JIFFIE2US(HZ*60); /* 1min */
+ cl->mbuffer = PSCHED_JIFFIE2US(HZ * 60); /* 1min */
PSCHED_GET_TIME(cl->t_c);
cl->cmode = HTB_CAN_SEND;
/* attach to the hash list and parent's family */
- list_add_tail(&cl->hlist, q->hash+htb_hash(classid));
- list_add_tail(&cl->sibling, parent ? &parent->children : &q->root);
-#ifdef HTB_DEBUG
- {
- int i;
- for (i = 0; i < TC_HTB_NUMPRIO; i++) cl->node[i].rb_color = -1;
- cl->pq_node.rb_color = -1;
- }
-#endif
- } else sch_tree_lock(sch);
+ hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
+ list_add_tail(&cl->sibling,
+ parent ? &parent->children : &q->root);
+ } else
+ sch_tree_lock(sch);
/* it used to be a nasty bug here, we have to check that node
- is really leaf before changing cl->un.leaf ! */
+	   is really a leaf before changing cl->un.leaf! */
if (!cl->level) {
cl->un.leaf.quantum = rtab->rate.rate / q->rate2quantum;
if (!hopt->quantum && cl->un.leaf.quantum < 1000) {
- printk(KERN_WARNING "HTB: quantum of class %X is small. Consider r2q change.\n", cl->classid);
+ printk(KERN_WARNING
+ "HTB: quantum of class %X is small. Consider r2q change.\n",
+ cl->classid);
cl->un.leaf.quantum = 1000;
}
if (!hopt->quantum && cl->un.leaf.quantum > 200000) {
- printk(KERN_WARNING "HTB: quantum of class %X is big. Consider r2q change.\n", cl->classid);
+ printk(KERN_WARNING
+ "HTB: quantum of class %X is big. Consider r2q change.\n",
+ cl->classid);
cl->un.leaf.quantum = 200000;
}
if (hopt->quantum)
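
The clamping above derives a leaf's quantum from its rate (bytes/sec)
divided by rate2quantum, warning and clamping to [1000, 200000] bytes
unless the user supplied an explicit quantum, which the hunk's trailing
"if (hopt->quantum)" then applies verbatim. A standalone model of that
derivation (values illustrative):

#include <stdio.h>

static unsigned int derive_quantum(unsigned int rate_bytes_per_sec,
				   int r2q, unsigned int user_quantum)
{
	unsigned int quantum = rate_bytes_per_sec / r2q;

	if (user_quantum)
		return user_quantum;	/* explicit value wins, unclamped */
	if (quantum < 1000)	/* too small: per-round overhead dominates */
		quantum = 1000;
	if (quantum > 200000)	/* too large: one class hogs a round */
		quantum = 200000;
	return quantum;
}

int main(void)
{
	/* 1 Mbit/s = 125000 B/s with the default r2q of 10 -> 12500 B */
	printf("%u\n", derive_quantum(125000, 10, 0));
	return 0;
}
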
@@ -1636,16 +1475,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
cl->buffer = hopt->buffer;
cl->cbuffer = hopt->cbuffer;
- if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
- if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
+ if (cl->rate)
+ qdisc_put_rtab(cl->rate);
+ cl->rate = rtab;
+ if (cl->ceil)
+ qdisc_put_rtab(cl->ceil);
+ cl->ceil = ctab;
sch_tree_unlock(sch);
*arg = (unsigned long)cl;
return 0;
failure:
- if (rtab) qdisc_put_rtab(rtab);
- if (ctab) qdisc_put_rtab(ctab);
+ if (rtab)
+ qdisc_put_rtab(rtab);
+ if (ctab)
+ qdisc_put_rtab(ctab);
return err;
}
@@ -1654,28 +1499,28 @@ static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
- HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
+
return fl;
}
static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
- u32 classid)
+ u32 classid)
{
struct htb_sched *q = qdisc_priv(sch);
- struct htb_class *cl = htb_find (classid,sch);
- HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
+ struct htb_class *cl = htb_find(classid, sch);
+
/*if (cl && !cl->level) return 0;
- The line above used to be there to prevent attaching filters to
- leaves. But at least tc_index filter uses this just to get class
- for other reasons so that we have to allow for it.
- ----
- 19.6.2002 As Werner explained it is ok - bind filter is just
- another way to "lock" the class - unlike "get" this lock can
- be broken by class during destroy IIUC.
+	   The line above used to be there to prevent attaching filters to
+	   leaves. But at least the tc_index filter uses this just to get a
+	   class for other reasons, so we have to allow it.
+	   ----
+	   19.6.2002 As Werner explained, it is OK - bind filter is just
+	   another way to "lock" the class - unlike "get", this lock can
+	   be broken by the class during destroy, IIUC.
*/
- if (cl)
- cl->filter_cnt++;
- else
+ if (cl)
+ cl->filter_cnt++;
+ else
q->filter_cnt++;
return (unsigned long)cl;
}
@@ -1684,10 +1529,10 @@ static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)arg;
- HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
- if (cl)
- cl->filter_cnt--;
- else
+
+ if (cl)
+ cl->filter_cnt--;
+ else
q->filter_cnt--;
}
@@ -1700,9 +1545,10 @@ static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
return;
for (i = 0; i < HTB_HSIZE; i++) {
- struct list_head *p;
- list_for_each (p,q->hash+i) {
- struct htb_class *cl = list_entry(p,struct htb_class,hlist);
+ struct hlist_node *p;
+ struct htb_class *cl;
+
+ hlist_for_each_entry(cl, p, q->hash + i, hlist) {
if (arg->count < arg->skip) {
arg->count++;
continue;
@@ -1750,12 +1596,13 @@ static struct Qdisc_ops htb_qdisc_ops = {
static int __init htb_module_init(void)
{
- return register_qdisc(&htb_qdisc_ops);
+ return register_qdisc(&htb_qdisc_ops);
}
-static void __exit htb_module_exit(void)
+static void __exit htb_module_exit(void)
{
- unregister_qdisc(&htb_qdisc_ops);
+ unregister_qdisc(&htb_qdisc_ops);
}
+
module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a08ec4c7c55d..45939bafbdf8 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -192,8 +192,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
*/
if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
if (!(skb = skb_unshare(skb, GFP_ATOMIC))
- || (skb->ip_summed == CHECKSUM_HW
- && skb_checksum_help(skb, 0))) {
+ || (skb->ip_summed == CHECKSUM_PARTIAL
+ && skb_checksum_help(skb))) {
sch->qstats.drops++;
return NET_XMIT_DROP;
}
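
The netem fix above matters because CHECKSUM_PARTIAL packets carry only a
partial checksum to be finished by hardware; corrupting payload bytes
first would leave it stale. A kernel-style sketch of the guard in context
(the trailing bit-flip line sketches what netem_enqueue goes on to do; it
is not part of this hunk):

/* get a private, fully checksummed copy before touching the payload */
if (!(skb = skb_unshare(skb, GFP_ATOMIC))
    || (skb->ip_summed == CHECKSUM_PARTIAL
	&& skb_checksum_help(skb))) {
	sch->qstats.drops++;
	return NET_XMIT_DROP;
}

/* now a random bit in the linear header area can safely be flipped */
skb->data[net_random() % skb_headlen(skb)] ^= 1 << (net_random() % 8);
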