author		Patrick McHardy <kaber@trash.net>	2007-04-17 02:02:10 +0200
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 07:29:07 +0200
commit		0463d4ae25771aaf3379bb6b2392f6edf23c2828
tree		5c820b718abfe086a7b1d91814cb99d721439a46 /net
parent		[NETLINK]: don't reinitialize callback mutex
[NET_SCHED]: Eliminate qdisc_tree_lock
Since we're now holding the rtnl during the entire dump operation, we
can remove qdisc_tree_lock, whose only purpose is to protect dump
callbacks from concurrent changes to the qdisc tree.
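What the patch removes is a redundant reader-writer lock: once every path
that walks the tree (the dump callbacks) and every path that changes it
(qdisc add/delete/graft) runs under the RTNL mutex, readers and writers are
already mutually excluded. A minimal sketch of the resulting discipline,
using hypothetical helper names rather than the patch's actual call sites:

	#include <linux/netdevice.h>
	#include <linux/rtnetlink.h>

	/* Reader side: a netlink dump now holds the RTNL for the whole
	 * dump, so the qdisc tree cannot change underneath it. */
	static void dump_qdisc_tree(struct net_device *dev)
	{
		ASSERT_RTNL();	/* caller took rtnl_lock() */
		/* walk dev->qdisc_list without qdisc_tree_lock */
	}

	/* Writer side: all tree modifications also run under the RTNL,
	 * so no dump can observe a half-updated tree. */
	static void change_qdisc_tree(struct net_device *dev)
	{
		ASSERT_RTNL();
		/* graft/destroy qdiscs; concurrent dumps are impossible */
	}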
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
 net/sched/cls_api.c     |  2 --
 net/sched/sch_api.c     | 22 +++-------------------
 net/sched/sch_generic.c | 29 +++++++----------------------
 3 files changed, 10 insertions(+), 43 deletions(-)
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ca3da5013b7a..ebf94edf0478 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -400,7 +400,6 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
 		return skb->len;
 
-	read_lock(&qdisc_tree_lock);
 	if (!tcm->tcm_parent)
 		q = dev->qdisc_sleeping;
 	else
@@ -457,7 +456,6 @@ errout:
 	if (cl)
 		cops->put(q, cl);
 out:
-	read_unlock(&qdisc_tree_lock);
 	dev_put(dev);
 	return skb->len;
 }
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2e863bdaa9a1..0ce6914f5981 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -191,7 +191,7 @@ int unregister_qdisc(struct Qdisc_ops *qops)
    (root qdisc, all its children, children of children etc.)
  */
 
-static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	struct Qdisc *q;
 
@@ -202,16 +202,6 @@ static struct Qdisc *__qdisc_lookup(struct net_device *dev, u32 handle)
 	return NULL;
 }
 
-struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
-{
-	struct Qdisc *q;
-
-	read_lock(&qdisc_tree_lock);
-	q = __qdisc_lookup(dev, handle);
-	read_unlock(&qdisc_tree_lock);
-	return q;
-}
-
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
 {
 	unsigned long cl;
@@ -405,7 +395,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
 	if (n == 0)
 		return;
 	while ((parentid = sch->parent)) {
-		sch = __qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
+		sch = qdisc_lookup(sch->dev, TC_H_MAJ(parentid));
 		cops = sch->ops->cl_ops;
 		if (cops->qlen_notify) {
 			cl = cops->get(sch, parentid);
@@ -905,7 +895,6 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 			continue;
 		if (idx > s_idx)
 			s_q_idx = 0;
-		read_lock(&qdisc_tree_lock);
 		q_idx = 0;
 		list_for_each_entry(q, &dev->qdisc_list, list) {
 			if (q_idx < s_q_idx) {
@@ -913,13 +902,10 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 				continue;
 			}
 			if (tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).pid,
-					  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
-				read_unlock(&qdisc_tree_lock);
+					  cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
 				goto done;
-			}
 			q_idx++;
 		}
-		read_unlock(&qdisc_tree_lock);
 	}
 
 done:
@@ -1142,7 +1128,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 	s_t = cb->args[0];
 	t = 0;
 
-	read_lock(&qdisc_tree_lock);
 	list_for_each_entry(q, &dev->qdisc_list, list) {
 		if (t < s_t || !q->ops->cl_ops ||
 		    (tcm->tcm_parent &&
@@ -1164,7 +1149,6 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 			break;
 		t++;
 	}
-	read_unlock(&qdisc_tree_lock);
 
 	cb->args[0] = t;
 
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 52eb3439d7c6..1894eb72f6cf 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -36,34 +36,23 @@
 
 /* Main transmission queue. */
 
-/* Main qdisc structure lock.
-
-   However, modifications
-   to data, participating in scheduling must be additionally
-   protected with dev->queue_lock spinlock.
-
-   The idea is the following:
-   - enqueue, dequeue are serialized via top level device
-     spinlock dev->queue_lock.
-   - tree walking is protected by read_lock(qdisc_tree_lock)
-     and this lock is used only in process context.
-   - updates to tree are made only under rtnl semaphore,
-     hence this lock may be made without local bh disabling.
-
-   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
+/* Modifications to data participating in scheduling must be protected with
+ * dev->queue_lock spinlock.
+ *
+ * The idea is the following:
+ * - enqueue, dequeue are serialized via top level device
+ *   spinlock dev->queue_lock.
+ * - updates to tree and tree walking are only done under the rtnl mutex.
  */
-DEFINE_RWLOCK(qdisc_tree_lock);
 
 void qdisc_lock_tree(struct net_device *dev)
 {
-	write_lock(&qdisc_tree_lock);
 	spin_lock_bh(&dev->queue_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
 	spin_unlock_bh(&dev->queue_lock);
-	write_unlock(&qdisc_tree_lock);
 }
 
 /*
@@ -528,15 +517,11 @@ void dev_activate(struct net_device *dev)
 				printk(KERN_INFO "%s: activation failed\n", dev->name);
 				return;
 			}
-			write_lock(&qdisc_tree_lock);
 			list_add_tail(&qdisc->list, &dev->qdisc_list);
-			write_unlock(&qdisc_tree_lock);
 		} else {
 			qdisc = &noqueue_qdisc;
 		}
-		write_lock(&qdisc_tree_lock);
 		dev->qdisc_sleeping = qdisc;
-		write_unlock(&qdisc_tree_lock);
 	}
 
 	if (!netif_carrier_ok(dev))
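What remains after the patch is the two-level scheme stated in the rewritten
sch_generic.c comment: the packet fast path serializes on dev->queue_lock
with bottom halves disabled, while configuration and tree walking rely on
the RTNL alone. A hedged sketch of the fast-path side, modelled on how
dev_queue_xmit() used dev->queue_lock in this kernel era (enqueue_one() is a
hypothetical wrapper, not code from the patch):

	#include <linux/netdevice.h>
	#include <net/pkt_sched.h>

	static int enqueue_one(struct net_device *dev, struct sk_buff *skb)
	{
		struct Qdisc *q;
		int rc;

		/* queue_lock excludes concurrent enqueue/dequeue and the
		 * qdisc pointer updates made by qdisc_lock_tree() callers. */
		spin_lock_bh(&dev->queue_lock);
		q = dev->qdisc;
		rc = q->enqueue(skb, q);	/* the qdisc's enqueue hook */
		spin_unlock_bh(&dev->queue_lock);
		return rc;
	}

With only one lock left on each path, the old ordering rule
("qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!") disappears
along with the lock itself.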