author     David S. Miller <davem@davemloft.net>  2013-08-17 00:37:26 +0200
committer  David S. Miller <davem@davemloft.net>  2013-08-17 00:37:26 +0200
commit     2ff1cf12c9fe70e75e600404e6a4274b19d293ed (patch)
tree       beafddac0a8098e3f07d2ec60e44a2a7d006e605 /net/sched
parent     netlink: Eliminate kmalloc in netlink dump operation. (diff)
parent     Merge tag 'clk-fixes-for-linus' of git://git.linaro.org/people/mturquette/linux (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_api.c      41
-rw-r--r--  net/sched/sch_generic.c   8
-rw-r--r--  net/sched/sch_htb.c      13
3 files changed, 61 insertions(+), 1 deletion(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bded1f6..51b968d3febb 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
return q;
}
+/* The linklayer setting was not transferred from older iproute2
+ * versions, and the rate table lookup system has been dropped from
+ * the kernel. To stay backward compatible with older iproute2 tc
+ * utils, we detect the linklayer setting by checking whether the
+ * rate table was modified.
+ *
+ * For linklayer ATM, the rate table is aligned to 48-byte cells,
+ * so some table entries will contain the same value. The mpu
+ * (min packet unit) is also encoded into the old rate table, so
+ * starting from the mpu we find the low and high table entries
+ * that map to this cell. If these entries contain the same value,
+ * then the rate table has been modified for linklayer ATM.
+ *
+ * This is done by rounding the mpu up to the nearest 48-byte cell,
+ * then rounding up to the next cell, taking the table entry one
+ * below it, and comparing the two entries.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+ int low = roundup(r->mpu, 48);
+ int high = roundup(low+1, 48);
+ int cell_low = low >> r->cell_log;
+ int cell_high = (high >> r->cell_log) - 1;
+
+ /* rtab is too inaccurate at rates > 100Mbit/s */
+ if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+ pr_debug("TC linklayer: Giving up ATM detection\n");
+ return TC_LINKLAYER_ETHERNET;
+ }
+
+ if ((cell_high > cell_low) && (cell_high < 256)
+ && (rtab[cell_low] == rtab[cell_high])) {
+ pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+ cell_low, cell_high, rtab[cell_high]);
+ return TC_LINKLAYER_ATM;
+ }
+ return TC_LINKLAYER_ETHERNET;
+}
+
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, nla_data(tab), 1024);
+ if (r->linklayer == TC_LINKLAYER_UNAWARE)
+ r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef4b76e..48be3d5c0d92 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q)
unsigned long dev_trans_start(struct net_device *dev)
{
- unsigned long val, res = dev->trans_start;
+ unsigned long val, res;
unsigned int i;
+ if (is_vlan_dev(dev))
+ dev = vlan_dev_real_dev(dev);
+ res = dev->trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
val = netdev_get_tx_queue(dev, i)->trans_start;
if (val && time_after(val, res))
res = val;
}
dev->trans_start = res;
+
return res;
}
EXPORT_SYMBOL(dev_trans_start);
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
memset(r, 0, sizeof(*r));
r->overhead = conf->overhead;
r->rate_bytes_ps = conf->rate;
+ r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
r->mult = 1;
/*
* The deal here is to replace a divide by a reciprocal one
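[Editor's note] The hunk above only adds the linklayer field to psched_ratecfg_precompute(), but the surrounding comment about replacing a divide with a reciprocal one is worth illustrating. The sketch below is a hedged userspace rendition of that general technique, not a copy of the kernel function; the ratecfg_precompute()/len_to_ns() names and the loop bound are illustrative. The setup scales the reciprocal of the rate into a 32-bit multiplier plus a shift so that the per-packet byte-to-nanosecond conversion needs no 64-bit divide.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratecfg {
    uint64_t rate_bytes_ps;
    uint32_t mult;
    uint8_t  shift;
};

/* Precompute mult/shift so that bytes-to-nanoseconds becomes a
 * multiply and a shift instead of a divide per packet. */
static void ratecfg_precompute(struct ratecfg *r, uint64_t rate_bytes_ps)
{
    uint64_t factor = NSEC_PER_SEC;

    r->rate_bytes_ps = rate_bytes_ps;
    r->mult = 1;
    r->shift = 0;

    if (!rate_bytes_ps)
        return;

    /* Scale the reciprocal up while multiplier and factor still fit. */
    for (;;) {
        r->mult = (uint32_t)(factor / rate_bytes_ps);
        if ((r->mult & (1U << 31)) || (factor & (1ULL << 63)))
            break;
        factor <<= 1;
        r->shift++;
    }
}

/* Per-packet cost: one multiply and one shift, no divide. */
static uint64_t len_to_ns(const struct ratecfg *r, unsigned int len)
{
    return ((uint64_t)len * r->mult) >> r->shift;
}

int main(void)
{
    struct ratecfg r;
    uint64_t rate = 10000000 / 8;   /* ~10 Mbit/s expressed in bytes/s */

    ratecfg_precompute(&r, rate);
    printf("1500 bytes take ~%llu ns (direct division: %llu ns)\n",
           (unsigned long long)len_to_ns(&r, 1500),
           (unsigned long long)(1500ULL * NSEC_PER_SEC / rate));
    return 0;
}

For 10 Mbit/s (1,250,000 bytes/s) this yields a multiplier/shift pair for which 1500 bytes maps to exactly 1,200,000 ns, matching the direct division.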
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 45e751527dfc..c2178b15ca6e 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
struct nlattr *opt = tca[TCA_OPTIONS];
+ struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_opt *hopt;
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!hopt->rate.rate || !hopt->ceil.rate)
goto failure;
+ /* Keep backward compatibility with rate_table based iproute2 tc */
+ if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+ rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+ if (rtab)
+ qdisc_put_rtab(rtab);
+ }
+ if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+ ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+ if (ctab)
+ qdisc_put_rtab(ctab);
+ }
+
if (!cl) { /* new class */
struct Qdisc *new_q;
int prio;
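[Editor's note] As a closing note on the HTB hunk: the new code fetches a rate table it never keeps, because after this patch qdisc_get_rtab() runs __detect_linklayer() and writes the result back into the ratespec before the class configuration is applied. The sketch below is a hedged illustration of that get-then-put pattern; the struct definitions and the fake_* helpers are stand-ins, not kernel code.

#include <stdio.h>

/* Minimal stand-ins for the kernel types involved; only the fields
 * touched by the backward-compat path are modelled here. */
enum { TC_LINKLAYER_UNAWARE, TC_LINKLAYER_ETHERNET, TC_LINKLAYER_ATM };

struct tc_ratespec {
    unsigned char linklayer;
    unsigned int  rate;
};

struct qdisc_rate_table {
    struct tc_ratespec rate;
    unsigned int refcnt;
};

/* Illustrative stub: the real qdisc_get_rtab() copies the 1024-byte
 * table from the netlink attribute and, after this patch, runs
 * __detect_linklayer() to fix up r->linklayer as a side effect. */
static struct qdisc_rate_table *fake_get_rtab(struct tc_ratespec *r)
{
    static struct qdisc_rate_table rtab;

    if (r->linklayer == TC_LINKLAYER_UNAWARE)
        r->linklayer = TC_LINKLAYER_ATM;    /* pretend detection fired */
    rtab.rate = *r;
    rtab.refcnt++;
    return &rtab;
}

static void fake_put_rtab(struct qdisc_rate_table *rtab)
{
    rtab->refcnt--;
}

int main(void)
{
    struct tc_ratespec rate = { .linklayer = TC_LINKLAYER_UNAWARE, .rate = 1250000 };
    struct qdisc_rate_table *rtab;

    /* Same get-then-put pattern as the htb_change_class() hunk: the
     * table itself is not kept; the call is made purely so the
     * linklayer field gets detected and written back into 'rate'. */
    if (rate.linklayer == TC_LINKLAYER_UNAWARE) {
        rtab = fake_get_rtab(&rate);
        if (rtab)
            fake_put_rtab(rtab);
    }
    printf("linklayer after detection: %d\n", rate.linklayer);
    return 0;
}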