author     Patrick McHardy <kaber@trash.net>        2008-01-24 05:36:12 +0100
committer  David S. Miller <davem@davemloft.net>    2008-01-29 00:11:23 +0100
commit     6fa8c0144b770dac941cf2c15053b6e24f046c8a (patch)
tree       7c91cd0e9a98f0532df30a9d8549e2c76a3ce716 /net/sched/cls_tcindex.c
parent     [NET_SCHED]: Use nla_policy for attribute validation in packet schedulers (diff)
[NET_SCHED]: Use nla_policy for attribute validation in classifiers
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/cls_tcindex.c')
-rw-r--r--  net/sched/cls_tcindex.c  31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 28098564b4d7..ee60b2d1705d 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -193,6 +193,14 @@ valid_perfect_hash(struct tcindex_data *p)
         return p->hash > (p->mask >> p->shift);
 }
 
+static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
+        [TCA_TCINDEX_HASH]         = { .type = NLA_U32 },
+        [TCA_TCINDEX_MASK]         = { .type = NLA_U16 },
+        [TCA_TCINDEX_SHIFT]        = { .type = NLA_U32 },
+        [TCA_TCINDEX_FALL_THROUGH] = { .type = NLA_U32 },
+        [TCA_TCINDEX_CLASSID]      = { .type = NLA_U32 },
+};
+
 static int
 tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
                   struct tcindex_data *p, struct tcindex_filter_result *r,
@@ -217,24 +225,14 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
         else
                 memset(&cr, 0, sizeof(cr));
 
-        err = -EINVAL;
-        if (tb[TCA_TCINDEX_HASH]) {
-                if (nla_len(tb[TCA_TCINDEX_HASH]) < sizeof(u32))
-                        goto errout;
+        if (tb[TCA_TCINDEX_HASH])
                 cp.hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
-        }
 
-        if (tb[TCA_TCINDEX_MASK]) {
-                if (nla_len(tb[TCA_TCINDEX_MASK]) < sizeof(u16))
-                        goto errout;
+        if (tb[TCA_TCINDEX_MASK])
                 cp.mask = nla_get_u16(tb[TCA_TCINDEX_MASK]);
-        }
 
-        if (tb[TCA_TCINDEX_SHIFT]) {
-                if (nla_len(tb[TCA_TCINDEX_SHIFT]) < sizeof(int))
-                        goto errout;
+        if (tb[TCA_TCINDEX_SHIFT])
                 cp.shift = nla_get_u32(tb[TCA_TCINDEX_SHIFT]);
-        }
 
         err = -EBUSY;
         /* Hash already allocated, make sure that we still meet the
@@ -248,11 +246,8 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
                 goto errout;
 
         err = -EINVAL;
-        if (tb[TCA_TCINDEX_FALL_THROUGH]) {
-                if (nla_len(tb[TCA_TCINDEX_FALL_THROUGH]) < sizeof(u32))
-                        goto errout;
+        if (tb[TCA_TCINDEX_FALL_THROUGH])
                 cp.fall_through = nla_get_u32(tb[TCA_TCINDEX_FALL_THROUGH]);
-        }
 
         if (!cp.hash) {
                 /* Hash not specified, use perfect hash if the upper limit
@@ -358,7 +353,7 @@ tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
         if (!opt)
                 return 0;
 
-        err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, NULL);
+        err = nla_parse_nested(tb, TCA_TCINDEX_MAX, opt, tcindex_policy);
         if (err < 0)
                 return err;
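
The change above swaps the classifier's open-coded nla_len() minimum-length checks for a declarative nla_policy table, letting nla_parse_nested() reject undersized attributes centrally before tcindex_set_parms() ever looks at them. The following standalone C sketch is only a rough userspace illustration of that table-driven check, not kernel code: struct policy, validate() and main() are invented for the example, and the attribute enum is abbreviated (the real enum in linux/pkt_cls.h also carries TCA_TCINDEX_POLICE and TCA_TCINDEX_ACT).

/*
 * Userspace sketch of policy-table length validation.
 * Build with: cc -std=c99 -Wall policy_demo.c
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Abbreviated copy of the tcindex attribute numbering used above. */
enum {
        TCA_TCINDEX_UNSPEC,
        TCA_TCINDEX_HASH,
        TCA_TCINDEX_MASK,
        TCA_TCINDEX_SHIFT,
        TCA_TCINDEX_FALL_THROUGH,
        TCA_TCINDEX_CLASSID,
        __TCA_TCINDEX_MAX
};
#define TCA_TCINDEX_MAX (__TCA_TCINDEX_MAX - 1)

/* Simplified stand-in for struct nla_policy: only a minimum payload length. */
struct policy {
        size_t min_len;
};

static const struct policy policy_tbl[TCA_TCINDEX_MAX + 1] = {
        [TCA_TCINDEX_HASH]         = { sizeof(uint32_t) },
        [TCA_TCINDEX_MASK]         = { sizeof(uint16_t) },
        [TCA_TCINDEX_SHIFT]        = { sizeof(uint32_t) },
        [TCA_TCINDEX_FALL_THROUGH] = { sizeof(uint32_t) },
        [TCA_TCINDEX_CLASSID]      = { sizeof(uint32_t) },
};

/* Simplified attribute as a parser would see it: type plus payload length. */
struct attr {
        int type;
        size_t len;
};

/* Reject any attribute shorter than its policy entry demands, the way the
 * generic netlink parser does once it is handed a policy table. */
static int validate(const struct attr *a)
{
        if (a->type < 0 || a->type > TCA_TCINDEX_MAX)
                return 0;       /* unknown types are simply skipped */
        if (a->len < policy_tbl[a->type].min_len)
                return -1;      /* the kernel parser fails validation here */
        return 0;
}

int main(void)
{
        struct attr mask_ok    = { TCA_TCINDEX_MASK, sizeof(uint16_t) };
        struct attr hash_short = { TCA_TCINDEX_HASH, 1 };  /* truncated u32 */

        printf("mask: %d, hash: %d\n", validate(&mask_ok), validate(&hash_short));
        return 0;
}

Running the sketch prints "mask: 0, hash: -1": the well-formed u16 mask passes while the truncated u32 hash is refused up front, which is the kind of check this patch moves out of the classifier and into the generic netlink parsing code.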