author     Jiri Pirko <jiri@mellanox.com>  2017-08-04 14:29:12 +0200
committer  David S. Miller <davem@davemloft.net>  2017-08-04 20:21:24 +0200
commit     c09fc2e11ed1b7fc8cfa97fb1da544225fc32277 (patch)
tree       9534347eac6389eb05e025c46814585b9862a492 /net/sched/cls_flow.c
parent     net: sched: cls_cgroup: no need to call tcf_exts_change for newly allocated s... (diff)
download   linux-c09fc2e11ed1b7fc8cfa97fb1da544225fc32277.tar.xz
           linux-c09fc2e11ed1b7fc8cfa97fb1da544225fc32277.zip
net: sched: cls_flow: no need to call tcf_exts_change for newly allocated struct
As the fnew struct was just allocated, there is no need to use tcf_exts_change to do an atomic change; we can simply fill the unused exts struct directly with tcf_exts_validate.

Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
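The change is easier to see as a condensed before/after sketch of the relevant calls in flow_change(). It is assembled from the hunks below with error handling and unrelated lines omitted, so treat it as an illustration rather than a literal excerpt:

	/* Before: actions were validated into a temporary tcf_exts and then
	 * installed into fnew->exts with tcf_exts_change(), which does the
	 * replacement atomically because it is meant for exts that may
	 * already be visible to readers.
	 */
	struct tcf_exts e;

	err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	/* ... */
	tcf_exts_change(tp, &fnew->exts, &e);

	/* After: fnew was just kzalloc'ed and is not yet published, so the
	 * actions can be validated straight into fnew->exts; the temporary
	 * struct, its tcf_exts_destroy() in the error path, and one error
	 * label all go away.
	 */
	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);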
Diffstat (limited to 'net/sched/cls_flow.c')
-rw-r--r--  net/sched/cls_flow.c | 41
1 file changed, 16 insertions, 25 deletions
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 71fd1af01726..55e281b20140 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -388,7 +388,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	struct flow_filter *fold, *fnew;
 	struct nlattr *opt = tca[TCA_OPTIONS];
 	struct nlattr *tb[TCA_FLOW_MAX + 1];
-	struct tcf_exts e;
 	unsigned int nkeys = 0;
 	unsigned int perturb_period = 0;
 	u32 baseclass = 0;
@@ -424,31 +423,27 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 			return -EOPNOTSUPP;
 	}
 
-	err = tcf_exts_init(&e, TCA_FLOW_ACT, TCA_FLOW_POLICE);
-	if (err < 0)
-		goto err1;
-	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
-	if (err < 0)
-		goto err1;
-
-	err = -ENOBUFS;
 	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
 	if (!fnew)
-		goto err1;
+		return -ENOBUFS;
 
 	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
 	if (err < 0)
-		goto err2;
+		goto err1;
 
 	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
 	if (err < 0)
-		goto err3;
+		goto err2;
+
+	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
+	if (err < 0)
+		goto err2;
 
 	fold = (struct flow_filter *)*arg;
 	if (fold) {
 		err = -EINVAL;
 		if (fold->handle != handle && handle)
-			goto err3;
+			goto err2;
 
 		/* Copy fold into fnew */
 		fnew->tp = fold->tp;
@@ -468,31 +463,31 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
-			goto err3;
+			goto err2;
 
 		if (mode == FLOW_MODE_HASH)
 			perturb_period = fold->perturb_period;
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
-				goto err3;
+				goto err2;
 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 		}
 	} else {
 		err = -EINVAL;
 		if (!handle)
-			goto err3;
+			goto err2;
 		if (!tb[TCA_FLOW_KEYS])
-			goto err3;
+			goto err2;
 
 		mode = FLOW_MODE_MAP;
 		if (tb[TCA_FLOW_MODE])
 			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
 		if (mode != FLOW_MODE_HASH && nkeys > 1)
-			goto err3;
+			goto err2;
 
 		if (tb[TCA_FLOW_PERTURB]) {
 			if (mode != FLOW_MODE_HASH)
-				goto err3;
+				goto err2;
 			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
 		}
 	}
@@ -510,8 +505,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 	setup_deferrable_timer(&fnew->perturb_timer, flow_perturbation,
 			       (unsigned long)fnew);
 
-	tcf_exts_change(tp, &fnew->exts, &e);
-
 	netif_keep_dst(qdisc_dev(tp->q));
 
 	if (tb[TCA_FLOW_KEYS]) {
@@ -550,13 +543,11 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 		call_rcu(&fold->rcu, flow_destroy_filter);
 	return 0;
 
-err3:
+err2:
 	tcf_exts_destroy(&fnew->exts);
 	tcf_em_tree_destroy(&fnew->ematches);
-err2:
-	kfree(fnew);
 err1:
-	tcf_exts_destroy(&e);
+	kfree(fnew);
 	return err;
 }