Diffstat (limited to 'net/sched')
-rw-r--r--	net/sched/act_api.c		|   4
-rw-r--r--	net/sched/act_csum.c		|   3
-rw-r--r--	net/sched/act_ct.c		|   3
-rw-r--r--	net/sched/act_gact.c		|  13
-rw-r--r--	net/sched/act_gate.c		|   3
-rw-r--r--	net/sched/act_mirred.c		|   4
-rw-r--r--	net/sched/act_mpls.c		|  10
-rw-r--r--	net/sched/act_pedit.c		|   4
-rw-r--r--	net/sched/act_police.c		|  20
-rw-r--r--	net/sched/act_sample.c		|   3
-rw-r--r--	net/sched/act_skbedit.c		|  65
-rw-r--r--	net/sched/act_tunnel_key.c	|   4
-rw-r--r--	net/sched/act_vlan.c		|   4
-rw-r--r--	net/sched/cls_api.c		|  22
-rw-r--r--	net/sched/cls_flower.c		| 104
-rw-r--r--	net/sched/cls_matchall.c	|  19
-rw-r--r--	net/sched/em_meta.c		|   7
-rw-r--r--	net/sched/sch_generic.c		|  12
18 files changed, 211 insertions, 93 deletions
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 4f51094da9da..da9733da9868 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -195,7 +195,7 @@ static int offload_action_init(struct flow_offload_action *fl_action,
 	if (act->ops->offload_act_setup) {
 		spin_lock_bh(&act->tcfa_lock);
 		err = act->ops->offload_act_setup(act, fl_action, NULL,
-						  false);
+						  false, extack);
 		spin_unlock_bh(&act->tcfa_lock);
 		return err;
 	}
@@ -271,7 +271,7 @@ static int tcf_action_offload_add_ex(struct tc_action *action,
 	if (err)
 		goto fl_err;
 
-	err = tc_setup_action(&fl_action->action, actions);
+	err = tc_setup_action(&fl_action->action, actions, extack);
 	if (err) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "Failed to setup tc actions for offload");
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index e0f515b774ca..22847ee009ef 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -696,7 +696,8 @@ static size_t tcf_csum_get_fill_size(const struct tc_action *act)
 }
 
 static int tcf_csum_offload_act_setup(struct tc_action *act, void *entry_data,
-				      u32 *index_inc, bool bind)
+				      u32 *index_inc, bool bind,
+				      struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index b1f502fce595..8af9d6e5ba61 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1584,7 +1584,8 @@ static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
 }
 
 static int tcf_ct_offload_act_setup(struct tc_action *act, void *entry_data,
-				    u32 *index_inc, bool bind)
+				    u32 *index_inc, bool bind,
+				    struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index bde6a6c01e64..ac29d1065232 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -253,7 +253,8 @@ static size_t tcf_gact_get_fill_size(const struct tc_action *act)
 }
 
 static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data,
-				      u32 *index_inc, bool bind)
+				      u32 *index_inc, bool bind,
+				      struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
@@ -267,7 +268,17 @@ static int tcf_gact_offload_act_setup(struct tc_action *act, void *entry_data,
 	} else if (is_tcf_gact_goto_chain(act)) {
 		entry->id = FLOW_ACTION_GOTO;
 		entry->chain_index = tcf_gact_goto_chain_index(act);
+	} else if (is_tcf_gact_continue(act)) {
+		NL_SET_ERR_MSG_MOD(extack, "Offload of \"continue\" action is not supported");
+		return -EOPNOTSUPP;
+	} else if (is_tcf_gact_reclassify(act)) {
+		NL_SET_ERR_MSG_MOD(extack, "Offload of \"reclassify\" action is not supported");
+		return -EOPNOTSUPP;
+	} else if (is_tcf_gact_pipe(act)) {
+		NL_SET_ERR_MSG_MOD(extack, "Offload of \"pipe\" action is not supported");
+		return -EOPNOTSUPP;
 	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported generic action offload");
 		return -EOPNOTSUPP;
 	}
 	*index_inc = 1;
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index d56e73843a4b..fd5155274733 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -619,7 +619,8 @@ static int tcf_gate_get_entries(struct flow_action_entry *entry,
 }
 
 static int tcf_gate_offload_act_setup(struct tc_action *act, void *entry_data,
-				      u32 *index_inc, bool bind)
+				      u32 *index_inc, bool bind,
+				      struct netlink_ext_ack *extack)
 {
 	int err;
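Editor's note: the act_* hunks above and below all make the same interface change: tc_action_ops::offload_act_setup() gains a struct netlink_ext_ack * so that an unsupported configuration can carry a human-readable message back to user space instead of a bare -EOPNOTSUPP. Below is a minimal userspace sketch of that pattern; fake_extack and fake_offload_act_setup are illustrative stand-ins, not kernel API.

/* Minimal sketch of the extack pattern: an "extended ack" carries a
 * message alongside the errno-style return value. */
#include <errno.h>
#include <stdio.h>

struct fake_extack {			/* stands in for struct netlink_ext_ack */
	const char *msg;
};

static int fake_offload_act_setup(int action_id, struct fake_extack *extack)
{
	if (action_id != 0) {		/* pretend only action 0 is offloadable */
		extack->msg = "Unsupported action offload";
		return -EOPNOTSUPP;
	}
	return 0;
}

int main(void)
{
	struct fake_extack extack = { NULL };

	if (fake_offload_act_setup(1, &extack))
		fprintf(stderr, "offload failed: %s\n", extack.msg);
	return 0;
}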
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 39acd1d18609..ebb92fb072ab 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -460,7 +460,8 @@ static void tcf_offload_mirred_get_dev(struct flow_action_entry *entry,
 }
 
 static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
-					u32 *index_inc, bool bind)
+					u32 *index_inc, bool bind,
+					struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
@@ -478,6 +479,7 @@ static int tcf_mirred_offload_act_setup(struct tc_action *act, void *entry_data,
 			entry->id = FLOW_ACTION_MIRRED_INGRESS;
 			tcf_offload_mirred_get_dev(entry, act);
 		} else {
+			NL_SET_ERR_MSG_MOD(extack, "Unsupported mirred offload");
 			return -EOPNOTSUPP;
 		}
 		*index_inc = 1;
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index b9ff3459fdab..adabeccb63e1 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -385,7 +385,8 @@ static int tcf_mpls_search(struct net *net, struct tc_action **a, u32 index)
 }
 
 static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data,
-				      u32 *index_inc, bool bind)
+				      u32 *index_inc, bool bind,
+				      struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
@@ -410,7 +411,14 @@ static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data,
 			entry->mpls_mangle.bos = tcf_mpls_bos(act);
 			entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
 			break;
+		case TCA_MPLS_ACT_DEC_TTL:
+			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"dec_ttl\" option is used");
+			return -EOPNOTSUPP;
+		case TCA_MPLS_ACT_MAC_PUSH:
+			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"mac_push\" option is used");
+			return -EOPNOTSUPP;
 		default:
+			NL_SET_ERR_MSG_MOD(extack, "Unsupported MPLS mode offload");
 			return -EOPNOTSUPP;
 		}
 		*index_inc = 1;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 211c757bfc3c..823ee643371c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -510,7 +510,8 @@ static int tcf_pedit_search(struct net *net, struct tc_action **a, u32 index)
 }
 
 static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
-				       u32 *index_inc, bool bind)
+				       u32 *index_inc, bool bind,
+				       struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
@@ -525,6 +526,7 @@ static int tcf_pedit_offload_act_setup(struct tc_action *act, void *entry_data,
 			entry->id = FLOW_ACTION_ADD;
 			break;
 		default:
+			NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit command offload");
 			return -EOPNOTSUPP;
 		}
 		entry->mangle.htype = tcf_pedit_htype(act, k);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index f4d917705263..79c8901f66ab 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -419,7 +419,8 @@ static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
 	return tcf_idr_search(tn, a, index);
 }
 
-static int tcf_police_act_to_flow_act(int tc_act, u32 *extval)
+static int tcf_police_act_to_flow_act(int tc_act, u32 *extval,
+				      struct netlink_ext_ack *extack)
 {
 	int act_id = -EOPNOTSUPP;
 
@@ -430,19 +431,28 @@ static int tcf_police_act_to_flow_act(int tc_act, u32 *extval)
 			act_id = FLOW_ACTION_DROP;
 		else if (tc_act == TC_ACT_PIPE)
 			act_id = FLOW_ACTION_PIPE;
+		else if (tc_act == TC_ACT_RECLASSIFY)
+			NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"reclassify\"");
+		else
+			NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
 	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN)) {
 		act_id = FLOW_ACTION_GOTO;
 		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
 	} else if (TC_ACT_EXT_CMP(tc_act, TC_ACT_JUMP)) {
 		act_id = FLOW_ACTION_JUMP;
 		*extval = tc_act & TC_ACT_EXT_VAL_MASK;
+	} else if (tc_act == TC_ACT_UNSPEC) {
+		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform/exceed action is \"continue\"");
+	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported conform/exceed action offload");
 	}
 
 	return act_id;
 }
 
 static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
-					u32 *index_inc, bool bind)
+					u32 *index_inc, bool bind,
+					struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
@@ -466,14 +476,16 @@ static int tcf_police_offload_act_setup(struct tc_action *act, void *entry_data,
 		entry->police.mtu = tcf_police_tcfp_mtu(act);
 
 		act_id = tcf_police_act_to_flow_act(police->tcf_action,
-						    &entry->police.exceed.extval);
+						    &entry->police.exceed.extval,
+						    extack);
 		if (act_id < 0)
 			return act_id;
 
 		entry->police.exceed.act_id = act_id;
 
 		act_id = tcf_police_act_to_flow_act(p->tcfp_result,
-						    &entry->police.notexceed.extval);
+						    &entry->police.notexceed.extval,
+						    extack);
 		if (act_id < 0)
 			return act_id;
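Editor's note: in tcf_police_act_to_flow_act() above, the goto_chain and jump branches decode a "combined" TC action code, where the high bits carry the opcode and the low bits the chain index or jump offset. The self-contained demo below shows that split; the macros mirror the shape of those in include/uapi/linux/pkt_cls.h so the program compiles on its own.

/* Demo of splitting a combined TC action code into opcode and value,
 * as done for TC_ACT_GOTO_CHAIN / TC_ACT_JUMP above. */
#include <stdio.h>

#define __TC_ACT_EXT_SHIFT	28
#define __TC_ACT_EXT(local)	((local) << __TC_ACT_EXT_SHIFT)
#define TC_ACT_EXT_VAL_MASK	((1 << __TC_ACT_EXT_SHIFT) - 1)
#define TC_ACT_EXT_OPCODE(c)	((c) & ~TC_ACT_EXT_VAL_MASK)
#define TC_ACT_EXT_CMP(c, op)	(TC_ACT_EXT_OPCODE(c) == (op))
#define TC_ACT_JUMP		__TC_ACT_EXT(1)
#define TC_ACT_GOTO_CHAIN	__TC_ACT_EXT(2)

int main(void)
{
	int tc_act = TC_ACT_GOTO_CHAIN | 7;	/* "goto chain 7" */

	if (TC_ACT_EXT_CMP(tc_act, TC_ACT_GOTO_CHAIN))
		printf("goto chain %d\n", tc_act & TC_ACT_EXT_VAL_MASK);
	return 0;
}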
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 9a22cdda6bbd..2f7f5e44d28c 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -291,7 +291,8 @@ static void tcf_offload_sample_get_group(struct flow_action_entry *entry,
 }
 
 static int tcf_sample_offload_act_setup(struct tc_action *act, void *entry_data,
-					u32 *index_inc, bool bind)
+					u32 *index_inc, bool bind,
+					struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index ceba11b198bb..e3bd11dfe1ca 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -23,6 +23,20 @@ static unsigned int skbedit_net_id;
 
 static struct tc_action_ops act_skbedit_ops;
 
+static u16 tcf_skbedit_hash(struct tcf_skbedit_params *params,
+			    struct sk_buff *skb)
+{
+	u16 queue_mapping = params->queue_mapping;
+
+	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
+		u32 hash = skb_get_hash(skb);
+
+		queue_mapping += hash % params->mapping_mod;
+	}
+
+	return netdev_cap_txqueue(skb->dev, queue_mapping);
+}
+
 static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
 			   struct tcf_result *res)
 {
@@ -58,8 +72,12 @@ static int tcf_skbedit_act(struct sk_buff *skb, const struct tc_action *a,
 		}
 	}
 	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
-	    skb->dev->real_num_tx_queues > params->queue_mapping)
-		skb_set_queue_mapping(skb, params->queue_mapping);
+	    skb->dev->real_num_tx_queues > params->queue_mapping) {
+#ifdef CONFIG_NET_EGRESS
+		netdev_xmit_skip_txqueue(true);
+#endif
+		skb_set_queue_mapping(skb, tcf_skbedit_hash(params, skb));
+	}
 	if (params->flags & SKBEDIT_F_MARK) {
 		skb->mark &= ~params->mask;
 		skb->mark |= params->mark & params->mask;
@@ -92,6 +110,7 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
 	[TCA_SKBEDIT_PTYPE]		= { .len = sizeof(u16) },
 	[TCA_SKBEDIT_MASK]		= { .len = sizeof(u32) },
 	[TCA_SKBEDIT_FLAGS]		= { .len = sizeof(u64) },
+	[TCA_SKBEDIT_QUEUE_MAPPING_MAX]	= { .len = sizeof(u16) },
 };
 
 static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
@@ -108,6 +127,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 	struct tcf_skbedit *d;
 	u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
 	u16 *queue_mapping = NULL, *ptype = NULL;
+	u16 mapping_mod = 1;
 	bool exists = false;
 	int ret = 0, err;
 	u32 index;
@@ -153,6 +173,25 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 	if (tb[TCA_SKBEDIT_FLAGS] != NULL) {
 		u64 *pure_flags = nla_data(tb[TCA_SKBEDIT_FLAGS]);
 
+		if (*pure_flags & SKBEDIT_F_TXQ_SKBHASH) {
+			u16 *queue_mapping_max;
+
+			if (!tb[TCA_SKBEDIT_QUEUE_MAPPING] ||
+			    !tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]) {
+				NL_SET_ERR_MSG_MOD(extack, "Missing required range of queue_mapping.");
+				return -EINVAL;
+			}
+
+			queue_mapping_max =
+				nla_data(tb[TCA_SKBEDIT_QUEUE_MAPPING_MAX]);
+			if (*queue_mapping_max < *queue_mapping) {
+				NL_SET_ERR_MSG_MOD(extack, "The range of queue_mapping is invalid, max < min.");
+				return -EINVAL;
+			}
+
+			mapping_mod = *queue_mapping_max - *queue_mapping + 1;
+			flags |= SKBEDIT_F_TXQ_SKBHASH;
+		}
 		if (*pure_flags & SKBEDIT_F_INHERITDSFIELD)
 			flags |= SKBEDIT_F_INHERITDSFIELD;
 	}
@@ -204,8 +243,10 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 	params_new->flags = flags;
 	if (flags & SKBEDIT_F_PRIORITY)
 		params_new->priority = *priority;
-	if (flags & SKBEDIT_F_QUEUE_MAPPING)
+	if (flags & SKBEDIT_F_QUEUE_MAPPING) {
 		params_new->queue_mapping = *queue_mapping;
+		params_new->mapping_mod = mapping_mod;
+	}
 	if (flags & SKBEDIT_F_MARK)
 		params_new->mark = *mark;
 	if (flags & SKBEDIT_F_PTYPE)
@@ -272,6 +313,13 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
 		goto nla_put_failure;
 	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
 		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
+	if (params->flags & SKBEDIT_F_TXQ_SKBHASH) {
+		if (nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING_MAX,
+				params->queue_mapping + params->mapping_mod - 1))
+			goto nla_put_failure;
+
+		pure_flags |= SKBEDIT_F_TXQ_SKBHASH;
+	}
 	if (pure_flags != 0 &&
 	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
 		goto nla_put_failure;
@@ -321,6 +369,7 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
 	return nla_total_size(sizeof(struct tc_skbedit))
 		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_PRIORITY */
 		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING */
+		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_QUEUE_MAPPING_MAX */
 		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MARK */
 		+ nla_total_size(sizeof(u16)) /* TCA_SKBEDIT_PTYPE */
 		+ nla_total_size(sizeof(u32)) /* TCA_SKBEDIT_MASK */
@@ -328,7 +377,8 @@ static size_t tcf_skbedit_get_fill_size(const struct tc_action *act)
 }
 
 static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
-					 u32 *index_inc, bool bind)
+					 u32 *index_inc, bool bind,
+					 struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
@@ -342,7 +392,14 @@ static int tcf_skbedit_offload_act_setup(struct tc_action *act, void *entry_data,
 	} else if (is_tcf_skbedit_priority(act)) {
 		entry->id = FLOW_ACTION_PRIORITY;
 		entry->priority = tcf_skbedit_priority(act);
+	} else if (is_tcf_skbedit_queue_mapping(act)) {
+		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"queue_mapping\" option is used");
+		return -EOPNOTSUPP;
+	} else if (is_tcf_skbedit_inheritdsfield(act)) {
+		NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"inheritdsfield\" option is used");
+		return -EOPNOTSUPP;
 	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported skbedit option offload");
 		return -EOPNOTSUPP;
 	}
 	*index_inc = 1;
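Editor's note: act_skbedit gains SKBEDIT_F_TXQ_SKBHASH above. Instead of pinning all matched traffic to a single tx queue, user space may supply a range via the new TCA_SKBEDIT_QUEUE_MAPPING_MAX attribute; tcf_skbedit_hash() then spreads flows across the range using the skb hash, with mapping_mod = max - min + 1. A userspace sketch of just that arithmetic (pick_txq is a hypothetical name, not kernel API):

/* Sketch of the queue selection added to act_skbedit: each flow's hash
 * picks one queue inside the configured [min, max] range. */
#include <stdint.h>
#include <stdio.h>

static uint16_t pick_txq(uint16_t min, uint16_t max, uint32_t skb_hash)
{
	uint16_t mapping_mod = max - min + 1;

	return min + skb_hash % mapping_mod;
}

int main(void)
{
	/* hashes 0..5 spread round-robin over queues 2..4 */
	for (uint32_t hash = 0; hash < 6; hash++)
		printf("hash %u -> txq %u\n", (unsigned)hash,
		       (unsigned)pick_txq(2, 4, hash));
	return 0;
}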
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 23aba03d26a8..856dc23cef8c 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -808,7 +808,8 @@ static int tcf_tunnel_encap_get_tunnel(struct flow_action_entry *entry,
 static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
 					    void *entry_data,
 					    u32 *index_inc,
-					    bool bind)
+					    bool bind,
+					    struct netlink_ext_ack *extack)
 {
 	int err;
 
@@ -823,6 +824,7 @@ static int tcf_tunnel_key_offload_act_setup(struct tc_action *act,
 	} else if (is_tcf_tunnel_release(act)) {
 		entry->id = FLOW_ACTION_TUNNEL_DECAP;
 	} else {
+		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel key mode offload");
 		return -EOPNOTSUPP;
 	}
 	*index_inc = 1;
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 883454c4f921..68b5e772386a 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -369,7 +369,8 @@ static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
 }
 
 static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
-				      u32 *index_inc, bool bind)
+				      u32 *index_inc, bool bind,
+				      struct netlink_ext_ack *extack)
 {
 	if (bind) {
 		struct flow_action_entry *entry = entry_data;
@@ -398,6 +399,7 @@ static int tcf_vlan_offload_act_setup(struct tc_action *act, void *entry_data,
 			tcf_vlan_push_eth(entry->vlan_push_eth.src, entry->vlan_push_eth.dst, act);
 			break;
 		default:
+			NL_SET_ERR_MSG_MOD(extack, "Unsupported vlan action mode offload");
 			return -EOPNOTSUPP;
 		}
 		*index_inc = 1;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f0699f39afdb..9bb4d3dcc994 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3513,20 +3513,25 @@ EXPORT_SYMBOL(tc_cleanup_offload_action);
 
 static int tc_setup_offload_act(struct tc_action *act,
 				struct flow_action_entry *entry,
-				u32 *index_inc)
+				u32 *index_inc,
+				struct netlink_ext_ack *extack)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (act->ops->offload_act_setup)
-		return act->ops->offload_act_setup(act, entry, index_inc, true);
-	else
+	if (act->ops->offload_act_setup) {
+		return act->ops->offload_act_setup(act, entry, index_inc, true,
+						   extack);
+	} else {
+		NL_SET_ERR_MSG(extack, "Action does not support offload");
 		return -EOPNOTSUPP;
+	}
#else
 	return 0;
 #endif
 }
 
 int tc_setup_action(struct flow_action *flow_action,
-		    struct tc_action *actions[])
+		    struct tc_action *actions[],
+		    struct netlink_ext_ack *extack)
 {
 	int i, j, index, err = 0;
 	struct tc_action *act;
@@ -3551,7 +3556,7 @@ int tc_setup_action(struct flow_action *flow_action,
 			entry->hw_stats = tc_act_hw_stats(act->hw_stats);
 			entry->hw_index = act->tcfa_index;
 			index = 0;
-			err = tc_setup_offload_act(act, entry, &index);
+			err = tc_setup_offload_act(act, entry, &index, extack);
 			if (!err)
 				j += index;
 			else
@@ -3570,13 +3575,14 @@ err_out_locked:
 }
 
 int tc_setup_offload_action(struct flow_action *flow_action,
-			    const struct tcf_exts *exts)
+			    const struct tcf_exts *exts,
+			    struct netlink_ext_ack *extack)
 {
 #ifdef CONFIG_NET_CLS_ACT
 	if (!exts)
 		return 0;
 
-	return tc_setup_action(flow_action, exts->actions);
+	return tc_setup_action(flow_action, exts->actions, extack);
 #else
 	return 0;
 #endif
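Editor's note: tc_setup_action() above walks the TC actions and lets each one append entries to the flow_action array; offload_act_setup() reports through *index_inc how many entries it emitted so the write position can advance. A simplified stand-alone rendering of that loop, under the assumption that one action maps to one entry; all types and names here are illustrative, not the kernel's.

/* Sketch of the expansion loop in tc_setup_action(): j advances by
 * however many entries each action produced. */
#include <stdio.h>

#define MAX_ENTRIES 8

static int setup_one(int act, int *entries, unsigned int *index_inc)
{
	entries[0] = act * 10;	/* pretend translation to a flow entry */
	*index_inc = 1;		/* this action produced one entry */
	return 0;
}

int main(void)
{
	int actions[] = { 1, 2, 3 };
	int entries[MAX_ENTRIES];
	unsigned int i, j = 0, index;

	for (i = 0; i < 3; i++) {
		index = 0;
		if (setup_one(actions[i], &entries[j], &index))
			return 1;
		j += index;
	}
	printf("built %u flow entries\n", j);
	return 0;
}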
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index ed5e6f08e74a..dcca70144dff 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -72,6 +72,7 @@ struct fl_flow_key {
 	} tp_range;
 	struct flow_dissector_key_ct ct;
 	struct flow_dissector_key_hash hash;
+	struct flow_dissector_key_num_of_vlans num_of_vlans;
 } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
 
 struct fl_flow_mask_range {
@@ -464,14 +465,12 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 	cls_flower.rule->match.key = &f->mkey;
 	cls_flower.classid = f->res.classid;
 
-	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
+	err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
+				      cls_flower.common.extack);
 	if (err) {
 		kfree(cls_flower.rule);
-		if (skip_sw) {
-			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
-			return err;
-		}
-		return 0;
+
+		return skip_sw ? err : 0;
 	}
 
 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFLOWER, &cls_flower,
@@ -714,6 +713,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
 	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
 	[TCA_FLOWER_KEY_HASH]		= { .type = NLA_U32 },
 	[TCA_FLOWER_KEY_HASH_MASK]	= { .type = NLA_U32 },
+	[TCA_FLOWER_KEY_NUM_OF_VLANS]	= { .type = NLA_U8 },
 };
 
@@ -1030,8 +1030,10 @@ static void fl_set_key_vlan(struct nlattr **tb,
 			       VLAN_PRIORITY_MASK;
 		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
 	}
-	key_val->vlan_tpid = ethertype;
-	key_mask->vlan_tpid = cpu_to_be16(~0);
+	if (ethertype) {
+		key_val->vlan_tpid = ethertype;
+		key_mask->vlan_tpid = cpu_to_be16(~0);
+	}
 	if (tb[vlan_next_eth_type_key]) {
 		key_val->vlan_eth_type =
 			nla_get_be16(tb[vlan_next_eth_type_key]);
@@ -1581,6 +1583,26 @@ static int fl_set_key_ct(struct nlattr **tb,
 	return 0;
 }
 
+static bool is_vlan_key(struct nlattr *tb, __be16 *ethertype,
+			struct fl_flow_key *key, struct fl_flow_key *mask,
+			int vthresh)
+{
+	const bool good_num_of_vlans = key->num_of_vlans.num_of_vlans > vthresh;
+
+	if (!tb) {
+		*ethertype = 0;
+		return good_num_of_vlans;
+	}
+
+	*ethertype = nla_get_be16(tb);
+	if (good_num_of_vlans || eth_type_vlan(*ethertype))
+		return true;
+
+	key->basic.n_proto = *ethertype;
+	mask->basic.n_proto = cpu_to_be16(~0);
+	return false;
+}
+
 static int fl_set_key(struct net *net, struct nlattr **tb,
 		      struct fl_flow_key *key, struct fl_flow_key *mask,
 		      struct netlink_ext_ack *extack)
@@ -1602,37 +1624,30 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 	fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
 		       sizeof(key->eth.src));
-
-	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
-		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
-
-		if (eth_type_vlan(ethertype)) {
-			fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
-					TCA_FLOWER_KEY_VLAN_PRIO,
-					TCA_FLOWER_KEY_VLAN_ETH_TYPE,
-					&key->vlan, &mask->vlan);
-
-			if (tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]) {
-				ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE]);
-				if (eth_type_vlan(ethertype)) {
-					fl_set_key_vlan(tb, ethertype,
-							TCA_FLOWER_KEY_CVLAN_ID,
-							TCA_FLOWER_KEY_CVLAN_PRIO,
-							TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
-							&key->cvlan, &mask->cvlan);
-					fl_set_key_val(tb, &key->basic.n_proto,
-						       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
-						       &mask->basic.n_proto,
-						       TCA_FLOWER_UNSPEC,
-						       sizeof(key->basic.n_proto));
-				} else {
-					key->basic.n_proto = ethertype;
-					mask->basic.n_proto = cpu_to_be16(~0);
-				}
-			}
-		} else {
-			key->basic.n_proto = ethertype;
-			mask->basic.n_proto = cpu_to_be16(~0);
-		}
-	}
+	fl_set_key_val(tb, &key->num_of_vlans,
+		       TCA_FLOWER_KEY_NUM_OF_VLANS,
+		       &mask->num_of_vlans,
+		       TCA_FLOWER_UNSPEC,
+		       sizeof(key->num_of_vlans));
+
+	if (is_vlan_key(tb[TCA_FLOWER_KEY_ETH_TYPE], &ethertype, key, mask, 0)) {
+		fl_set_key_vlan(tb, ethertype, TCA_FLOWER_KEY_VLAN_ID,
+				TCA_FLOWER_KEY_VLAN_PRIO,
+				TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+				&key->vlan, &mask->vlan);
+
+		if (is_vlan_key(tb[TCA_FLOWER_KEY_VLAN_ETH_TYPE],
+				&ethertype, key, mask, 1)) {
+			fl_set_key_vlan(tb, ethertype,
+					TCA_FLOWER_KEY_CVLAN_ID,
+					TCA_FLOWER_KEY_CVLAN_PRIO,
+					TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+					&key->cvlan, &mask->cvlan);
+			fl_set_key_val(tb, &key->basic.n_proto,
+				       TCA_FLOWER_KEY_CVLAN_ETH_TYPE,
+				       &mask->basic.n_proto,
+				       TCA_FLOWER_UNSPEC,
+				       sizeof(key->basic.n_proto));
+		}
+	}
 
@@ -1906,6 +1921,8 @@ static void fl_init_dissector(struct flow_dissector *dissector,
 			     FLOW_DISSECTOR_KEY_CT, ct);
 	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
 			     FLOW_DISSECTOR_KEY_HASH, hash);
+	FL_KEY_SET_IF_MASKED(mask, keys, cnt,
+			     FLOW_DISSECTOR_KEY_NUM_OF_VLANS, num_of_vlans);
 
 	skb_flow_dissector_init(dissector, keys, cnt);
 }
@@ -2362,11 +2379,11 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
 		cls_flower.rule->match.mask = &f->mask->key;
 		cls_flower.rule->match.key = &f->mkey;
 
-		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts);
+		err = tc_setup_offload_action(&cls_flower.rule->action, &f->exts,
+					      cls_flower.common.extack);
 		if (err) {
 			kfree(cls_flower.rule);
 			if (tc_skip_sw(f->flags)) {
-				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
 				__fl_put(f);
 				return err;
 			}
@@ -2994,6 +3011,11 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net,
 		    sizeof(key->basic.n_proto)))
 		goto nla_put_failure;
 
+	if (mask->num_of_vlans.num_of_vlans) {
+		if (nla_put_u8(skb, TCA_FLOWER_KEY_NUM_OF_VLANS, key->num_of_vlans.num_of_vlans))
+			goto nla_put_failure;
+	}
+
 	if (fl_dump_key_mpls(skb, &key->mpls, &mask->mpls))
 		goto nla_put_failure;
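Editor's note: the flower rework hinges on is_vlan_key() above. A missing ETH_TYPE/VLAN_ETH_TYPE attribute no longer ends VLAN parsing if the new num_of_vlans key already guarantees enough tags (threshold 0 for the outer tag, 1 for the inner one). A simplified stand-alone rendering of that decision follows; netlink attributes are modeled as a bool/value pair, and the types are illustrative stand-ins, not the kernel's.

/* Simplified rendering of is_vlan_key(): parse (c)VLAN attributes when an
 * explicit VLAN ethertype is present OR num_of_vlans exceeds vthresh. */
#include <stdbool.h>
#include <stdio.h>

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

static bool eth_type_vlan(unsigned int ethertype)
{
	return ethertype == ETH_P_8021Q || ethertype == ETH_P_8021AD;
}

/* has_attr/attr model the optional TCA_FLOWER_KEY_*ETH_TYPE attribute */
static bool is_vlan_key(bool has_attr, unsigned int attr,
			unsigned int num_of_vlans, int vthresh)
{
	if (!has_attr)
		return num_of_vlans > (unsigned int)vthresh;
	return num_of_vlans > (unsigned int)vthresh || eth_type_vlan(attr);
}

int main(void)
{
	/* no ethertype given, but the filter asks for at least 2 VLAN tags */
	printf("outer: %d inner: %d\n",
	       is_vlan_key(false, 0, 2, 0), is_vlan_key(false, 0, 2, 1));
	return 0;
}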
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index ca5670fd5228..06cf22adbab7 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -97,16 +97,13 @@ static int mall_replace_hw_filter(struct tcf_proto *tp,
 	cls_mall.command = TC_CLSMATCHALL_REPLACE;
 	cls_mall.cookie = cookie;
 
-	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts);
+	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
+				      cls_mall.common.extack);
 	if (err) {
 		kfree(cls_mall.rule);
 		mall_destroy_hw_filter(tp, head, cookie, NULL);
-		if (skip_sw)
-			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
-		else
-			err = 0;
 
-		return err;
+		return skip_sw ? err : 0;
 	}
 
 	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
@@ -302,14 +299,12 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
 		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
 	cls_mall.cookie = (unsigned long)head;
 
-	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts);
+	err = tc_setup_offload_action(&cls_mall.rule->action, &head->exts,
+				      cls_mall.common.extack);
 	if (err) {
 		kfree(cls_mall.rule);
-		if (add && tc_skip_sw(head->flags)) {
-			NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
-			return err;
-		}
-		return 0;
+
+		return add && tc_skip_sw(head->flags) ? err : 0;
 	}
 
 	err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 0a04468b7314..49bae3d5006b 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -311,12 +311,15 @@ META_COLLECTOR(int_sk_bound_if)
 
 META_COLLECTOR(var_sk_bound_if)
 {
+	int bound_dev_if;
+
 	if (skip_nonlocal(skb)) {
 		*err = -1;
 		return;
 	}
 
-	if (skb->sk->sk_bound_dev_if == 0) {
+	bound_dev_if = READ_ONCE(skb->sk->sk_bound_dev_if);
+	if (bound_dev_if == 0) {
 		dst->value = (unsigned long) "any";
 		dst->len = 3;
 	} else {
@@ -324,7 +327,7 @@ META_COLLECTOR(var_sk_bound_if)
 
 		rcu_read_lock();
 		dev = dev_get_by_index_rcu(sock_net(skb->sk),
-					   skb->sk->sk_bound_dev_if);
+					   bound_dev_if);
 		*err = var_dev(dev, dst);
 		rcu_read_unlock();
 	}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5bab9f8b8f45..dba0b3e24af5 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1019,22 +1019,14 @@ EXPORT_SYMBOL(qdisc_create_dflt);
 void qdisc_reset(struct Qdisc *qdisc)
 {
 	const struct Qdisc_ops *ops = qdisc->ops;
-	struct sk_buff *skb, *tmp;
 
 	trace_qdisc_reset(qdisc);
 
 	if (ops->reset)
 		ops->reset(qdisc);
 
-	skb_queue_walk_safe(&qdisc->gso_skb, skb, tmp) {
-		__skb_unlink(skb, &qdisc->gso_skb);
-		kfree_skb_list(skb);
-	}
-
-	skb_queue_walk_safe(&qdisc->skb_bad_txq, skb, tmp) {
-		__skb_unlink(skb, &qdisc->skb_bad_txq);
-		kfree_skb_list(skb);
-	}
+	__skb_queue_purge(&qdisc->gso_skb);
+	__skb_queue_purge(&qdisc->skb_bad_txq);
 	qdisc->q.qlen = 0;
 	qdisc->qstats.backlog = 0;
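Editor's note: the qdisc_reset() hunk is a behavior-preserving cleanup. __skb_unlink() clears skb->next after removing the skb from the queue, so the old kfree_skb_list() call only ever freed the one skb just unlinked; the open-coded walks were therefore equivalent to __skb_queue_purge(), which also unlinks and frees one entry at a time. A tiny userspace sketch of that unlink-then-free pattern on a stub list (not the kernel's skb API):

/* Sketch of the purge pattern now used by qdisc_reset(): pop and free
 * until the queue is empty. */
#include <stdlib.h>

struct node { struct node *next; };

struct queue { struct node *head; };

static struct node *dequeue(struct queue *q)
{
	struct node *n = q->head;

	if (n) {
		q->head = n->next;
		n->next = NULL;	/* unlink clears the link, as __skb_unlink() does */
	}
	return n;
}

static void queue_purge(struct queue *q)
{
	struct node *n;

	while ((n = dequeue(q)))
		free(n);	/* one node per iteration, like kfree_skb() */
}

int main(void)
{
	struct queue q = { NULL };

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->next = q.head;
		q.head = n;
	}
	queue_purge(&q);
	return 0;
}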