From 18a110b022a5c02e7dc9f6109d0bd93e58ac6ebb Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 15 Nov 2019 12:39:23 +0100 Subject: netfilter: ctnetlink: netns exit must wait for callbacks Curtis Taylor and Jon Maxwell reported and debugged a crash on a 3.10-based kernel. The crash occurs in ctnetlink_conntrack_events because the net->nfnl socket is NULL. The nfnl socket was set to NULL by netns destruction running on another cpu. The exiting network namespace calls the relevant destructors in the following order: 1. ctnetlink_net_exit_batch This nulls out the event callback pointer in struct netns. 2. nfnetlink_net_exit_batch This nulls net->nfnl socket and frees it. 3. nf_conntrack_cleanup_net_list This removes all remaining conntrack entries. This order is correct. The only explanation for the crash so far is: cpu1: conntrack is dying, eviction occurs: -> nf_ct_delete() -> nf_conntrack_event_report \ -> nf_conntrack_eventmask_report -> notify->fcn() (== ctnetlink_conntrack_events). cpu1: a. fetches the rcu-protected pointer to obtain the ctnetlink event callback. b. gets interrupted. cpu2: runs netns exit handlers: a. runs the ctnetlink destructor, event cb pointer set to NULL. b. runs the nfnetlink destructor, nfnl socket is closed and set to NULL. cpu1: c. resumes and trips over NULL net->nfnl. The problem appears to be that ctnetlink_net_exit_batch only prevents future callers of nf_conntrack_eventmask_report() from obtaining the callback. It doesn't wait for other cpus that might have already obtained the callback's address. I don't see anything in upstream kernels that would prevent a similar crash: we need to wait for all cpus to have exited the event callback. Fixes: 9592a5c01e79dbc59eb56fa ("netfilter: ctnetlink: netns support") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_netlink.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index d8d33ef52ce0..6a1c8f1f6171 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -3626,6 +3626,9 @@ static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list) list_for_each_entry(net, net_exit_list, exit_list) ctnetlink_net_exit(net); + + /* wait for other cpus until they are done with ctnl_notifiers */ + synchronize_rcu(); } static struct pernet_operations ctnetlink_net_ops = { -- cgit v1.2.3 From ee1bcfe01251e8958b53de45d2b0c85e09dd2719 Mon Sep 17 00:00:00 2001 From: wenxu Date: Wed, 20 Nov 2019 12:30:42 +0800 Subject: netfilter: nf_flow_table_offload: Fix block setup as TC_SETUP_FT cmd Set up the block through the TC_SETUP_FT command.
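The race in the ctnetlink netns-exit patch above is the standard RCU unpublish problem: clearing the callback pointer stops new readers, but only synchronize_rcu() guarantees that readers already inside the callback have finished before a later destructor frees what they dereference. Below is a minimal sketch of that pattern; the demo_* names are made up for illustration and are not the real ctnetlink structures.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative stand-ins for struct ctnl_notifier and net->nfnl. */
struct demo_sock { int refs; };
struct demo_notifier { void (*fcn)(struct demo_sock *sk); };

static struct demo_notifier __rcu *demo_cb;
static struct demo_sock *demo_nfnl;

/* Reader: event delivery (cf. nf_conntrack_eventmask_report on cpu1). */
static void demo_event_report(void)
{
        struct demo_notifier *n;

        rcu_read_lock();
        n = rcu_dereference(demo_cb);
        if (n)
                n->fcn(demo_nfnl);      /* may run concurrently with netns exit */
        rcu_read_unlock();
}

/* Writer: netns exit batch (cf. ctnetlink_net_exit_batch on cpu2). */
static void demo_exit_batch(void)
{
        RCU_INIT_POINTER(demo_cb, NULL);
        /* Without this, a later destructor may free demo_nfnl while a
         * reader is still inside n->fcn() using it.
         */
        synchronize_rcu();
        kfree(demo_nfnl);               /* done by the next destructor upstream */
        demo_nfnl = NULL;
}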
Fixes: c29f74e0df7a ("netfilter: nf_flow_table: hardware offload support") Signed-off-by: wenxu Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_offload.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index c54c9a6cc981..6067268ab9bc 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -822,7 +822,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, bo.extack = &extack; INIT_LIST_HEAD(&bo.cb_list); - err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_BLOCK, &bo); + err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, &bo); if (err < 0) return err; -- cgit v1.2.3 From e052901991ae21e15851bfc89c682bfcb39a4dcf Mon Sep 17 00:00:00 2001 From: wenxu Date: Wed, 20 Nov 2019 13:12:22 +0800 Subject: netfilter: nf_flow_table_offload: Fix block_cb tc_setup_type as TC_SETUP_CLSFLOWER Add/del/stats flows through block_cb call must set the tc_setup_type as TC_SETUP_CLSFLOWER. Fixes: c29f74e0df7a ("netfilter: nf_flow_table: hardware offload support") Signed-off-by: wenxu Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_offload.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index 6067268ab9bc..b3ad285e057d 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -574,7 +574,7 @@ static int flow_offload_tuple_add(struct flow_offload_work *offload, cls_flow.rule = flow_rule->rule; list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) { - err = block_cb->cb(TC_SETUP_FT, &cls_flow, + err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv); if (err < 0) continue; @@ -599,7 +599,7 @@ static void flow_offload_tuple_del(struct flow_offload_work *offload, &offload->flow->tuplehash[dir].tuple, &extack); list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) - block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv); + block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv); offload->flow->flags |= FLOW_OFFLOAD_HW_DEAD; } @@ -656,7 +656,7 @@ static void flow_offload_tuple_stats(struct flow_offload_work *offload, &offload->flow->tuplehash[dir].tuple, &extack); list_for_each_entry(block_cb, &flowtable->flow_block.cb_list, list) - block_cb->cb(TC_SETUP_FT, &cls_flow, block_cb->cb_priv); + block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow, block_cb->cb_priv); memcpy(stats, &cls_flow.stats, sizeof(*stats)); } -- cgit v1.2.3 From dc4d3f2e12a6e8a22d13b0d648a6529e403b65a6 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Tue, 26 Nov 2019 13:12:26 -0700 Subject: netfilter: nf_flow_table_offload: Don't use offset uninitialized in flow_offload_port_{d,s}nat Clang warns (trimmed the second warning for brevity): ../net/netfilter/nf_flow_table_offload.c:342:2: warning: variable 'offset' is used uninitialized whenever switch default is taken [-Wsometimes-uninitialized] default: ^~~~~~~ ../net/netfilter/nf_flow_table_offload.c:346:57: note: uninitialized use occurs here flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, ^~~~~~ ../net/netfilter/nf_flow_table_offload.c:331:12: note: initialize the variable 'offset' to silence this warning u32 offset; ^ = 0 Match what was done in the flow_offload_ipv{4,6}_{d,s}nat functions and just return in the default case, since port would also be uninitialized. 
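A minimal, self-contained illustration of that warning class follows; the helper name is hypothetical and only the shape of the switch matters, while the actual fix is in the diff below.

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>

/* -Wsometimes-uninitialized in a nutshell: 'offset' is consumed on return
 * even when the default branch was taken and never set it.
 */
static u32 demo_l4_dest_offset(u8 protonum)
{
        u32 offset;

        switch (protonum) {
        case IPPROTO_TCP:
                offset = offsetof(struct tcphdr, dest);
                break;
        case IPPROTO_UDP:
                offset = offsetof(struct udphdr, dest);
                break;
        default:
                break;          /* offset left uninitialized here ... */
        }

        return offset;          /* ... and read here; returning early from
                                 * the default case avoids the problem */
}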
Fixes: c29f74e0df7a ("netfilter: nf_flow_table: hardware offload support") Link: https://github.com/ClangBuiltLinux/linux/issues/780 Signed-off-by: Nathan Chancellor Reported-by: kernelci.org bot Reviewed-by: Nick Desaulniers Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_offload.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index b3ad285e057d..dd78ae5441e9 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -340,7 +340,7 @@ static void flow_offload_port_snat(struct net *net, offset = 0; /* offsetof(struct tcphdr, dest); */ break; default: - break; + return; } flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, @@ -367,7 +367,7 @@ static void flow_offload_port_dnat(struct net *net, offset = 0; /* offsetof(struct tcphdr, dest); */ break; default: - break; + return; } flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, -- cgit v1.2.3 From c7c17e6a03e08860f7a40095643e72c24c3f896b Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 28 Nov 2019 13:25:48 +0100 Subject: netfilter: conntrack: tell compiler to not inline nf_ct_resolve_clash At this time compiler inlines it, but this code will not be executed under normal conditions. Also, no inlining allows to use "nf_ct_resolve_clash%return" perf probe. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_core.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 0af1898af2b8..f475fec84536 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -895,9 +895,10 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo, } /* Resolve race on insertion if this protocol allows this. */ -static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, - enum ip_conntrack_info ctinfo, - struct nf_conntrack_tuple_hash *h) +static __cold noinline int +nf_ct_resolve_clash(struct net *net, struct sk_buff *skb, + enum ip_conntrack_info ctinfo, + struct nf_conntrack_tuple_hash *h) { /* This is the conntrack entry already in hashes that won race. */ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h); -- cgit v1.2.3 From d50264f1fef92967e2d2c9d91bdb05de214ddacf Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 29 Nov 2019 10:07:01 +0100 Subject: netfilter: nf_flow_table_offload: add IPv6 match description Add missing IPv6 matching description to flow_rule object. 
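One detail worth spelling out before the diff: control.addr_type already carries the FLOW_DISSECTOR_KEY_IPV4_ADDRS or FLOW_DISSECTOR_KEY_IPV6_ADDRS value itself, which is why a single used_keys |= BIT(key->control.addr_type) line can replace the hard-coded IPv4 bit. A simplified sketch of that relationship, with a hypothetical helper rather than the full nf_flow_rule_match():

#include <linux/bits.h>
#include <net/flow_dissector.h>

/* Hypothetical helper: the same BIT(addr_type) marks the right address
 * key for either family in dissector->used_keys.
 */
static void demo_mark_addr_key(struct flow_dissector *dissector,
                               struct flow_dissector_key_control *control,
                               bool ipv6)
{
        control->addr_type = ipv6 ? FLOW_DISSECTOR_KEY_IPV6_ADDRS
                                  : FLOW_DISSECTOR_KEY_IPV4_ADDRS;
        dissector->used_keys |= BIT(control->addr_type);
}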
Fixes: 5c27d8d76ce8 ("netfilter: nf_flow_table_offload: add IPv6 support") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_offload.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index dd78ae5441e9..c94ebad78c5c 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -28,6 +28,7 @@ struct nf_flow_key { struct flow_dissector_key_basic basic; union { struct flow_dissector_key_ipv4_addrs ipv4; + struct flow_dissector_key_ipv6_addrs ipv6; }; struct flow_dissector_key_tcp tcp; struct flow_dissector_key_ports tp; @@ -57,6 +58,7 @@ static int nf_flow_rule_match(struct nf_flow_match *match, NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4); + NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp); NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp); @@ -69,9 +71,18 @@ static int nf_flow_rule_match(struct nf_flow_match *match, key->ipv4.dst = tuple->dst_v4.s_addr; mask->ipv4.dst = 0xffffffff; break; + case AF_INET6: + key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; + key->basic.n_proto = htons(ETH_P_IPV6); + key->ipv6.src = tuple->src_v6; + memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src)); + key->ipv6.dst = tuple->dst_v6; + memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst)); + break; default: return -EOPNOTSUPP; } + match->dissector.used_keys |= BIT(key->control.addr_type); mask->basic.n_proto = 0xffff; switch (tuple->l4proto) { @@ -96,7 +107,6 @@ static int nf_flow_rule_match(struct nf_flow_match *match, match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL) | BIT(FLOW_DISSECTOR_KEY_BASIC) | - BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | BIT(FLOW_DISSECTOR_KEY_PORTS); return 0; } -- cgit v1.2.3 From d1f4c966475c6dd2545c6625022cb24e878bee11 Mon Sep 17 00:00:00 2001 From: wenxu Date: Wed, 13 Nov 2019 12:21:07 +0800 Subject: netfilter: nf_tables_offload: Check for the NETDEV_UNREGISTER event Check for the NETDEV_UNREGISTER event from the nft_offload_netdev_event function, which is the event that actually triggers the clean up. Fixes: 06d392cbe3db ("netfilter: nf_tables_offload: remove rules when the device unregisters") Signed-off-by: wenxu Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_offload.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 68f17a6921d8..d7a35da008ef 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -577,6 +577,9 @@ static int nft_offload_netdev_event(struct notifier_block *this, struct net *net = dev_net(dev); struct nft_chain *chain; + if (event != NETDEV_UNREGISTER) + return NOTIFY_DONE; + mutex_lock(&net->nft.commit_mutex); chain = __nft_offload_get_chain(dev); if (chain) -- cgit v1.2.3 From 0b9173f4688dfa7c5d723426be1d979c24ce3d51 Mon Sep 17 00:00:00 2001 From: Marco Oliverio Date: Mon, 2 Dec 2019 19:54:30 +0100 Subject: netfilter: nf_queue: enqueue skbs with NULL dst Bridge packets that are forwarded have skb->dst == NULL and get dropped by the check introduced by b60a77386b1d4868f72f6353d35dabe5fbe981f2 (net: make skb_dst_force return true when dst is refcounted). 
To fix this we check skb_dst() before skb_dst_force(), so we don't drop skb packets with dst == NULL. This also holds for skbs at the PRE_ROUTING hook, so we remove the second check. Fixes: b60a77386b1d ("net: make skb_dst_force return true when dst is refcounted") Signed-off-by: Marco Oliverio Signed-off-by: Rocco Folino Acked-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_queue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index a2b58de82600..f8f52ff99cfb 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -189,7 +189,7 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state, goto err; } - if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) { + if (skb_dst(skb) && !skb_dst_force(skb)) { status = -ENETDOWN; goto err; } -- cgit v1.2.3 From db3b665dd77b34e34df00e17d7b299c98fcfb2c5 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 6 Dec 2019 20:23:29 +0100 Subject: netfilter: nft_set_rbtree: bogus lookup/get on consecutive elements in named sets The existing rbtree implementation might store consecutive elements where the closing element and the opening element might overlap, eg. [ a, a+1) [ a+1, a+2) This patch removes the optimization for non-anonymous sets in the exact matching case, where the search was assumed to stop once the closing element is found. Instead, invalidate the candidate interval and keep looking further in the tree. The lookup/get operation might return false even though there is an element in the rbtree. Moreover, the get operation returns true as if a+2 were in the tree. This happens with named sets after several set updates. The existing lookup optimization (that only works for the anonymous sets) might not reach the opening [ a+1,... element if the closing ...,a+1) is found first when walking over the rbtree. Hence, walking the full tree in that case is needed. This patch fixes the lookup and get operations.
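To make the failure mode concrete, here is a toy linear model of the intended interval semantics. It deliberately ignores the rbtree walk itself and assumes boundaries sorted by key, with an end boundary ordered before a start boundary of the same key; the demo_* names are invented for illustration. The pre-fix code effectively stopped at the first end boundary that matched the key exactly, which reports a+1 as absent and, on the get path, a+2 as present.

#include <linux/types.h>

struct demo_boundary {
        u32 key;
        bool interval_end;      /* true for the ")" side of [start, end) */
};

/* For [ a, a+1) and [ a+1, a+2) the boundaries are:
 *   { a, false }, { a+1, true }, { a+1, false }, { a+2, true }
 * a+1 must match (the second interval opens there), a+2 must not.
 */
static bool demo_interval_match(const struct demo_boundary *b, int n, u32 key)
{
        bool inside = false;
        int i;

        for (i = 0; i < n; i++) {
                if (b[i].key > key)
                        break;
                /* the last boundary at or below 'key' decides */
                inside = !b[i].interval_end;
        }
        return inside;
}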
Fixes: e701001e7cbe ("netfilter: nft_rbtree: allow adjacent intervals with dynamic updates") Fixes: ba0e4d9917b4 ("netfilter: nf_tables: get set elements via netlink") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_set_rbtree.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'net') diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 57123259452f..a9f804f7a04a 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -74,8 +74,13 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set parent = rcu_dereference_raw(parent->rb_left); continue; } - if (nft_rbtree_interval_end(rbe)) - goto out; + if (nft_rbtree_interval_end(rbe)) { + if (nft_set_is_anonymous(set)) + return false; + parent = rcu_dereference_raw(parent->rb_left); + interval = NULL; + continue; + } *ext = &rbe->ext; return true; @@ -88,7 +93,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set *ext = &interval->ext; return true; } -out: + return false; } @@ -139,8 +144,10 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, if (flags & NFT_SET_ELEM_INTERVAL_END) interval = rbe; } else { - if (!nft_set_elem_active(&rbe->ext, genmask)) + if (!nft_set_elem_active(&rbe->ext, genmask)) { parent = rcu_dereference_raw(parent->rb_left); + continue; + } if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) || (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) == @@ -148,7 +155,11 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set, *elem = rbe; return true; } - return false; + + if (nft_rbtree_interval_end(rbe)) + interval = NULL; + + parent = rcu_dereference_raw(parent->rb_left); } } -- cgit v1.2.3 From bffc124b6fe37d0ae9b428d104efb426403bb5c9 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 6 Dec 2019 21:55:20 +0100 Subject: netfilter: nf_tables: validate NFT_SET_ELEM_INTERVAL_END Only NFTA_SET_ELEM_KEY and NFTA_SET_ELEM_FLAGS make sense for elements whose NFT_SET_ELEM_INTERVAL_END flag is set on. Fixes: 96518518cc41 ("netfilter: add nftables") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 062b73a83af0..0db2784fee9a 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4756,14 +4756,20 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (nla[NFTA_SET_ELEM_DATA] == NULL && !(flags & NFT_SET_ELEM_INTERVAL_END)) return -EINVAL; - if (nla[NFTA_SET_ELEM_DATA] != NULL && - flags & NFT_SET_ELEM_INTERVAL_END) - return -EINVAL; } else { if (nla[NFTA_SET_ELEM_DATA] != NULL) return -EINVAL; } + if ((flags & NFT_SET_ELEM_INTERVAL_END) && + (nla[NFTA_SET_ELEM_DATA] || + nla[NFTA_SET_ELEM_OBJREF] || + nla[NFTA_SET_ELEM_TIMEOUT] || + nla[NFTA_SET_ELEM_EXPIRATION] || + nla[NFTA_SET_ELEM_USERDATA] || + nla[NFTA_SET_ELEM_EXPR])) + return -EINVAL; + timeout = 0; if (nla[NFTA_SET_ELEM_TIMEOUT] != NULL) { if (!(set->flags & NFT_SET_TIMEOUT)) -- cgit v1.2.3 From 0d2c96af797ba149e559c5875c0151384ab6dd14 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 6 Dec 2019 22:09:14 +0100 Subject: netfilter: nf_tables: validate NFT_DATA_VALUE after nft_data_init() Userspace might bogusly sent NFT_DATA_VERDICT in several netlink attributes that assume NFT_DATA_VALUE. 
Moreover, make sure that error path invokes nft_data_release() to decrement the reference count on the chain object. Fixes: 96518518cc41 ("netfilter: add nftables") Fixes: 0f3cd9b36977 ("netfilter: nf_tables: add range expression") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 4 +++- net/netfilter/nft_bitwise.c | 4 ++-- net/netfilter/nft_cmp.c | 6 ++++++ net/netfilter/nft_range.c | 10 ++++++++++ 4 files changed, 21 insertions(+), 3 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 0db2784fee9a..72a7816ba761 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4519,8 +4519,10 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set, return err; err = -EINVAL; - if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) + if (desc.type != NFT_DATA_VALUE || desc.len != set->klen) { + nft_data_release(&elem.key.val, desc.type); return err; + } priv = set->ops->get(ctx->net, set, &elem, flags); if (IS_ERR(priv)) diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c index 02afa752dd2e..10e9d50e4e19 100644 --- a/net/netfilter/nft_bitwise.c +++ b/net/netfilter/nft_bitwise.c @@ -80,7 +80,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, tb[NFTA_BITWISE_MASK]); if (err < 0) return err; - if (d1.len != priv->len) { + if (d1.type != NFT_DATA_VALUE || d1.len != priv->len) { err = -EINVAL; goto err1; } @@ -89,7 +89,7 @@ static int nft_bitwise_init(const struct nft_ctx *ctx, tb[NFTA_BITWISE_XOR]); if (err < 0) goto err1; - if (d2.len != priv->len) { + if (d2.type != NFT_DATA_VALUE || d2.len != priv->len) { err = -EINVAL; goto err2; } diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c index b8092069f868..8a28c127effc 100644 --- a/net/netfilter/nft_cmp.c +++ b/net/netfilter/nft_cmp.c @@ -81,6 +81,12 @@ static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr, if (err < 0) return err; + if (desc.type != NFT_DATA_VALUE) { + err = -EINVAL; + nft_data_release(&priv->data, desc.type); + return err; + } + priv->sreg = nft_parse_register(tb[NFTA_CMP_SREG]); err = nft_validate_register_load(priv->sreg, desc.len); if (err < 0) diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c index 4701fa8a45e7..89efcc5a533d 100644 --- a/net/netfilter/nft_range.c +++ b/net/netfilter/nft_range.c @@ -66,11 +66,21 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr if (err < 0) return err; + if (desc_from.type != NFT_DATA_VALUE) { + err = -EINVAL; + goto err1; + } + err = nft_data_init(NULL, &priv->data_to, sizeof(priv->data_to), &desc_to, tb[NFTA_RANGE_TO_DATA]); if (err < 0) goto err1; + if (desc_to.type != NFT_DATA_VALUE) { + err = -EINVAL; + goto err2; + } + if (desc_from.len != desc_to.len) { err = -EINVAL; goto err2; -- cgit v1.2.3 From fd57d0cbe187e93f63777d36e9f49293311d417f Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 6 Dec 2019 22:25:55 +0100 Subject: netfilter: nf_tables: skip module reference count bump on object updates Use __nft_obj_type_get() instead, otherwise there is a module reference counter leak. 
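The distinction matters because nft_obj_type_get() is the variant that takes a module reference on top of the type lookup; on the update path the existing object already pinned its type's module at creation time, so taking another reference with no matching module_put() leaks it. A hedged sketch of the two helpers follows, with simplified bodies and a hypothetical demo_obj_type_lookup():

#include <linux/module.h>
#include <linux/err.h>

/* Lookup only: no reference taken; suitable for the update path, where
 * the object already holds the module reference from creation.
 */
static const struct nft_object_type *__demo_obj_type_get(u32 objtype)
{
        return demo_obj_type_lookup(objtype);   /* hypothetical lookup */
}

/* Lookup plus module pin: what object creation needs, and what the
 * update path was calling by mistake, leaking one reference per update.
 */
static const struct nft_object_type *demo_obj_type_get(u32 objtype)
{
        const struct nft_object_type *type = __demo_obj_type_get(objtype);

        if (type && try_module_get(type->owner))
                return type;

        return ERR_PTR(-ENOENT);
}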
Fixes: d62d0ba97b58 ("netfilter: nf_tables: Introduce stateful object update operation") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net') diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 72a7816ba761..a8caf7386fa9 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5484,7 +5484,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk, if (nlh->nlmsg_flags & NLM_F_REPLACE) return -EOPNOTSUPP; - type = nft_obj_type_get(net, objtype); + type = __nft_obj_type_get(objtype); nft_ctx_init(&ctx, net, skb, nlh, family, table, NULL, nla); return nf_tables_updobj(&ctx, type, nla[NFTA_OBJ_DATA], obj); -- cgit v1.2.3 From 81ec61074bcf68acfcb2820cda3ff9d9984419c7 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 6 Dec 2019 22:49:58 +0100 Subject: netfilter: nf_tables_offload: return EOPNOTSUPP if rule specifies no actions If the rule only specifies the matching side, return EOPNOTSUPP. Otherwise, the front-end relies on the drivers to reject this rule. Fixes: c9626a2cbdb2 ("netfilter: nf_tables: add hardware offload support") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_offload.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index d7a35da008ef..22fb18906ccf 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -44,6 +44,9 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, expr = nft_expr_next(expr); } + if (num_actions == 0) + return ERR_PTR(-EOPNOTSUPP); + flow = nft_flow_rule_alloc(num_actions); if (!flow) return ERR_PTR(-ENOMEM); -- cgit v1.2.3 From 5604285839aaedfb23ebe297799c6e558939334d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 7 Dec 2019 14:43:39 -0800 Subject: netfilter: bridge: make sure to pull arp header in br_nf_forward_arp() syzbot is kind enough to remind us we need to call skb_may_pull() BUG: KMSAN: uninit-value in br_nf_forward_arp+0xe61/0x1230 net/bridge/br_netfilter_hooks.c:665 CPU: 1 PID: 11631 Comm: syz-executor.1 Not tainted 5.4.0-rc8-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x1c9/0x220 lib/dump_stack.c:118 kmsan_report+0x128/0x220 mm/kmsan/kmsan_report.c:108 __msan_warning+0x64/0xc0 mm/kmsan/kmsan_instr.c:245 br_nf_forward_arp+0xe61/0x1230 net/bridge/br_netfilter_hooks.c:665 nf_hook_entry_hookfn include/linux/netfilter.h:135 [inline] nf_hook_slow+0x18b/0x3f0 net/netfilter/core.c:512 nf_hook include/linux/netfilter.h:260 [inline] NF_HOOK include/linux/netfilter.h:303 [inline] __br_forward+0x78f/0xe30 net/bridge/br_forward.c:109 br_flood+0xef0/0xfe0 net/bridge/br_forward.c:234 br_handle_frame_finish+0x1a77/0x1c20 net/bridge/br_input.c:162 nf_hook_bridge_pre net/bridge/br_input.c:245 [inline] br_handle_frame+0xfb6/0x1eb0 net/bridge/br_input.c:348 __netif_receive_skb_core+0x20b9/0x51a0 net/core/dev.c:4830 __netif_receive_skb_one_core net/core/dev.c:4927 [inline] __netif_receive_skb net/core/dev.c:5043 [inline] process_backlog+0x610/0x13c0 net/core/dev.c:5874 napi_poll net/core/dev.c:6311 [inline] net_rx_action+0x7a6/0x1aa0 net/core/dev.c:6379 __do_softirq+0x4a1/0x83a kernel/softirq.c:293 do_softirq_own_stack+0x49/0x80 arch/x86/entry/entry_64.S:1091 do_softirq kernel/softirq.c:338 [inline] __local_bh_enable_ip+0x184/0x1d0 
kernel/softirq.c:190 local_bh_enable+0x36/0x40 include/linux/bottom_half.h:32 rcu_read_unlock_bh include/linux/rcupdate.h:688 [inline] __dev_queue_xmit+0x38e8/0x4200 net/core/dev.c:3819 dev_queue_xmit+0x4b/0x60 net/core/dev.c:3825 packet_snd net/packet/af_packet.c:2959 [inline] packet_sendmsg+0x8234/0x9100 net/packet/af_packet.c:2984 sock_sendmsg_nosec net/socket.c:637 [inline] sock_sendmsg net/socket.c:657 [inline] __sys_sendto+0xc44/0xc70 net/socket.c:1952 __do_sys_sendto net/socket.c:1964 [inline] __se_sys_sendto+0x107/0x130 net/socket.c:1960 __x64_sys_sendto+0x6e/0x90 net/socket.c:1960 do_syscall_64+0xb6/0x160 arch/x86/entry/common.c:291 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x45a679 Code: ad b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 RSP: 002b:00007f0a3c9e5c78 EFLAGS: 00000246 ORIG_RAX: 000000000000002c RAX: ffffffffffffffda RBX: 0000000000000006 RCX: 000000000045a679 RDX: 000000000000000e RSI: 0000000020000200 RDI: 0000000000000003 RBP: 000000000075bf20 R08: 00000000200000c0 R09: 0000000000000014 R10: 0000000000000000 R11: 0000000000000246 R12: 00007f0a3c9e66d4 R13: 00000000004c8ec1 R14: 00000000004dfe28 R15: 00000000ffffffff Uninit was created at: kmsan_save_stack_with_flags mm/kmsan/kmsan.c:149 [inline] kmsan_internal_poison_shadow+0x5c/0x110 mm/kmsan/kmsan.c:132 kmsan_slab_alloc+0x97/0x100 mm/kmsan/kmsan_hooks.c:86 slab_alloc_node mm/slub.c:2773 [inline] __kmalloc_node_track_caller+0xe27/0x11a0 mm/slub.c:4381 __kmalloc_reserve net/core/skbuff.c:141 [inline] __alloc_skb+0x306/0xa10 net/core/skbuff.c:209 alloc_skb include/linux/skbuff.h:1049 [inline] alloc_skb_with_frags+0x18c/0xa80 net/core/skbuff.c:5662 sock_alloc_send_pskb+0xafd/0x10a0 net/core/sock.c:2244 packet_alloc_skb net/packet/af_packet.c:2807 [inline] packet_snd net/packet/af_packet.c:2902 [inline] packet_sendmsg+0x63a6/0x9100 net/packet/af_packet.c:2984 sock_sendmsg_nosec net/socket.c:637 [inline] sock_sendmsg net/socket.c:657 [inline] __sys_sendto+0xc44/0xc70 net/socket.c:1952 __do_sys_sendto net/socket.c:1964 [inline] __se_sys_sendto+0x107/0x130 net/socket.c:1960 __x64_sys_sendto+0x6e/0x90 net/socket.c:1960 do_syscall_64+0xb6/0x160 arch/x86/entry/common.c:291 entry_SYSCALL_64_after_hwframe+0x44/0xa9 Fixes: c4e70a87d975 ("netfilter: bridge: rename br_netfilter.c to br_netfilter_hooks.c") Signed-off-by: Eric Dumazet Reported-by: syzbot Reviewed-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/bridge/br_netfilter_hooks.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'net') diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index af7800103e51..59980ecfc962 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -662,6 +662,9 @@ static unsigned int br_nf_forward_arp(void *priv, nf_bridge_pull_encap_header(skb); } + if (unlikely(!pskb_may_pull(skb, sizeof(struct arphdr)))) + return NF_DROP; + if (arp_hdr(skb)->ar_pln != 4) { if (is_vlan_arp(skb, state->net)) nf_bridge_push_encap_header(skb); -- cgit v1.2.3 From 7acd9378dc65296b2531758aa62ee9bcf55b371c Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sat, 7 Dec 2019 18:38:12 +0100 Subject: netfilter: nf_flow_table_offload: Correct memcpy size for flow_overload_mangle() In function 'memcpy', inlined from 'flow_offload_mangle' at net/netfilter/nf_flow_table_offload.c:112:2, inlined from 'flow_offload_port_dnat' at 
net/netfilter/nf_flow_table_offload.c:373:2, inlined from 'nf_flow_rule_route_ipv4' at net/netfilter/nf_flow_table_offload.c:424:3: ./include/linux/string.h:376:4: error: call to '__read_overflow2' declared with attribute error: detected read beyond size of object passed as 2nd parameter 376 | __read_overflow2(); | ^~~~~~~~~~~~~~~~~~ The original u8* was done in the hope to make this more adaptable but consensus is to keep this like it is in tc pedit. Fixes: c29f74e0df7a ("netfilter: nf_flow_table: hardware offload support") Reported-by: Laura Abbott Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_flow_table_offload.c | 59 +++++++++++++++++------------------ 1 file changed, 28 insertions(+), 31 deletions(-) (limited to 'net') diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index c94ebad78c5c..de7a0d1e15c8 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -112,8 +112,8 @@ static int nf_flow_rule_match(struct nf_flow_match *match, } static void flow_offload_mangle(struct flow_action_entry *entry, - enum flow_action_mangle_base htype, - u32 offset, u8 *value, u8 *mask) + enum flow_action_mangle_base htype, u32 offset, + const __be32 *value, const __be32 *mask) { entry->id = FLOW_ACTION_MANGLE; entry->mangle.htype = htype; @@ -150,12 +150,12 @@ static int flow_offload_eth_src(struct net *net, memcpy(&val16, dev->dev_addr, 2); val = val16 << 16; flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4, - (u8 *)&val, (u8 *)&mask); + &val, &mask); mask = ~0xffffffff; memcpy(&val, dev->dev_addr + 2, 4); flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8, - (u8 *)&val, (u8 *)&mask); + &val, &mask); dev_put(dev); return 0; @@ -180,13 +180,13 @@ static int flow_offload_eth_dst(struct net *net, mask = ~0xffffffff; memcpy(&val, n->ha, 4); flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0, - (u8 *)&val, (u8 *)&mask); + &val, &mask); mask = ~0x0000ffff; memcpy(&val16, n->ha + 4, 2); val = val16; flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4, - (u8 *)&val, (u8 *)&mask); + &val, &mask); neigh_release(n); return 0; @@ -216,7 +216,7 @@ static void flow_offload_ipv4_snat(struct net *net, } flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset, - (u8 *)&addr, (u8 *)&mask); + &addr, &mask); } static void flow_offload_ipv4_dnat(struct net *net, @@ -243,12 +243,12 @@ static void flow_offload_ipv4_dnat(struct net *net, } flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset, - (u8 *)&addr, (u8 *)&mask); + &addr, &mask); } static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, unsigned int offset, - u8 *addr, u8 *mask) + const __be32 *addr, const __be32 *mask) { struct flow_action_entry *entry; int i; @@ -256,8 +256,7 @@ static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule, for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i += sizeof(u32)) { entry = flow_action_entry_next(flow_rule); flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6, - offset + i, - &addr[i], mask); + offset + i, &addr[i], mask); } } @@ -267,23 +266,23 @@ static void flow_offload_ipv6_snat(struct net *net, struct nf_flow_rule *flow_rule) { u32 mask = ~htonl(0xffffffff); - const u8 *addr; + const __be32 *addr; u32 offset; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: - addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr; + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32; offset = offsetof(struct ipv6hdr, saddr); break; 
case FLOW_OFFLOAD_DIR_REPLY: - addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr; + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32; offset = offsetof(struct ipv6hdr, daddr); break; default: return; } - flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask); + flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask); } static void flow_offload_ipv6_dnat(struct net *net, @@ -292,23 +291,23 @@ static void flow_offload_ipv6_dnat(struct net *net, struct nf_flow_rule *flow_rule) { u32 mask = ~htonl(0xffffffff); - const u8 *addr; + const __be32 *addr; u32 offset; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: - addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr; + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32; offset = offsetof(struct ipv6hdr, daddr); break; case FLOW_OFFLOAD_DIR_REPLY: - addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr; + addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32; offset = offsetof(struct ipv6hdr, saddr); break; default: return; } - flow_offload_ipv6_mangle(flow_rule, offset, (u8 *)addr, (u8 *)&mask); + flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask); } static int flow_offload_l4proto(const struct flow_offload *flow) @@ -336,25 +335,24 @@ static void flow_offload_port_snat(struct net *net, struct nf_flow_rule *flow_rule) { struct flow_action_entry *entry = flow_action_entry_next(flow_rule); - u32 mask = ~htonl(0xffff0000); - __be16 port; + u32 mask = ~htonl(0xffff0000), port; u32 offset; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: - port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port; + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port); offset = 0; /* offsetof(struct tcphdr, source); */ break; case FLOW_OFFLOAD_DIR_REPLY: - port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port; + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port); offset = 0; /* offsetof(struct tcphdr, dest); */ break; default: return; } - + port = htonl(port << 16); flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, - (u8 *)&port, (u8 *)&mask); + &port, &mask); } static void flow_offload_port_dnat(struct net *net, @@ -363,25 +361,24 @@ static void flow_offload_port_dnat(struct net *net, struct nf_flow_rule *flow_rule) { struct flow_action_entry *entry = flow_action_entry_next(flow_rule); - u32 mask = ~htonl(0xffff); - __be16 port; + u32 mask = ~htonl(0xffff), port; u32 offset; switch (dir) { case FLOW_OFFLOAD_DIR_ORIGINAL: - port = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port; + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port); offset = 0; /* offsetof(struct tcphdr, source); */ break; case FLOW_OFFLOAD_DIR_REPLY: - port = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port; + port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port); offset = 0; /* offsetof(struct tcphdr, dest); */ break; default: return; } - + port = htonl(port); flow_offload_mangle(entry, flow_offload_l4proto(flow), offset, - (u8 *)&port, (u8 *)&mask); + &port, &mask); } static void flow_offload_ipv4_checksum(struct net *net, -- cgit v1.2.3
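For context on why the u8 * variant blew up: flow_offload_mangle() unconditionally copies a full 32-bit word into entry->mangle.val and entry->mangle.mask, so handing it the address of a 2-byte port is a genuine out-of-bounds read that FORTIFY_SOURCE now flags at build time. The reduced sketch below mirrors the names in the patch above but trims the bodies; treat it as an illustration, not the exact upstream code.

#include <linux/string.h>
#include <net/flow_offload.h>

static void demo_mangle(struct flow_action_entry *entry,
                        enum flow_action_mangle_base htype, u32 offset,
                        const __be32 *value, const __be32 *mask)
{
        entry->id = FLOW_ACTION_MANGLE;
        entry->mangle.htype = htype;
        entry->mangle.offset = offset;
        memcpy(&entry->mangle.mask, mask, sizeof(u32)); /* always 4 bytes */
        memcpy(&entry->mangle.val, value, sizeof(u32));
}

static void demo_port_snat(struct flow_action_entry *entry,
                           enum flow_action_mangle_base htype, __be16 sport)
{
        u32 mask = ~htonl(0xffff0000);
        u32 port = htonl(ntohs(sport) << 16);   /* widen to the mangled word */

        /* Passing &sport (2 bytes) instead of &port (4 bytes) is what
         * triggered __read_overflow2() under FORTIFY_SOURCE.
         */
        demo_mangle(entry, htype, 0, &port, &mask);
}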