author     Linus Torvalds <torvalds@linux-foundation.org>   2020-02-09 02:15:08 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-02-09 02:15:08 +0100
commit     291abfea4746897b821830e0189dc225abd401eb (patch)
tree       cbdf93a2ade8842c80a2272fae2088f33c09b336 /net
parent     Merge tag 'powerpc-5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/po... (diff)
parent     net: thunderx: use proper interface type for RGMII (diff)
download   linux-291abfea4746897b821830e0189dc225abd401eb.tar.xz
           linux-291abfea4746897b821830e0189dc225abd401eb.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller:
1) Unbalanced locking in mwifiex_process_country_ie, from Brian Norris.
2) Fix thermal zone registration in iwlwifi, from Andrei
Otcheretianski.
3) Fix double free_irq in sgi ioc3 eth, from Thomas Bogendoerfer.
4) Use after free in mptcp, from Florian Westphal.
5) Use after free in wireguard's root_remove_peer_lists, from Eric
Dumazet.
6) Properly access packet heads in bonding alb code, from Eric
   Dumazet.
7) Fix data race in skb_queue_len(), from Qian Cai (a sketch of the
   lockless-read pattern follows this list).
8) Fix regression in r8169 on some chips, from Heiner Kallweit.
9) Fix XDP program ref counting in hv_netvsc, from Haiyang Zhang.
10) Certain kinds of set link netlink operations can cause a NULL deref
in the ipv6 addrconf code. Fix from Eric Dumazet.
11) Don't cancel an uninitialized work item in drop monitor, from Ido
    Schimmel.
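The data race in fix 7 is representative of its class: one side updates a queue length under the queue lock while other code polls the length locklessly, so both sides need marked accesses or the compiler may tear, fuse, or cache the loads. Below is a minimal userspace sketch of the same pattern, using C11 relaxed atomics where the kernel patch uses skb_queue_len_lockless()/READ_ONCE(); the struct and function names here are illustrative, not from the patch:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for a queue whose length is read both under and outside its lock. */
struct queue {
        _Atomic unsigned int qlen;  /* the kernel uses a plain field plus READ_ONCE/WRITE_ONCE */
};

/* Writer side: still serialized by the queue lock in the real code; the
 * marked (relaxed atomic) update only guarantees that lockless readers
 * never observe a torn or stale compiler-cached value. */
static void queue_len_inc(struct queue *q)
{
        atomic_fetch_add_explicit(&q->qlen, 1, memory_order_relaxed);
}

/* Reader side: the analogue of skb_queue_len_lockless(), a racy but
 * well-defined snapshot, good enough for a heuristic backlog check. */
static unsigned int queue_len_lockless(struct queue *q)
{
        return atomic_load_explicit(&q->qlen, memory_order_relaxed);
}

int main(void)
{
        struct queue q = { 0 };

        queue_len_inc(&q);
        printf("snapshot length: %u\n", queue_len_lockless(&q));
        return 0;
}

The af_unix.c hunk at the end of this pull applies exactly this treatment to sk_receive_queue via skb_queue_len_lockless() and READ_ONCE().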
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (84 commits)
net: thunderx: use proper interface type for RGMII
mt76: mt7615: fix max_nss in mt7615_eeprom_parse_hw_cap
bpf: Improve bucket_log calculation logic
selftests/bpf: Test freeing sockmap/sockhash with a socket in it
bpf, sockhash: Synchronize_rcu before free'ing map
bpf, sockmap: Don't sleep while holding RCU lock on tear-down
bpftool: Don't crash on missing xlated program instructions
bpf, sockmap: Check update requirements after locking
drop_monitor: Do not cancel uninitialized work item
mlxsw: spectrum_dpipe: Add missing error path
mlxsw: core: Add validation of hardware device types for MGPIR register
mlxsw: spectrum_router: Clear offload indication from IPv6 nexthops on abort
selftests: mlxsw: Add test cases for local table route replacement
mlxsw: spectrum_router: Prevent incorrect replacement of local table routes
net: dsa: microchip: enable module autoprobe
ipv6/addrconf: fix potential NULL deref in inet6_set_link_af()
dpaa_eth: support all modes with rate adapting PHYs
net: stmmac: update pci platform data to use phy_interface
net: stmmac: xgmac: fix missing IFF_MULTICAST check in dwxgmac2_set_filter
net: stmmac: fix missing IFF_MULTICAST check in dwmac4_set_filter
...
Diffstat (limited to 'net')
-rw-r--r--  net/core/bpf_sk_storage.c     5
-rw-r--r--  net/core/devlink.c            6
-rw-r--r--  net/core/drop_monitor.c       4
-rw-r--r--  net/core/sock_map.c          28
-rw-r--r--  net/ipv6/addrconf.c           3
-rw-r--r--  net/mptcp/protocol.c        106
-rw-r--r--  net/rxrpc/call_object.c      22
-rw-r--r--  net/rxrpc/conn_object.c       3
-rw-r--r--  net/sched/cls_tcindex.c       3
-rw-r--r--  net/sched/sch_fq_pie.c        2
-rw-r--r--  net/sched/sch_taprio.c       92
-rw-r--r--  net/unix/af_unix.c           11
12 files changed, 157 insertions, 128 deletions
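One pattern in the sock_map.c hunks below is worth calling out: the sockmap and sockhash update paths move the TCP_ESTABLISHED test to after sock_map_sk_acquire(), closing the window in which the socket state could change between the check and the update (a classic TOCTOU bug). A small pthreads sketch of that check-under-lock shape follows; all names here are illustrative stand-ins, not kernel API:

#include <pthread.h>
#include <stdio.h>

enum { ST_ESTABLISHED = 1, ST_CLOSED = 2 };

/* Stand-in for a socket whose state another thread may change. */
struct sk {
        pthread_mutex_t lock;
        int state;              /* analogue of sk->sk_state */
};

/* Mirrors the shape of the sock_map_update_elem() fix: capability-style
 * checks can stay outside the lock, but the state test must be repeated
 * while the lock is held, or a concurrent close() can slip between the
 * check and the update. */
static int sk_update(struct sk *s)
{
        int ret;

        pthread_mutex_lock(&s->lock);
        if (s->state != ST_ESTABLISHED)
                ret = -1;       /* the kernel returns -EOPNOTSUPP here */
        else
                ret = 0;        /* ...perform the actual map update... */
        pthread_mutex_unlock(&s->lock);

        return ret;
}

int main(void)
{
        struct sk s = { PTHREAD_MUTEX_INITIALIZER, ST_ESTABLISHED };

        printf("update: %d\n", sk_update(&s));
        return 0;
}

The same reordering appears twice in the diff, once for sockmap and once for sockhash.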
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 458be6b3eda9..3ab23f698221 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -643,9 +643,10 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
                 return ERR_PTR(-ENOMEM);
 
         bpf_map_init_from_attr(&smap->map, attr);
+        nbuckets = roundup_pow_of_two(num_possible_cpus());
         /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
-        smap->bucket_log = max_t(u32, 1, ilog2(roundup_pow_of_two(num_possible_cpus())));
-        nbuckets = 1U << smap->bucket_log;
+        nbuckets = max_t(u32, 2, nbuckets);
+        smap->bucket_log = ilog2(nbuckets);
         cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
 
         ret = bpf_map_charge_init(&smap->map.memory, cost);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index ca1df0ec3c97..549ee56b7a21 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -3986,6 +3986,12 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
                 goto out_unlock;
         }
 
+        /* return 0 if there is no further data to read */
+        if (start_offset >= region->size) {
+                err = 0;
+                goto out_unlock;
+        }
+
         hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                           &devlink_nl_family, NLM_F_ACK | NLM_F_MULTI,
                           DEVLINK_CMD_REGION_READ);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index ea46fc6aa883..31700e0c3928 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -1000,8 +1000,10 @@ static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
 {
         int cpu;
 
-        if (!monitor_hw)
+        if (!monitor_hw) {
                 NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already disabled");
+                return;
+        }
 
         monitor_hw = false;
 
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 8998e356f423..085cef5857bb 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -234,7 +234,6 @@ static void sock_map_free(struct bpf_map *map)
         int i;
 
         synchronize_rcu();
-        rcu_read_lock();
         raw_spin_lock_bh(&stab->lock);
         for (i = 0; i < stab->map.max_entries; i++) {
                 struct sock **psk = &stab->sks[i];
@@ -243,13 +242,15 @@ static void sock_map_free(struct bpf_map *map)
                 sk = xchg(psk, NULL);
                 if (sk) {
                         lock_sock(sk);
+                        rcu_read_lock();
                         sock_map_unref(sk, psk);
+                        rcu_read_unlock();
                         release_sock(sk);
                 }
         }
         raw_spin_unlock_bh(&stab->lock);
-        rcu_read_unlock();
 
+        /* wait for psock readers accessing its map link */
         synchronize_rcu();
 
         bpf_map_area_free(stab->sks);
@@ -416,14 +417,16 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
                 ret = -EINVAL;
                 goto out;
         }
-        if (!sock_map_sk_is_suitable(sk) ||
-            sk->sk_state != TCP_ESTABLISHED) {
+        if (!sock_map_sk_is_suitable(sk)) {
                 ret = -EOPNOTSUPP;
                 goto out;
         }
 
         sock_map_sk_acquire(sk);
-        ret = sock_map_update_common(map, idx, sk, flags);
+        if (sk->sk_state != TCP_ESTABLISHED)
+                ret = -EOPNOTSUPP;
+        else
+                ret = sock_map_update_common(map, idx, sk, flags);
         sock_map_sk_release(sk);
 out:
         fput(sock->file);
@@ -739,14 +742,16 @@ static int sock_hash_update_elem(struct bpf_map *map, void *key,
                 ret = -EINVAL;
                 goto out;
         }
-        if (!sock_map_sk_is_suitable(sk) ||
-            sk->sk_state != TCP_ESTABLISHED) {
+        if (!sock_map_sk_is_suitable(sk)) {
                 ret = -EOPNOTSUPP;
                 goto out;
         }
 
         sock_map_sk_acquire(sk);
-        ret = sock_hash_update_common(map, key, sk, flags);
+        if (sk->sk_state != TCP_ESTABLISHED)
+                ret = -EOPNOTSUPP;
+        else
+                ret = sock_hash_update_common(map, key, sk, flags);
         sock_map_sk_release(sk);
 out:
         fput(sock->file);
@@ -859,19 +864,22 @@ static void sock_hash_free(struct bpf_map *map)
         int i;
 
         synchronize_rcu();
-        rcu_read_lock();
         for (i = 0; i < htab->buckets_num; i++) {
                 bucket = sock_hash_select_bucket(htab, i);
                 raw_spin_lock_bh(&bucket->lock);
                 hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
                         hlist_del_rcu(&elem->node);
                         lock_sock(elem->sk);
+                        rcu_read_lock();
                         sock_map_unref(elem->sk, elem);
+                        rcu_read_unlock();
                         release_sock(elem->sk);
                 }
                 raw_spin_unlock_bh(&bucket->lock);
         }
-        rcu_read_unlock();
+
+        /* wait for psock readers accessing its map link */
+        synchronize_rcu();
 
         bpf_map_area_free(htab->buckets);
         kfree(htab);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 39d861d00377..cb493e15959c 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -5718,6 +5718,9 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
         struct nlattr *tb[IFLA_INET6_MAX + 1];
         int err;
 
+        if (!idev)
+                return -EAFNOSUPPORT;
+
         if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
                 BUG();
 
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3bccee455688..73780b4cb108 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -24,57 +24,12 @@
 
 #define MPTCP_SAME_STATE TCP_MAX_STATES
 
-static void __mptcp_close(struct sock *sk, long timeout);
-
-static const struct proto_ops *tcp_proto_ops(struct sock *sk)
-{
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
-        if (sk->sk_family == AF_INET6)
-                return &inet6_stream_ops;
+struct mptcp6_sock {
+        struct mptcp_sock msk;
+        struct ipv6_pinfo np;
+};
 #endif
-        return &inet_stream_ops;
-}
-
-/* MP_CAPABLE handshake failed, convert msk to plain tcp, replacing
- * socket->sk and stream ops and destroying msk
- * return the msk socket, as we can't access msk anymore after this function
- * completes
- * Called with msk lock held, releases such lock before returning
- */
-static struct socket *__mptcp_fallback_to_tcp(struct mptcp_sock *msk,
-                                              struct sock *ssk)
-{
-        struct mptcp_subflow_context *subflow;
-        struct socket *sock;
-        struct sock *sk;
-
-        sk = (struct sock *)msk;
-        sock = sk->sk_socket;
-        subflow = mptcp_subflow_ctx(ssk);
-
-        /* detach the msk socket */
-        list_del_init(&subflow->node);
-        sock_orphan(sk);
-        sock->sk = NULL;
-
-        /* socket is now TCP */
-        lock_sock(ssk);
-        sock_graft(ssk, sock);
-        if (subflow->conn) {
-                /* We can't release the ULP data on a live socket,
-                 * restore the tcp callback
-                 */
-                mptcp_subflow_tcp_fallback(ssk, subflow);
-                sock_put(subflow->conn);
-                subflow->conn = NULL;
-        }
-        release_sock(ssk);
-        sock->ops = tcp_proto_ops(ssk);
-
-        /* destroy the left-over msk sock */
-        __mptcp_close(sk, 0);
-        return sock;
-}
 
 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
  * completed yet or has failed, return the subflow socket.
@@ -93,10 +48,6 @@ static bool __mptcp_needs_tcp_fallback(const struct mptcp_sock *msk)
         return msk->first && !sk_is_mptcp(msk->first);
 }
 
-/* if the mp_capable handshake has failed, it fallbacks msk to plain TCP,
- * releases the socket lock and returns a reference to the now TCP socket.
- * Otherwise returns NULL
- */
 static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
 {
         sock_owned_by_me((const struct sock *)msk);
@@ -105,15 +56,11 @@ static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
                 return NULL;
 
         if (msk->subflow) {
-                /* the first subflow is an active connection, discart the
-                 * paired socket
-                 */
-                msk->subflow->sk = NULL;
-                sock_release(msk->subflow);
-                msk->subflow = NULL;
+                release_sock((struct sock *)msk);
+                return msk->subflow;
         }
 
-        return __mptcp_fallback_to_tcp(msk, msk->first);
+        return NULL;
 }
 
 static bool __mptcp_can_create_subflow(const struct mptcp_sock *msk)
@@ -640,12 +587,14 @@ static void mptcp_subflow_shutdown(struct sock *ssk, int how)
 }
 
 /* Called with msk lock held, releases such lock before returning */
-static void __mptcp_close(struct sock *sk, long timeout)
+static void mptcp_close(struct sock *sk, long timeout)
 {
         struct mptcp_subflow_context *subflow, *tmp;
         struct mptcp_sock *msk = mptcp_sk(sk);
         LIST_HEAD(conn_list);
 
+        lock_sock(sk);
+
         mptcp_token_destroy(msk->token);
         inet_sk_state_store(sk, TCP_CLOSE);
@@ -662,12 +611,6 @@ static void mptcp_close(struct sock *sk, long timeout)
         sk_common_release(sk);
 }
 
-static void mptcp_close(struct sock *sk, long timeout)
-{
-        lock_sock(sk);
-        __mptcp_close(sk, timeout);
-}
-
 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 {
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -691,6 +634,30 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
         inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
 }
 
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
+{
+        unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);
+
+        return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
+}
+#endif
+
+struct sock *mptcp_sk_clone_lock(const struct sock *sk)
+{
+        struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
+
+        if (!nsk)
+                return NULL;
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+        if (nsk->sk_family == AF_INET6)
+                inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
+#endif
+
+        return nsk;
+}
+
 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
                                  bool kern)
 {
@@ -721,7 +688,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
         lock_sock(sk);
 
         local_bh_disable();
-        new_mptcp_sock = sk_clone_lock(sk, GFP_ATOMIC);
+        new_mptcp_sock = mptcp_sk_clone_lock(sk);
         if (!new_mptcp_sock) {
                 *err = -ENOBUFS;
                 local_bh_enable();
@@ -1270,8 +1237,7 @@ int mptcp_proto_v6_init(void)
         strcpy(mptcp_v6_prot.name, "MPTCPv6");
         mptcp_v6_prot.slab = NULL;
         mptcp_v6_prot.destroy = mptcp_v6_destroy;
-        mptcp_v6_prot.obj_size = sizeof(struct mptcp_sock) +
-                                 sizeof(struct ipv6_pinfo);
+        mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);
 
         err = proto_register(&mptcp_v6_prot, 1);
         if (err)
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index dbdbc4f18b5e..c9f34b0a11df 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 }
 
 /*
- * Final call destruction under RCU.
+ * Final call destruction - but must be done in process context.
  */
-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+static void rxrpc_destroy_call(struct work_struct *work)
 {
-        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+        struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
         struct rxrpc_net *rxnet = call->rxnet;
 
         rxrpc_put_connection(call->conn);
@@ -579,6 +579,22 @@ static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
 }
 
 /*
+ * Final call destruction under RCU.
+ */
+static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
+{
+        struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
+
+        if (in_softirq()) {
+                INIT_WORK(&call->processor, rxrpc_destroy_call);
+                if (!rxrpc_queue_work(&call->processor))
+                        BUG();
+        } else {
+                rxrpc_destroy_call(&call->processor);
+        }
+}
+
+/*
  * clean up a call
  */
 void rxrpc_cleanup_call(struct rxrpc_call *call)
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index c0b3154f7a7e..19e141eeed17 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -171,8 +171,6 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
 
         _enter("%d,%x", conn->debug_id, call->cid);
 
-        set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-
         if (rcu_access_pointer(chan->call) == call) {
                 /* Save the result of the call so that we can repeat it if necessary
                  * through the channel, whilst disposing of the actual call record.
@@ -225,6 +223,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
         __rxrpc_disconnect_call(conn, call);
         spin_unlock(&conn->channel_lock);
 
+        set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
         conn->idle_timestamp = jiffies;
 }
 
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 0323aee03de7..09b7dc5fe7e0 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -365,7 +365,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
 
         err = tcindex_filter_result_init(&new_filter_result, net);
         if (err < 0)
-                goto errout1;
+                goto errout_alloc;
         if (old_r)
                 cr = r->res;
 
@@ -484,7 +484,6 @@ errout_alloc:
                 tcindex_free_perfect_hash(cp);
         else if (balloc == 2)
                 kfree(cp->h);
-errout1:
         tcf_exts_destroy(&new_filter_result.exts);
 errout:
         kfree(cp);
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index bbd0dea6b6b9..214657eb3dfd 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -349,9 +349,9 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
         while (sch->q.qlen > sch->limit) {
                 struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
 
-                kfree_skb(skb);
                 len_dropped += qdisc_pkt_len(skb);
                 num_dropped += 1;
+                rtnl_kfree_skbs(skb, skb);
         }
         qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);
 
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index c609373c8661..660fc45ee40f 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(taprio_list_lock);
 
 #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
 #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+#define TAPRIO_FLAGS_INVALID U32_MAX
 
 struct sched_entry {
         struct list_head list;
@@ -766,6 +767,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
         [TCA_TAPRIO_ATTR_SCHED_CLOCKID]              = { .type = NLA_S32 },
         [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
         [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
+        [TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
 };
 
 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
@@ -1367,6 +1369,33 @@ static int taprio_mqprio_cmp(const struct net_device *dev,
         return 0;
 }
 
+/* The semantics of the 'flags' argument in relation to 'change()'
+ * requests, are interpreted following two rules (which are applied in
+ * this order): (1) an omitted 'flags' argument is interpreted as
+ * zero; (2) the 'flags' of a "running" taprio instance cannot be
+ * changed.
+ */
+static int taprio_new_flags(const struct nlattr *attr, u32 old,
+                            struct netlink_ext_ack *extack)
+{
+        u32 new = 0;
+
+        if (attr)
+                new = nla_get_u32(attr);
+
+        if (old != TAPRIO_FLAGS_INVALID && old != new) {
+                NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
+                return -EOPNOTSUPP;
+        }
+
+        if (!taprio_flags_valid(new)) {
+                NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
+                return -EINVAL;
+        }
+
+        return new;
+}
+
 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                          struct netlink_ext_ack *extack)
 {
@@ -1375,7 +1404,6 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
         struct taprio_sched *q = qdisc_priv(sch);
         struct net_device *dev = qdisc_dev(sch);
         struct tc_mqprio_qopt *mqprio = NULL;
-        u32 taprio_flags = 0;
         unsigned long flags;
         ktime_t start;
         int i, err;
@@ -1388,21 +1416,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
         if (tb[TCA_TAPRIO_ATTR_PRIOMAP])
                 mqprio = nla_data(tb[TCA_TAPRIO_ATTR_PRIOMAP]);
 
-        if (tb[TCA_TAPRIO_ATTR_FLAGS]) {
-                taprio_flags = nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]);
-
-                if (q->flags != 0 && q->flags != taprio_flags) {
-                        NL_SET_ERR_MSG_MOD(extack, "Changing 'flags' of a running schedule is not supported");
-                        return -EOPNOTSUPP;
-                } else if (!taprio_flags_valid(taprio_flags)) {
-                        NL_SET_ERR_MSG_MOD(extack, "Specified 'flags' are not valid");
-                        return -EINVAL;
-                }
+        err = taprio_new_flags(tb[TCA_TAPRIO_ATTR_FLAGS],
+                               q->flags, extack);
+        if (err < 0)
+                return err;
 
-                q->flags = taprio_flags;
-        }
+        q->flags = err;
 
-        err = taprio_parse_mqprio_opt(dev, mqprio, extack, taprio_flags);
+        err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
         if (err < 0)
                 return err;
 
@@ -1444,7 +1465,20 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 
         taprio_set_picos_per_byte(dev, q);
 
-        if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
+        if (mqprio) {
+                netdev_set_num_tc(dev, mqprio->num_tc);
+                for (i = 0; i < mqprio->num_tc; i++)
+                        netdev_set_tc_queue(dev, i,
+                                            mqprio->count[i],
+                                            mqprio->offset[i]);
+
+                /* Always use supplied priority mappings */
+                for (i = 0; i <= TC_BITMASK; i++)
+                        netdev_set_prio_tc_map(dev, i,
+                                               mqprio->prio_tc_map[i]);
+        }
+
+        if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                 err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
         else
                 err = taprio_disable_offload(dev, q, extack);
@@ -1464,27 +1498,14 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                 q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]);
         }
 
-        if (!TXTIME_ASSIST_IS_ENABLED(taprio_flags) &&
-            !FULL_OFFLOAD_IS_ENABLED(taprio_flags) &&
+        if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
+            !FULL_OFFLOAD_IS_ENABLED(q->flags) &&
             !hrtimer_active(&q->advance_timer)) {
                 hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
                 q->advance_timer.function = advance_sched;
         }
 
-        if (mqprio) {
-                netdev_set_num_tc(dev, mqprio->num_tc);
-                for (i = 0; i < mqprio->num_tc; i++)
-                        netdev_set_tc_queue(dev, i,
-                                            mqprio->count[i],
-                                            mqprio->offset[i]);
-
-                /* Always use supplied priority mappings */
-                for (i = 0; i <= TC_BITMASK; i++)
-                        netdev_set_prio_tc_map(dev, i,
-                                               mqprio->prio_tc_map[i]);
-        }
-
-        if (FULL_OFFLOAD_IS_ENABLED(taprio_flags)) {
+        if (FULL_OFFLOAD_IS_ENABLED(q->flags)) {
                 q->dequeue = taprio_dequeue_offload;
                 q->peek = taprio_peek_offload;
         } else {
@@ -1501,9 +1522,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
                 goto unlock;
         }
 
-        if (TXTIME_ASSIST_IS_ENABLED(taprio_flags)) {
-                setup_txtime(q, new_admin, start);
+        setup_txtime(q, new_admin, start);
 
+        if (TXTIME_ASSIST_IS_ENABLED(q->flags)) {
                 if (!oper) {
                         rcu_assign_pointer(q->oper_sched, new_admin);
                         err = 0;
@@ -1528,7 +1549,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 
                 spin_unlock_irqrestore(&q->current_entry_lock, flags);
 
-                if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
+                if (FULL_OFFLOAD_IS_ENABLED(q->flags))
                         taprio_offload_config_changed(q);
         }
 
@@ -1567,7 +1588,7 @@ static void taprio_destroy(struct Qdisc *sch)
         }
         q->qdiscs = NULL;
 
-        netdev_set_num_tc(dev, 0);
+        netdev_reset_tc(dev);
 
         if (q->oper_sched)
                 call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb);
@@ -1597,6 +1618,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
          * and get the valid one on taprio_change().
          */
         q->clockid = -1;
+        q->flags = TAPRIO_FLAGS_INVALID;
 
         spin_lock(&taprio_list_lock);
         list_add(&q->taprio_list, &taprio_list);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 321af97c7bbe..62c12cb5763e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -189,11 +189,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
         return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
 }
 
-static inline int unix_recvq_full(struct sock const *sk)
+static inline int unix_recvq_full(const struct sock *sk)
 {
         return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
 }
 
+static inline int unix_recvq_full_lockless(const struct sock *sk)
+{
+        return skb_queue_len_lockless(&sk->sk_receive_queue) >
+                READ_ONCE(sk->sk_max_ack_backlog);
+}
+
 struct sock *unix_peer_get(struct sock *s)
 {
         struct sock *peer;
@@ -1758,7 +1764,8 @@ restart_locked:
          * - unix_peer(sk) == sk by time of get but disconnected before lock
          */
         if (other != sk &&
-            unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+            unlikely(unix_peer(other) != sk &&
+                     unix_recvq_full_lockless(other))) {
                 if (timeo) {
                         timeo = unix_wait_for_peer(other, timeo);
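A note on the mptcp/protocol.c change above: struct mptcp6_sock deliberately places struct ipv6_pinfo at the end of the allocation, and mptcp_inet6_sk() recovers it from the combined size rather than via container_of(), which is what lets the generic sk_clone_lock() path re-point inet_sk(nsk)->pinet6 after a clone. Below is a standalone sketch of that offset arithmetic with simplified stand-in types; nothing here is kernel code:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for mptcp_sock and ipv6_pinfo. */
struct proto_part { int a, b; };
struct ipv6_part  { int c, d, e; };

/* Mirrors struct mptcp6_sock: the IPv6 state is placed last. */
struct combined_sock {
        struct proto_part msk;
        struct ipv6_part  np;
};

/* Mirrors mptcp_inet6_sk(): given only the base pointer and the combined
 * size, recover the trailing member. This works because the member is
 * last, so size minus member size equals its offset, provided the struct
 * has no tail padding. */
static struct ipv6_part *ipv6_of(struct proto_part *sk)
{
        size_t offset = sizeof(struct combined_sock) - sizeof(struct ipv6_part);

        return (struct ipv6_part *)((unsigned char *)sk + offset);
}

int main(void)
{
        struct combined_sock cs;

        printf("%s\n", ipv6_of(&cs.msk) == &cs.np ? "offset matches member"
                                                  : "layout mismatch");
        return 0;
}

The arithmetic only holds when the trailing member's placement leaves no tail padding, a contract the kernel layout satisfies and which the accompanying obj_size change (sizeof(struct mptcp6_sock)) depends on.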