author     Jakub Kicinski <kuba@kernel.org>    2020-09-05 06:18:58 +0200
committer  Jakub Kicinski <kuba@kernel.org>    2020-09-05 06:28:59 +0200
commit     44a8c4f33c0073ca614db79f22e023811bdd0f3c (patch)
tree       2d29271961a485f621bd14294ec57e816290541e /net
parent     smsc9420: switch from 'pci_' to 'dma_' API (diff)
parent     Merge tag 's390-5.9-5' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/... (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

We got slightly different patches removing a double word in a comment in
net/ipv4/raw.c - picked the version from net.

Simple conflict in drivers/net/ethernet/ibm/ibmvnic.c. Use cached values
instead of VNIC login response buffer (following what commit 507ebe6444a4
("ibmvnic: Fix use-after-free of VNIC login response buffer") did).

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan_dev.c | 2
-rw-r--r--  net/9p/trans_xen.c | 2
-rw-r--r--  net/atm/common.c | 4
-rw-r--r--  net/atm/lec.c | 2
-rw-r--r--  net/atm/resources.c | 8
-rw-r--r--  net/batman-adv/bat_v_ogm.c | 11
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 5
-rw-r--r--  net/batman-adv/gateway_client.c | 6
-rw-r--r--  net/bpf/test_run.c | 2
-rw-r--r--  net/caif/cfrfml.c | 4
-rw-r--r--  net/can/j1939/socket.c | 2
-rw-r--r--  net/can/j1939/transport.c | 20
-rw-r--r--  net/ceph/ceph_hash.c | 20
-rw-r--r--  net/ceph/crush/mapper.c | 2
-rw-r--r--  net/ceph/messenger.c | 4
-rw-r--r--  net/ceph/mon_client.c | 2
-rw-r--r--  net/ceph/osd_client.c | 4
-rw-r--r--  net/core/dev.c | 7
-rw-r--r--  net/core/dev_ioctl.c | 6
-rw-r--r--  net/core/devlink.c | 4
-rw-r--r--  net/core/drop_monitor.c | 2
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/core/netpoll.c | 2
-rw-r--r--  net/core/pktgen.c | 4
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/core/skmsg.c | 1
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/decnet/af_decnet.c | 6
-rw-r--r--  net/decnet/dn_nsp_in.c | 2
-rw-r--r--  net/decnet/dn_table.c | 2
-rw-r--r--  net/decnet/sysctl_net_decnet.c | 2
-rw-r--r--  net/dsa/slave.c | 2
-rw-r--r--  net/ieee802154/6lowpan/reassembly.c | 2
-rw-r--r--  net/ieee802154/6lowpan/rx.c | 4
-rw-r--r--  net/ipv4/fib_trie.c | 3
-rw-r--r--  net/ipv4/netfilter/nf_nat_pptp.c | 2
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 3
-rw-r--r--  net/iucv/af_iucv.c | 10
-rw-r--r--  net/l3mdev/l3mdev.c | 2
-rw-r--r--  net/mac80211/airtime.c | 202
-rw-r--r--  net/mac80211/sta_info.h | 5
-rw-r--r--  net/mac80211/status.c | 43
-rw-r--r--  net/mpls/af_mpls.c | 2
-rw-r--r--  net/mptcp/protocol.c | 6
-rw-r--r--  net/ncsi/ncsi-manage.c | 4
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_pptp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 39
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_udp.c | 26
-rw-r--r--  net/netfilter/nf_tables_api.c | 64
-rw-r--r--  net/netfilter/nfnetlink.c | 11
-rw-r--r--  net/netfilter/nfnetlink_log.c | 3
-rw-r--r--  net/netfilter/nfnetlink_queue.c | 2
-rw-r--r--  net/netfilter/nft_flow_offload.c | 2
-rw-r--r--  net/netfilter/nft_payload.c | 4
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 57
-rw-r--r--  net/netfilter/xt_recent.c | 2
-rw-r--r--  net/netlabel/netlabel_domainhash.c | 59
-rw-r--r--  net/netlink/af_netlink.c | 2
-rw-r--r--  net/netlink/policy.c | 2
-rw-r--r--  net/netrom/nr_in.c | 2
-rw-r--r--  net/netrom/nr_route.c | 8
-rw-r--r--  net/openvswitch/conntrack.c | 4
-rw-r--r--  net/openvswitch/flow.c | 2
-rw-r--r--  net/packet/af_packet.c | 9
-rw-r--r--  net/phonet/pep.c | 10
-rw-r--r--  net/rds/send.c | 2
-rw-r--r--  net/rose/rose_in.c | 2
-rw-r--r--  net/rose/rose_route.c | 4
-rw-r--r--  net/rxrpc/af_rxrpc.c | 6
-rw-r--r--  net/rxrpc/ar-internal.h | 13
-rw-r--r--  net/rxrpc/call_accept.c | 2
-rw-r--r--  net/rxrpc/call_object.c | 1
-rw-r--r--  net/rxrpc/conn_client.c | 2
-rw-r--r--  net/rxrpc/input.c | 129
-rw-r--r--  net/rxrpc/local_object.c | 2
-rw-r--r--  net/rxrpc/output.c | 82
-rw-r--r--  net/rxrpc/peer_event.c | 2
-rw-r--r--  net/rxrpc/peer_object.c | 16
-rw-r--r--  net/rxrpc/recvmsg.c | 2
-rw-r--r--  net/rxrpc/rtt.c | 3
-rw-r--r--  net/rxrpc/rxkad.c | 3
-rw-r--r--  net/rxrpc/sendmsg.c | 6
-rw-r--r--  net/sched/sch_cake.c | 2
-rw-r--r--  net/sched/sch_red.c | 20
-rw-r--r--  net/sched/sch_taprio.c | 30
-rw-r--r--  net/sctp/ipv6.c | 2
-rw-r--r--  net/sctp/outqueue.c | 6
-rw-r--r--  net/sctp/sm_make_chunk.c | 2
-rw-r--r--  net/sctp/sm_sideeffect.c | 2
-rw-r--r--  net/sctp/sm_statefuns.c | 2
-rw-r--r--  net/sctp/socket.c | 16
-rw-r--r--  net/smc/smc_close.c | 17
-rw-r--r--  net/smc/smc_core.c | 3
-rw-r--r--  net/smc/smc_llc.c | 15
-rw-r--r--  net/socket.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c | 2
-rw-r--r--  net/sunrpc/auth_gss/trace.c | 1
-rw-r--r--  net/sunrpc/clnt.c | 22
-rw-r--r--  net/sunrpc/xprt.c | 2
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c | 2
-rw-r--r--  net/sunrpc/xprtsock.c | 8
-rw-r--r--  net/tipc/crypto.c | 12
-rw-r--r--  net/tipc/group.c | 2
-rw-r--r--  net/tipc/socket.c | 9
-rw-r--r--  net/unix/af_unix.c | 2
-rw-r--r--  net/wireless/chan.c | 15
-rw-r--r--  net/wireless/nl80211.c | 2
-rw-r--r--  net/wireless/reg.c | 3
-rw-r--r--  net/wireless/util.c | 8
-rw-r--r--  net/x25/x25_facilities.c | 2
-rw-r--r--  net/x25/x25_in.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
116 files changed, 768 insertions(+), 482 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3dd7c972677b..ec8408d1638f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -367,7 +367,7 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCSHWTSTAMP:
if (!net_eq(dev_net(dev), &init_net))
break;
- /* fall through */
+ fallthrough;
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 3debad93be1a..bc8807d9281f 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -520,7 +520,7 @@ static void xen_9pfs_front_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* fall through - Missed the backend's CLOSING state */
+ fallthrough; /* Missed the backend's CLOSING state */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/net/atm/common.c b/net/atm/common.c
index 84367b844b14..1cfa9bf1d187 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -297,7 +297,7 @@ static int adjust_tp(struct atm_trafprm *tp, unsigned char aal)
break;
default:
pr_warn("AAL problems ... (%d)\n", aal);
- /* fall through */
+ fallthrough;
case ATM_AAL5:
max_sdu = ATM_MAX_AAL5_PDU;
}
@@ -417,7 +417,7 @@ static int __vcc_connect(struct atm_vcc *vcc, struct atm_dev *dev, short vpi,
case ATM_NO_AAL:
/* ATM_AAL5 is also used in the "0 for default" case */
vcc->qos.aal = ATM_AAL5;
- /* fall through */
+ fallthrough;
case ATM_AAL5:
error = atm_init_aal5(vcc);
vcc->stats = &dev->stats.aal5;
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 875fc0bc1780..b570ef919c28 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -380,7 +380,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
if (mesg->content.normal.no_source_le_narp)
break;
- /* FALL THROUGH */
+ fallthrough;
case l_arp_update:
lec_arp_update(priv, mesg->content.normal.mac_addr,
mesg->content.normal.atm_addr,
diff --git a/net/atm/resources.c b/net/atm/resources.c
index 94bdc6527ee8..53236986dfe0 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -266,7 +266,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len,
goto done;
}
}
- /* fall through */
+ fallthrough;
case ATM_SETESIF:
{
unsigned char esi[ESI_LEN];
@@ -288,7 +288,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len,
error = -EPERM;
goto done;
}
- /* fall through */
+ fallthrough;
case ATM_GETSTAT:
size = sizeof(struct atm_dev_stats);
error = fetch_stats(dev, buf, cmd == ATM_GETSTATZ);
@@ -361,7 +361,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len,
error = -EINVAL;
goto done;
}
- /* fall through */
+ fallthrough;
case ATM_SETCIRANGE:
case SONET_GETSTATZ:
case SONET_SETDIAG:
@@ -371,7 +371,7 @@ int atm_dev_ioctl(unsigned int cmd, void __user *buf, int __user *sioc_len,
error = -EPERM;
goto done;
}
- /* fall through */
+ fallthrough;
default:
if (IS_ENABLED(CONFIG_COMPAT) && compat) {
#ifdef CONFIG_COMPAT
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 11c3f98ba938..8c1148fc73d7 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -882,6 +882,12 @@ static void batadv_v_ogm_process(const struct sk_buff *skb, int ogm_offset,
ntohl(ogm_packet->seqno), ogm_throughput, ogm_packet->ttl,
ogm_packet->version, ntohs(ogm_packet->tvlv_len));
+ if (batadv_is_my_mac(bat_priv, ogm_packet->orig)) {
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Drop packet: originator packet from ourself\n");
+ return;
+ }
+
/* If the throughput metric is 0, immediately drop the packet. No need
* to create orig_node / neigh_node for an unusable route.
*/
@@ -1009,11 +1015,6 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb,
if (batadv_is_my_mac(bat_priv, ethhdr->h_source))
goto free_skb;
- ogm_packet = (struct batadv_ogm2_packet *)skb->data;
-
- if (batadv_is_my_mac(bat_priv, ogm_packet->orig))
- goto free_skb;
-
batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_MGMT_RX_BYTES,
skb->len + ETH_HLEN);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5c41cc52bc53..ab6cec3c7586 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -437,7 +437,10 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
- netif_rx(skb);
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
out:
if (primary_if)
batadv_hardif_put(primary_if);
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index a18dcc686dc3..ef3f85b576c4 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -703,8 +703,10 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
chaddr_offset = *header_len + BATADV_DHCP_CHADDR_OFFSET;
/* store the client address if the message is going to a client */
- if (ret == BATADV_DHCP_TO_CLIENT &&
- pskb_may_pull(skb, chaddr_offset + ETH_ALEN)) {
+ if (ret == BATADV_DHCP_TO_CLIENT) {
+ if (!pskb_may_pull(skb, chaddr_offset + ETH_ALEN))
+ return BATADV_DHCP_NO;
+
/* check if the DHCP packet carries an Ethernet DHCP */
p = skb->data + *header_len + BATADV_DHCP_HTYPE_OFFSET;
if (*p != BATADV_DHCP_HTYPE_ETHERNET)
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 99eb8c6c0fbc..a66f211726e7 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -425,7 +425,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
case BPF_PROG_TYPE_SCHED_CLS:
case BPF_PROG_TYPE_SCHED_ACT:
is_l2 = true;
- /* fall through */
+ fallthrough;
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
case BPF_PROG_TYPE_LWT_XMIT:
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index ce2767e9cec6..7b0af33bdb97 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -116,7 +116,7 @@ static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
if (segmented) {
if (rfml->incomplete_frm == NULL) {
/* Initial Segment */
- if (cfpkt_peek_head(pkt, rfml->seghead, 6) < 0)
+ if (cfpkt_peek_head(pkt, rfml->seghead, 6) != 0)
goto out;
rfml->pdu_size = get_unaligned_le16(rfml->seghead+4);
@@ -233,7 +233,7 @@ static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
if (cfpkt_getlen(pkt) > rfml->fragment_size + RFM_HEAD_SIZE)
err = cfpkt_peek_head(pkt, head, 6);
- if (err < 0)
+ if (err != 0)
goto out;
while (cfpkt_getlen(frontpkt) > rfml->fragment_size + RFM_HEAD_SIZE) {
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index b93876c57fc4..1be4c898b2fa 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -1086,7 +1086,7 @@ static int j1939_sk_send_loop(struct j1939_priv *priv, struct sock *sk,
break;
case -ERESTARTSYS:
ret = -EINTR;
- /* fall through */
+ fallthrough;
case -EAGAIN: /* OK */
if (todo_size != size)
ret = size - todo_size;
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index a8dd956b5e8e..0cec4152f979 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -860,7 +860,7 @@ static int j1939_xtp_txnext_transmiter(struct j1939_session *session)
return ret;
}
- /* fall through */
+ fallthrough;
case J1939_TP_CMD_CTS:
case 0xff: /* did some data */
case J1939_ETP_CMD_DPO:
@@ -1764,12 +1764,12 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
case J1939_ETP_CMD_DPO:
if (skcb->addr.type == J1939_ETP)
break;
- /* fall through */
- case J1939_TP_CMD_BAM: /* fall through */
+ fallthrough;
+ case J1939_TP_CMD_BAM:
case J1939_TP_CMD_CTS: /* fall through */
if (skcb->addr.type != J1939_ETP)
break;
- /* fall through */
+ fallthrough;
default:
netdev_info(priv->ndev, "%s: 0x%p: last %02x\n", __func__,
session, session->last_cmd);
@@ -1965,8 +1965,8 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
switch (cmd) {
case J1939_ETP_CMD_RTS:
extd = J1939_ETP;
- /* fall through */
- case J1939_TP_CMD_BAM: /* fall through */
+ fallthrough;
+ case J1939_TP_CMD_BAM:
case J1939_TP_CMD_RTS: /* fall through */
if (skcb->addr.type != extd)
return;
@@ -1987,7 +1987,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
case J1939_ETP_CMD_CTS:
extd = J1939_ETP;
- /* fall through */
+ fallthrough;
case J1939_TP_CMD_CTS:
if (skcb->addr.type != extd)
return;
@@ -2014,7 +2014,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
case J1939_ETP_CMD_EOMA:
extd = J1939_ETP;
- /* fall through */
+ fallthrough;
case J1939_TP_CMD_EOMA:
if (skcb->addr.type != extd)
return;
@@ -2050,14 +2050,14 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
switch (skcb->addr.pgn) {
case J1939_ETP_PGN_DAT:
skcb->addr.type = J1939_ETP;
- /* fall through */
+ fallthrough;
case J1939_TP_PGN_DAT:
j1939_xtp_rx_dat(priv, skb);
break;
case J1939_ETP_PGN_CTL:
skcb->addr.type = J1939_ETP;
- /* fall through */
+ fallthrough;
case J1939_TP_PGN_CTL:
if (skb->len < 8)
return 0; /* Don't care. Nothing to extract here */
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 81e1e006c540..16a47c0eef37 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -50,35 +50,35 @@ unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
switch (len) {
case 11:
c = c + ((__u32)k[10] << 24);
- /* fall through */
+ fallthrough;
case 10:
c = c + ((__u32)k[9] << 16);
- /* fall through */
+ fallthrough;
case 9:
c = c + ((__u32)k[8] << 8);
/* the first byte of c is reserved for the length */
- /* fall through */
+ fallthrough;
case 8:
b = b + ((__u32)k[7] << 24);
- /* fall through */
+ fallthrough;
case 7:
b = b + ((__u32)k[6] << 16);
- /* fall through */
+ fallthrough;
case 6:
b = b + ((__u32)k[5] << 8);
- /* fall through */
+ fallthrough;
case 5:
b = b + k[4];
- /* fall through */
+ fallthrough;
case 4:
a = a + ((__u32)k[3] << 24);
- /* fall through */
+ fallthrough;
case 3:
a = a + ((__u32)k[2] << 16);
- /* fall through */
+ fallthrough;
case 2:
a = a + ((__u32)k[1] << 8);
- /* fall through */
+ fallthrough;
case 1:
a = a + k[0];
/* case 0: nothing left to add */
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index 07e5614eb3f1..7057f8db4f99 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -987,7 +987,7 @@ int crush_do_rule(const struct crush_map *map,
case CRUSH_RULE_CHOOSELEAF_FIRSTN:
case CRUSH_RULE_CHOOSE_FIRSTN:
firstn = 1;
- /* fall through */
+ fallthrough;
case CRUSH_RULE_CHOOSELEAF_INDEP:
case CRUSH_RULE_CHOOSE_INDEP:
if (wsize == 0)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 27d6ab11f9ee..bdfd66ba3843 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -412,7 +412,7 @@ static void ceph_sock_state_change(struct sock *sk)
switch (sk->sk_state) {
case TCP_CLOSE:
dout("%s TCP_CLOSE\n", __func__);
- /* fall through */
+ fallthrough;
case TCP_CLOSE_WAIT:
dout("%s TCP_CLOSE_WAIT\n", __func__);
con_sock_state_closing(con);
@@ -2751,7 +2751,7 @@ more:
switch (ret) {
case -EBADMSG:
con->error_msg = "bad crc/signature";
- /* fall through */
+ fallthrough;
case -EBADE:
ret = -EIO;
break;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 3d8c8015e976..d633a0aeaa55 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -1307,7 +1307,7 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
* request had a non-zero tid. Work around this weirdness
* by allocating a new message.
*/
- /* fall through */
+ fallthrough;
case CEPH_MSG_MON_MAP:
case CEPH_MSG_MDS_MAP:
case CEPH_MSG_OSD_MAP:
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index e4fbcad6e7d8..7901ab6c79fd 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -3854,7 +3854,7 @@ static void scan_requests(struct ceph_osd *osd,
if (!force_resend && !force_resend_writes)
break;
- /* fall through */
+ fallthrough;
case CALC_TARGET_NEED_RESEND:
cancel_linger_map_check(lreq);
/*
@@ -3891,7 +3891,7 @@ static void scan_requests(struct ceph_osd *osd,
!force_resend_writes))
break;
- /* fall through */
+ fallthrough;
case CALC_TARGET_NEED_RESEND:
cancel_map_check(req);
unlink_request(osd, req);
diff --git a/net/core/dev.c b/net/core/dev.c
index d42c9ea0c3c0..4086d335978c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4690,10 +4690,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
break;
default:
bpf_warn_invalid_xdp_action(act);
- /* fall through */
+ fallthrough;
case XDP_ABORTED:
trace_xdp_exception(skb->dev, xdp_prog, act);
- /* fall through */
+ fallthrough;
case XDP_DROP:
do_drop:
kfree_skb(skb);
@@ -6612,12 +6612,13 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
netdev_err_once(dev, "%s() called with weight %d\n", __func__,
weight);
napi->weight = weight;
- list_add(&napi->dev_list, &dev->napi_list);
napi->dev = dev;
#ifdef CONFIG_NETPOLL
napi->poll_owner = -1;
#endif
set_bit(NAPI_STATE_SCHED, &napi->state);
+ set_bit(NAPI_STATE_NPSVC, &napi->state);
+ list_add_rcu(&napi->dev_list, &dev->napi_list);
napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b2cf9b7bb7b8..205e92e604ef 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -322,7 +322,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
err = net_hwtstamp_validate(ifr);
if (err)
return err;
- /* fall through */
+ fallthrough;
/*
* Unknown or private ioctl
@@ -478,7 +478,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
case SIOCSIFTXQLEN:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- /* fall through */
+ fallthrough;
/*
* These ioctl calls:
* - require local superuser power.
@@ -503,7 +503,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, bool *need_c
case SIOCSHWTSTAMP:
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
- /* fall through */
+ fallthrough;
case SIOCBONDSLAVEINFOQUERY:
case SIOCBONDINFOQUERY:
dev_load(net, ifr->ifr_name);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 58c8bb07fa19..49e911c19881 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -6199,8 +6199,8 @@ devlink_trap_action_get_from_info(struct genl_info *info,
val = nla_get_u8(info->attrs[DEVLINK_ATTR_TRAP_ACTION]);
switch (val) {
- case DEVLINK_TRAP_ACTION_DROP: /* fall-through */
- case DEVLINK_TRAP_ACTION_TRAP: /* fall-through */
+ case DEVLINK_TRAP_ACTION_DROP:
+ case DEVLINK_TRAP_ACTION_TRAP:
case DEVLINK_TRAP_ACTION_MIRROR:
*p_trap_action = val;
break;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index b09bebeadf0b..9704522b0872 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -1189,7 +1189,7 @@ static int net_dm_alert_mode_get_from_info(struct genl_info *info,
val = nla_get_u8(info->attrs[NET_DM_ATTR_ALERT_MODE]);
switch (val) {
- case NET_DM_ALERT_MODE_SUMMARY: /* fall-through */
+ case NET_DM_ALERT_MODE_SUMMARY:
case NET_DM_ALERT_MODE_PACKET:
*p_alert_mode = val;
break;
diff --git a/net/core/filter.c b/net/core/filter.c
index 47eef9a0be6a..2ad9c0ef1946 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -9625,7 +9625,7 @@ sk_reuseport_is_valid_access(int off, int size,
case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
if (size < sizeof_field(struct sk_buff, protocol))
return false;
- /* fall through */
+ fallthrough;
case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
case bpf_ctx_range(struct sk_reuseport_md, len):
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 093e90e52bc2..2338753e936b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -162,7 +162,7 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi;
int cpu = smp_processor_id();
- list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
poll_one_napi(napi);
smp_store_release(&napi->poll_owner, -1);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index b53b6d38c4df..44fdbb9c6e53 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3430,7 +3430,7 @@ xmit_more:
net_info_ratelimited("%s xmit error: %d\n",
pkt_dev->odevname, ret);
pkt_dev->errors++;
- /* fall through */
+ fallthrough;
case NETDEV_TX_BUSY:
/* Retry it next time */
refcount_dec(&(pkt_dev->skb->users));
@@ -3699,7 +3699,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
cpu_to_node(cpu),
"kpktgend_%d", cpu);
if (IS_ERR(p)) {
- pr_err("kernel_thread() failed for cpu %d\n", t->cpu);
+ pr_err("kthread_create_on_node() failed for cpu %d\n", t->cpu);
list_del(&t->th_list);
kfree(t);
return PTR_ERR(p);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index a5c11aae9c89..bfd748346f20 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -820,6 +820,7 @@ void skb_tx_error(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_tx_error);
+#ifdef CONFIG_TRACEPOINTS
/**
* consume_skb - free an skbuff
* @skb: buffer to free
@@ -837,6 +838,7 @@ void consume_skb(struct sk_buff *skb)
__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
+#endif
/**
* consume_stateless_skb - free an skbuff, assuming it is stateless
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 1c81caf9630f..4b5f7c8fecd1 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -794,7 +794,6 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
sk_psock_skb_redirect(skb);
break;
case __SK_DROP:
- /* fall-through */
default:
out_free:
kfree_skb(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index 64d2aec5ed45..ba9e7d91e2ef 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -995,7 +995,7 @@ set_sndbuf:
break;
case SO_TIMESTAMPING_NEW:
sock_set_flag(sk, SOCK_TSTAMP_NEW);
- /* fall through */
+ fallthrough;
case SO_TIMESTAMPING_OLD:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
@@ -3240,7 +3240,7 @@ void sk_common_release(struct sock *sk)
sk->sk_prot->destroy(sk);
/*
- * Observation: when sock_common_release is called, processes have
+ * Observation: when sk_common_release is called, processes have
* no access to socket. But net still has.
* Step one, detach it from networking:
*
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 3b53d766789d..5dbd45dc35ad 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -623,12 +623,12 @@ static void dn_destroy_sock(struct sock *sk)
goto disc_reject;
case DN_RUN:
scp->state = DN_DI;
- /* fall through */
+ fallthrough;
case DN_DI:
case DN_DR:
disc_reject:
dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
- /* fall through */
+ fallthrough;
case DN_NC:
case DN_NR:
case DN_RJ:
@@ -642,7 +642,7 @@ disc_reject:
break;
default:
printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
- /* fall through */
+ fallthrough;
case DN_O:
dn_stop_slow_timer(sk);
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index c68503a18025..c97bdca5ec30 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -483,7 +483,7 @@ static void dn_nsp_disc_conf(struct sock *sk, struct sk_buff *skb)
break;
case DN_RUN:
sk->sk_shutdown |= SHUTDOWN_MASK;
- /* fall through */
+ fallthrough;
case DN_CC:
scp->state = DN_CN;
}
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 33fefb0aebca..4086f9c746af 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -156,7 +156,7 @@ static void dn_rehash_zone(struct dn_zone *dz)
default:
printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n",
old_divisor);
- /* fall through */
+ fallthrough;
case 256:
new_divisor = 1024;
new_hashmask = 0x3FF;
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index deae519bdeec..67b5ab2657b7 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -75,7 +75,7 @@ static void strip_it(char *str)
case '\r':
case ':':
*str = 0;
- /* Fallthrough */
+ fallthrough;
case 0:
return;
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 41d60eeefdbd..9af1a2d0cec4 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -2009,7 +2009,7 @@ static int dsa_slave_switchdev_event(struct notifier_block *unused,
switchdev_work->event = event;
switch (event) {
- case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
case SWITCHDEV_FDB_DEL_TO_DEVICE:
if (dsa_slave_switchdev_fdb_work_init(switchdev_work, ptr))
goto err_fdb_work_init;
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index bbe9b3b2d395..be6f06adefe0 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -195,7 +195,7 @@ static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
net_warn_ratelimited("%s: received unknown dispatch\n",
__func__);
- /* fall-through */
+ fallthrough;
default:
/* all others failure */
return NET_RX_DROP;
diff --git a/net/ieee802154/6lowpan/rx.c b/net/ieee802154/6lowpan/rx.c
index b34d050c9687..517e6493f5d1 100644
--- a/net/ieee802154/6lowpan/rx.c
+++ b/net/ieee802154/6lowpan/rx.c
@@ -35,11 +35,11 @@ static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
net_warn_ratelimited("%s: received unknown dispatch\n",
__func__);
- /* fall-through */
+ fallthrough;
case RX_DROP_UNUSABLE:
kfree_skb(skb);
- /* fall-through */
+ fallthrough;
case RX_DROP:
return NET_RX_DROP;
case RX_QUEUED:
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index c89b46fec153..ffc5332f1390 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2121,7 +2121,8 @@ void fib_info_notify_update(struct net *net, struct nl_info *info)
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct fib_table *tb;
- hlist_for_each_entry_rcu(tb, head, tb_hlist)
+ hlist_for_each_entry_rcu(tb, head, tb_hlist,
+ lockdep_rtnl_is_held())
__fib_info_notify_update(net, tb, info);
}
}
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 7afde8828b4c..3f248a19faa3 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -3,7 +3,7 @@
* nf_nat_pptp.c
*
* NAT support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
+ * PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dfba39473b1d..1170653a89cd 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -611,8 +611,8 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
} else if (!ipc.oif) {
ipc.oif = inet->uc_index;
} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
- /* oif is set, packet is to local broadcast and
- * uc_index is set. oif is most likely set
+ /* oif is set, packet is to local broadcast
+ * and uc_index is set. oif is most likely set
* by sk_bound_dev_if. If uc_index != oif check if the
* oif is an L3 master and uc_index is an L3 slave.
* If so, we want to allow the send using the uc_index.
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index fac2135aa47b..5b60a4bdd36a 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -21,6 +21,7 @@
#include <net/calipso.h>
#endif
+static int two = 2;
static int flowlabel_reflect_max = 0x7;
static int auto_flowlabels_min;
static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
@@ -150,7 +151,7 @@ static struct ctl_table ipv6_table_template[] = {
.mode = 0644,
.proc_handler = proc_rt6_multipath_hash_policy,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .extra2 = &two,
},
{
.procname = "seg6_flowlabel",
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 6ee9851ac7c6..a95af62acb52 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -418,7 +418,7 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
- /* fall through */
+ fallthrough;
case IUCV_DISCONN:
sk->sk_state = IUCV_CLOSING;
@@ -433,7 +433,7 @@ static void iucv_sock_close(struct sock *sk)
iucv_sock_in_state(sk, IUCV_CLOSED, 0),
timeo);
}
- /* fall through */
+ fallthrough;
case IUCV_CLOSING:
sk->sk_state = IUCV_CLOSED;
@@ -444,7 +444,7 @@ static void iucv_sock_close(struct sock *sk)
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
- /* fall through */
+ fallthrough;
default:
iucv_sever_path(sk, 1);
@@ -2111,10 +2111,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
kfree_skb(skb);
break;
}
- /* fall through - and receive non-zero length data */
+ fallthrough; /* and receive non-zero length data */
case (AF_IUCV_FLAG_SHT):
/* shutdown request */
- /* fall through - and receive zero length data */
+ fallthrough; /* and receive zero length data */
case 0:
/* plain data frame */
IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
diff --git a/net/l3mdev/l3mdev.c b/net/l3mdev/l3mdev.c
index e71ca5aec684..864326f150e2 100644
--- a/net/l3mdev/l3mdev.c
+++ b/net/l3mdev/l3mdev.c
@@ -154,7 +154,7 @@ int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);
/**
- * l3mdev_fib_table - get FIB table id associated with an L3
+ * l3mdev_fib_table_rcu - get FIB table id associated with an L3
* master interface
* @dev: targeted interface
*/
diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c
index 366f76c9003d..314973033d03 100644
--- a/net/mac80211/airtime.c
+++ b/net/mac80211/airtime.c
@@ -405,18 +405,14 @@ ieee80211_calc_legacy_rate_duration(u16 bitrate, bool short_pre,
return duration;
}
-u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
- struct ieee80211_rx_status *status,
- int len)
+static u32 ieee80211_get_rate_duration(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ u32 *overhead)
{
- struct ieee80211_supported_band *sband;
- const struct ieee80211_rate *rate;
bool sgi = status->enc_flags & RX_ENC_FLAG_SHORT_GI;
- bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
int bw, streams;
int group, idx;
u32 duration;
- bool cck;
switch (status->bw) {
case RATE_INFO_BW_20:
@@ -437,20 +433,6 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
}
switch (status->encoding) {
- case RX_ENC_LEGACY:
- if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
- return 0;
-
- sband = hw->wiphy->bands[status->band];
- if (!sband || status->rate_idx >= sband->n_bitrates)
- return 0;
-
- rate = &sband->bitrates[status->rate_idx];
- cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
-
- return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
- cck, len);
-
case RX_ENC_VHT:
streams = status->nss;
idx = status->rate_idx;
@@ -477,51 +459,144 @@ u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
duration = airtime_mcs_groups[group].duration[idx];
duration <<= airtime_mcs_groups[group].shift;
+ *overhead = 36 + (streams << 2);
+
+ return duration;
+}
+
+
+u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ int len)
+{
+ struct ieee80211_supported_band *sband;
+ u32 duration, overhead = 0;
+
+ if (status->encoding == RX_ENC_LEGACY) {
+ const struct ieee80211_rate *rate;
+ bool sp = status->enc_flags & RX_ENC_FLAG_SHORTPRE;
+ bool cck;
+
+ if (WARN_ON_ONCE(status->band > NL80211_BAND_5GHZ))
+ return 0;
+
+ sband = hw->wiphy->bands[status->band];
+ if (!sband || status->rate_idx >= sband->n_bitrates)
+ return 0;
+
+ rate = &sband->bitrates[status->rate_idx];
+ cck = rate->flags & IEEE80211_RATE_MANDATORY_B;
+
+ return ieee80211_calc_legacy_rate_duration(rate->bitrate, sp,
+ cck, len);
+ }
+
+ duration = ieee80211_get_rate_duration(hw, status, &overhead);
+ if (!duration)
+ return 0;
+
duration *= len;
duration /= AVG_PKT_SIZE;
duration /= 1024;
- duration += 36 + (streams << 2);
-
- return duration;
+ return duration + overhead;
}
EXPORT_SYMBOL_GPL(ieee80211_calc_rx_airtime);
-static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
- struct ieee80211_tx_rate *rate,
- u8 band, int len)
+static bool ieee80211_fill_rate_info(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *stat, u8 band,
+ struct rate_info *ri)
{
- struct ieee80211_rx_status stat = {
- .band = band,
- };
+ struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
+ int i;
- if (rate->idx < 0 || !rate->count)
+ if (!ri || !sband)
+ return false;
+
+ stat->bw = ri->bw;
+ stat->nss = ri->nss;
+ stat->rate_idx = ri->mcs;
+
+ if (ri->flags & RATE_INFO_FLAGS_HE_MCS)
+ stat->encoding = RX_ENC_HE;
+ else if (ri->flags & RATE_INFO_FLAGS_VHT_MCS)
+ stat->encoding = RX_ENC_VHT;
+ else if (ri->flags & RATE_INFO_FLAGS_MCS)
+ stat->encoding = RX_ENC_HT;
+ else
+ stat->encoding = RX_ENC_LEGACY;
+
+ if (ri->flags & RATE_INFO_FLAGS_SHORT_GI)
+ stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ stat->he_gi = ri->he_gi;
+
+ if (stat->encoding != RX_ENC_LEGACY)
+ return true;
+
+ stat->rate_idx = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (ri->legacy != sband->bitrates[i].bitrate)
+ continue;
+
+ stat->rate_idx = i;
+ return true;
+ }
+
+ return false;
+}
+
+static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat,
+ struct ieee80211_hw *hw,
+ struct ieee80211_tx_rate *rate,
+ struct rate_info *ri, u8 band, int len)
+{
+ memset(stat, 0, sizeof(*stat));
+ stat->band = band;
+
+ if (ieee80211_fill_rate_info(hw, stat, band, ri))
return 0;
+ if (rate->idx < 0 || !rate->count)
+ return -1;
+
if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_80;
+ stat->bw = RATE_INFO_BW_80;
else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- stat.bw = RATE_INFO_BW_40;
+ stat->bw = RATE_INFO_BW_40;
else
- stat.bw = RATE_INFO_BW_20;
+ stat->bw = RATE_INFO_BW_20;
- stat.enc_flags = 0;
+ stat->enc_flags = 0;
if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- stat.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+ stat->enc_flags |= RX_ENC_FLAG_SHORTPRE;
if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- stat.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ stat->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- stat.rate_idx = rate->idx;
+ stat->rate_idx = rate->idx;
if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
- stat.encoding = RX_ENC_VHT;
- stat.rate_idx = ieee80211_rate_get_vht_mcs(rate);
- stat.nss = ieee80211_rate_get_vht_nss(rate);
+ stat->encoding = RX_ENC_VHT;
+ stat->rate_idx = ieee80211_rate_get_vht_mcs(rate);
+ stat->nss = ieee80211_rate_get_vht_nss(rate);
} else if (rate->flags & IEEE80211_TX_RC_MCS) {
- stat.encoding = RX_ENC_HT;
+ stat->encoding = RX_ENC_HT;
} else {
- stat.encoding = RX_ENC_LEGACY;
+ stat->encoding = RX_ENC_LEGACY;
}
+ return 0;
+}
+
+static u32 ieee80211_calc_tx_airtime_rate(struct ieee80211_hw *hw,
+ struct ieee80211_tx_rate *rate,
+ struct rate_info *ri,
+ u8 band, int len)
+{
+ struct ieee80211_rx_status stat;
+
+ if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+ return 0;
+
return ieee80211_calc_rx_airtime(hw, &stat, len);
}
@@ -536,7 +611,7 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
struct ieee80211_tx_rate *rate = &info->status.rates[i];
u32 cur_duration;
- cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate,
+ cur_duration = ieee80211_calc_tx_airtime_rate(hw, rate, NULL,
info->band, len);
if (!cur_duration)
break;
@@ -572,26 +647,41 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw,
if (pubsta) {
struct sta_info *sta = container_of(pubsta, struct sta_info,
sta);
+ struct ieee80211_rx_status stat;
struct ieee80211_tx_rate *rate = &sta->tx_stats.last_rate;
- u32 airtime;
+ struct rate_info *ri = &sta->tx_stats.last_rate_info;
+ u32 duration, overhead;
+ u8 agg_shift;
- if (!(rate->flags & (IEEE80211_TX_RC_VHT_MCS |
- IEEE80211_TX_RC_MCS)))
- ampdu = false;
+ if (ieee80211_fill_rx_status(&stat, hw, rate, ri, band, len))
+ return 0;
+ if (stat.encoding == RX_ENC_LEGACY || !ampdu)
+ return ieee80211_calc_rx_airtime(hw, &stat, len);
+
+ duration = ieee80211_get_rate_duration(hw, &stat, &overhead);
/*
* Assume that HT/VHT transmission on any AC except VO will
* use aggregation. Since we don't have reliable reporting
- * of aggregation length, assume an average of 16.
+ * of aggregation length, assume an average size based on the
+ * tx rate.
* This will not be very accurate, but much better than simply
- * assuming un-aggregated tx.
+ * assuming un-aggregated tx in all cases.
*/
- airtime = ieee80211_calc_tx_airtime_rate(hw, rate, band,
- ampdu ? len * 16 : len);
- if (ampdu)
- airtime /= 16;
-
- return airtime;
+ if (duration > 400) /* <= VHT20 MCS2 1S */
+ agg_shift = 1;
+ else if (duration > 250) /* <= VHT20 MCS3 1S or MCS1 2S */
+ agg_shift = 2;
+ else if (duration > 150) /* <= VHT20 MCS5 1S or MCS3 2S */
+ agg_shift = 3;
+ else
+ agg_shift = 4;
+
+ duration *= len;
+ duration /= AVG_PKT_SIZE;
+ duration /= 1024;
+
+ return duration + (overhead >> agg_shift);
}
if (!conf)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 9d398c9daa4c..d5010116cf4d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -524,7 +524,7 @@ struct ieee80211_sta_rx_stats {
* @status_stats.retry_failed: # of frames that failed after retry
* @status_stats.retry_count: # of retries attempted
* @status_stats.lost_packets: # of lost packets
- * @status_stats.last_tdls_pkt_time: timestamp of last TDLS packet
+ * @status_stats.last_pkt_time: timestamp of last ACKed packet
* @status_stats.msdu_retries: # of MSDU retries
* @status_stats.msdu_failed: # of failed MSDUs
* @status_stats.last_ack: last ack timestamp (jiffies)
@@ -597,7 +597,7 @@ struct sta_info {
unsigned long filtered;
unsigned long retry_failed, retry_count;
unsigned int lost_packets;
- unsigned long last_tdls_pkt_time;
+ unsigned long last_pkt_time;
u64 msdu_retries[IEEE80211_NUM_TIDS + 1];
u64 msdu_failed[IEEE80211_NUM_TIDS + 1];
unsigned long last_ack;
@@ -611,6 +611,7 @@ struct sta_info {
u64 packets[IEEE80211_NUM_ACS];
u64 bytes[IEEE80211_NUM_ACS];
struct ieee80211_tx_rate last_rate;
+ struct rate_info last_rate_info;
u64 msdu[IEEE80211_NUM_TIDS + 1];
} tx_stats;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index adb1d30ce06e..0794396a7988 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -755,12 +755,16 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
* - current throughput (higher value for higher tpt)?
*/
#define STA_LOST_PKT_THRESHOLD 50
+#define STA_LOST_PKT_TIME HZ /* 1 sec since last ACK */
#define STA_LOST_TDLS_PKT_THRESHOLD 10
#define STA_LOST_TDLS_PKT_TIME (10*HZ) /* 10secs since last ACK */
static void ieee80211_lost_packet(struct sta_info *sta,
struct ieee80211_tx_info *info)
{
+ unsigned long pkt_time = STA_LOST_PKT_TIME;
+ unsigned int pkt_thr = STA_LOST_PKT_THRESHOLD;
+
/* If driver relies on its own algorithm for station kickout, skip
* mac80211 packet loss mechanism.
*/
@@ -773,21 +777,20 @@ static void ieee80211_lost_packet(struct sta_info *sta,
return;
sta->status_stats.lost_packets++;
- if (!sta->sta.tdls &&
- sta->status_stats.lost_packets < STA_LOST_PKT_THRESHOLD)
- return;
+ if (sta->sta.tdls) {
+ pkt_time = STA_LOST_TDLS_PKT_TIME;
+ pkt_thr = STA_LOST_PKT_THRESHOLD;
+ }
/*
* If we're in TDLS mode, make sure that all STA_LOST_TDLS_PKT_THRESHOLD
* of the last packets were lost, and that no ACK was received in the
* last STA_LOST_TDLS_PKT_TIME ms, before triggering the CQM packet-loss
* mechanism.
+ * For non-TDLS, use STA_LOST_PKT_THRESHOLD and STA_LOST_PKT_TIME
*/
- if (sta->sta.tdls &&
- (sta->status_stats.lost_packets < STA_LOST_TDLS_PKT_THRESHOLD ||
- time_before(jiffies,
- sta->status_stats.last_tdls_pkt_time +
- STA_LOST_TDLS_PKT_TIME)))
+ if (sta->status_stats.lost_packets < pkt_thr ||
+ !time_after(jiffies, sta->status_stats.last_pkt_time + pkt_time))
return;
cfg80211_cqm_pktloss_notify(sta->sdata->dev, sta->sta.addr,
@@ -1033,9 +1036,7 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
sta->status_stats.lost_packets = 0;
/* Track when last TDLS packet was ACKed */
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time =
- jiffies;
+ sta->status_stats.last_pkt_time = jiffies;
} else if (noack_success) {
/* nothing to do here, do not account as lost */
} else {
@@ -1137,9 +1138,17 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = status->info;
struct ieee80211_sta *pubsta = status->sta;
struct ieee80211_supported_band *sband;
+ struct sta_info *sta;
int retry_count;
bool acked, noack_success;
+ if (pubsta) {
+ sta = container_of(pubsta, struct sta_info, sta);
+
+ if (status->rate)
+ sta->tx_stats.last_rate_info = *status->rate;
+ }
+
if (status->skb)
return __ieee80211_tx_status(hw, status);
@@ -1154,10 +1163,6 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
if (pubsta) {
- struct sta_info *sta;
-
- sta = container_of(pubsta, struct sta_info, sta);
-
if (!acked && !noack_success)
sta->status_stats.retry_failed++;
sta->status_stats.retry_count += retry_count;
@@ -1168,9 +1173,8 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
if (sta->status_stats.lost_packets)
sta->status_stats.lost_packets = 0;
- /* Track when last TDLS packet was ACKed */
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time = jiffies;
+ /* Track when last packet was ACKed */
+ sta->status_stats.last_pkt_time = jiffies;
} else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
return;
} else if (noack_success) {
@@ -1259,8 +1263,7 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
if (sta->status_stats.lost_packets)
sta->status_stats.lost_packets = 0;
- if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
- sta->status_stats.last_tdls_pkt_time = jiffies;
+ sta->status_stats.last_pkt_time = jiffies;
} else {
ieee80211_lost_packet(sta, info);
}
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 6fdd0c9f865a..f2868a8a50c3 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1516,7 +1516,7 @@ static void mpls_ifdown(struct net_device *dev, int event)
case NETDEV_DOWN:
case NETDEV_UNREGISTER:
nh_flags |= RTNH_F_DEAD;
- /* fall through */
+ fallthrough;
case NETDEV_CHANGE:
nh_flags |= RTNH_F_LINKDOWN;
break;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index e6216c4f308c..683196225f91 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -191,7 +191,6 @@ static void mptcp_check_data_fin_ack(struct sock *sk)
sk->sk_state_change(sk);
break;
case TCP_CLOSING:
- fallthrough;
case TCP_LAST_ACK:
inet_sk_state_store(sk, TCP_CLOSE);
sk->sk_state_change(sk);
@@ -890,7 +889,6 @@ restart:
goto out;
}
-wait_for_sndbuf:
__mptcp_flush_join_list(msk);
ssk = mptcp_subflow_get_send(msk);
while (!sk_stream_memory_free(sk) ||
@@ -980,7 +978,7 @@ wait_for_sndbuf:
*/
mptcp_set_timeout(sk, ssk);
release_sock(ssk);
- goto wait_for_sndbuf;
+ goto restart;
}
}
}
@@ -1539,7 +1537,7 @@ static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
case TCP_LISTEN:
if (!(how & RCV_SHUTDOWN))
break;
- /* fall through */
+ fallthrough;
case TCP_SYN_SENT:
tcp_disconnect(ssk, O_NONBLOCK);
break;
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 1f387be7827b..f1be3e3f6425 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -474,7 +474,7 @@ static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
switch (nd->state) {
case ncsi_dev_state_suspend:
nd->state = ncsi_dev_state_suspend_select;
- /* Fall through */
+ fallthrough;
case ncsi_dev_state_suspend_select:
ndp->pending_req_num = 1;
@@ -1302,7 +1302,7 @@ static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
switch (nd->state) {
case ncsi_dev_state_probe:
nd->state = ncsi_dev_state_probe_deselect;
- /* Fall through */
+ fallthrough;
case ncsi_dev_state_probe_deselect:
ndp->pending_req_num = 8;
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 32b028853a7c..dc2e7da2742a 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -315,7 +315,7 @@ tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
switch (skb->ip_summed) {
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, tcphoff, skb->len - tcphoff, 0);
- /* fall through */
+ fallthrough;
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 153d89647c87..68260d91c988 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -318,7 +318,7 @@ udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
case CHECKSUM_NONE:
skb->csum = skb_checksum(skb, udphoff,
skb->len - udphoff, 0);
- /* fall through */
+ fallthrough;
case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6) {
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 1f44d523b512..5105d4250012 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Connection tracking support for PPTP (Point to Point Tunneling Protocol).
- * PPTP is a a protocol for creating virtual private networks.
+ * PPTP is a protocol for creating virtual private networks.
* It is a specification defined by Microsoft and some vendors
* working with Microsoft. PPTP is built on top of a modified
* version of the Internet Generic Routing Encapsulation Protocol.
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 4f897b14b606..810cca24b399 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -62,6 +62,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
[SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS,
};
+#define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1
+
#define sNO SCTP_CONNTRACK_NONE
#define sCL SCTP_CONNTRACK_CLOSED
#define sCW SCTP_CONNTRACK_COOKIE_WAIT
@@ -369,6 +371,7 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
u_int32_t offset, count;
unsigned int *timeouts;
unsigned long map[256 / sizeof(unsigned long)] = { 0 };
+ bool ignore = false;
if (sctp_error(skb, dataoff, state))
return -NF_ACCEPT;
@@ -427,15 +430,39 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
/* Sec 8.5.1 (D) */
if (sh->vtag != ct->proto.sctp.vtag[dir])
goto out_unlock;
- } else if (sch->type == SCTP_CID_HEARTBEAT ||
- sch->type == SCTP_CID_HEARTBEAT_ACK) {
+ } else if (sch->type == SCTP_CID_HEARTBEAT) {
+ if (ct->proto.sctp.vtag[dir] == 0) {
+ pr_debug("Setting %d vtag %x for dir %d\n", sch->type, sh->vtag, dir);
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ } else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ ct->proto.sctp.flags |= SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.last_dir = dir;
+ ignore = true;
+ continue;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ }
+ } else if (sch->type == SCTP_CID_HEARTBEAT_ACK) {
if (ct->proto.sctp.vtag[dir] == 0) {
pr_debug("Setting vtag %x for dir %d\n",
sh->vtag, dir);
ct->proto.sctp.vtag[dir] = sh->vtag;
} else if (sh->vtag != ct->proto.sctp.vtag[dir]) {
- pr_debug("Verification tag check failed\n");
- goto out_unlock;
+ if (test_bit(SCTP_CID_DATA, map) || ignore)
+ goto out_unlock;
+
+ if ((ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) == 0 ||
+ ct->proto.sctp.last_dir == dir)
+ goto out_unlock;
+
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
+ ct->proto.sctp.vtag[dir] = sh->vtag;
+ ct->proto.sctp.vtag[!dir] = 0;
+ } else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
+ ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
}
}
@@ -470,6 +497,10 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
}
spin_unlock_bh(&ct->lock);
+ /* allow but do not refresh timeout */
+ if (ignore)
+ return NF_ACCEPT;
+
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 6892e497781c..e8c86ee4c1c4 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1152,7 +1152,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
&& (old_state == TCP_CONNTRACK_SYN_RECV
|| old_state == TCP_CONNTRACK_ESTABLISHED)
&& new_state == TCP_CONNTRACK_ESTABLISHED) {
- /* Set ASSURED if we see see valid ack in ESTABLISHED
+ /* Set ASSURED if we see valid ack in ESTABLISHED
after SYN_RECV or a valid answer for a picked up
connection. */
set_bit(IPS_ASSURED_BIT, &ct->status);
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 760ca2422816..af402f458ee0 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -81,18 +81,6 @@ static bool udp_error(struct sk_buff *skb,
return false;
}
-static void nf_conntrack_udp_refresh_unreplied(struct nf_conn *ct,
- struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- u32 extra_jiffies)
-{
- if (unlikely(ctinfo == IP_CT_ESTABLISHED_REPLY &&
- ct->status & IPS_NAT_CLASH))
- nf_ct_kill(ct);
- else
- nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies);
-}
-
/* Returns verdict for packet, and may modify conntracktype */
int nf_conntrack_udp_packet(struct nf_conn *ct,
struct sk_buff *skb,
@@ -124,12 +112,15 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
nf_ct_refresh_acct(ct, ctinfo, skb, extra);
+ /* never set ASSURED for IPS_NAT_CLASH, they time out soon */
+ if (unlikely((ct->status & IPS_NAT_CLASH)))
+ return NF_ACCEPT;
+
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
@@ -206,12 +197,15 @@ int nf_conntrack_udplite_packet(struct nf_conn *ct,
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb,
timeouts[UDP_CT_REPLIED]);
+
+ if (unlikely((ct->status & IPS_NAT_CLASH)))
+ return NF_ACCEPT;
+
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index fd814e514f94..b7dc1cbf40ea 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -815,11 +815,11 @@ static int nf_tables_gettable(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWTABLE, 0,
family, table);
if (err < 0)
- goto err;
+ goto err_fill_table_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_table_info:
kfree_skb(skb2);
return err;
}
@@ -1563,11 +1563,11 @@ static int nf_tables_getchain(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, 0,
family, table, chain);
if (err < 0)
- goto err;
+ goto err_fill_chain_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_chain_info:
kfree_skb(skb2);
return err;
}
@@ -3008,11 +3008,11 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWRULE, 0,
family, table, chain, rule, NULL);
if (err < 0)
- goto err;
+ goto err_fill_rule_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_rule_info:
kfree_skb(skb2);
return err;
}
@@ -3770,7 +3770,8 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
goto nla_put_failure;
}
- if (nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
+ if (set->udata &&
+ nla_put(skb, NFTA_SET_USERDATA, set->udlen, set->udata))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_SET_DESC);
@@ -3967,11 +3968,11 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
err = nf_tables_fill_set(skb2, &ctx, set, NFT_MSG_NEWSET, 0);
if (err < 0)
- goto err;
+ goto err_fill_set_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
-err:
+err_fill_set_info:
kfree_skb(skb2);
return err;
}
@@ -4859,24 +4860,18 @@ static int nft_get_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err = -ENOMEM;
skb = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (skb == NULL)
- goto err1;
+ return err;
err = nf_tables_fill_setelem_info(skb, ctx, ctx->seq, ctx->portid,
NFT_MSG_NEWSETELEM, 0, set, &elem);
if (err < 0)
- goto err2;
+ goto err_fill_setelem;
- err = nfnetlink_unicast(skb, ctx->net, ctx->portid, MSG_DONTWAIT);
- /* This avoids a loop in nfnetlink. */
- if (err < 0)
- goto err1;
+ return nfnetlink_unicast(skb, ctx->net, ctx->portid);
- return 0;
-err2:
+err_fill_setelem:
kfree_skb(skb);
-err1:
- /* this avoids a loop in nfnetlink. */
- return err == -EAGAIN ? -ENOBUFS : err;
+ return err;
}
/* called with rcu_read_lock held */
@@ -6181,10 +6176,11 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
nlh->nlmsg_seq, NFT_MSG_NEWOBJ, 0,
family, table, obj, reset);
if (err < 0)
- goto err;
+ goto err_fill_obj_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_obj_info:
kfree_skb(skb2);
return err;
}
@@ -7044,10 +7040,11 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
NFT_MSG_NEWFLOWTABLE, 0, family,
flowtable, &flowtable->hook_list);
if (err < 0)
- goto err;
+ goto err_fill_flowtable_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_flowtable_info:
kfree_skb(skb2);
return err;
}
@@ -7233,10 +7230,11 @@ static int nf_tables_getgen(struct net *net, struct sock *nlsk,
err = nf_tables_fill_gen_info(skb2, net, NETLINK_CB(skb).portid,
nlh->nlmsg_seq);
if (err < 0)
- goto err;
+ goto err_fill_gen_info;
- return nlmsg_unicast(nlsk, skb2, NETLINK_CB(skb).portid);
-err:
+ return nfnetlink_unicast(skb2, net, NETLINK_CB(skb).portid);
+
+err_fill_gen_info:
kfree_skb(skb2);
return err;
}
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 5f24edf95830..3a2e64e13b22 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -149,10 +149,15 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
}
EXPORT_SYMBOL_GPL(nfnetlink_set_err);
-int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
- int flags)
+int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid)
{
- return netlink_unicast(net->nfnl, skb, portid, flags);
+ int err;
+
+ err = nlmsg_unicast(net->nfnl, skb, portid);
+ if (err == -EAGAIN)
+ err = -ENOBUFS;
+
+ return err;
}
EXPORT_SYMBOL_GPL(nfnetlink_unicast);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index f02992419850..b35e8d9a5b37 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -356,8 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
goto out;
}
}
- nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
- MSG_DONTWAIT);
+ nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid);
out:
inst->qlen = 0;
inst->skb = NULL;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index dadfc06245a3..d1d8bca03b4f 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -681,7 +681,7 @@ __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
*packet_id_ptr = htonl(entry->id);
/* nfnetlink_unicast will either free the nskb or add it to a socket */
- err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
+ err = nfnetlink_unicast(nskb, net, queue->peer_portid);
if (err < 0) {
if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
failopen = 1;
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 3b9b97aa4b32..3a6c84fb2c90 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -102,7 +102,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
}
if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
- ct->status & IPS_SEQ_ADJUST)
+ ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
goto out;
if (!nf_ct_is_confirmed(ct))
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index ed7cb9f747f6..7a2e59638499 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -87,7 +87,9 @@ void nft_payload_eval(const struct nft_expr *expr,
u32 *dest = &regs->data[priv->dreg];
int offset;
- dest[priv->len / NFT_REG32_SIZE] = 0;
+ if (priv->len % NFT_REG32_SIZE)
+ dest[priv->len / NFT_REG32_SIZE] = 0;
+
switch (priv->base) {
case NFT_PAYLOAD_LL_HEADER:
if (!skb_mac_header_was_set(skb))
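nft_payload_eval() copies priv->len payload bytes into 32-bit destination registers, so only a trailing partial word can leave stale bytes that need pre-clearing; when the length is an exact multiple of NFT_REG32_SIZE, the old unconditional store zeroed the register just past the copied data instead. A small userspace sketch of the padding rule (plain arrays, not the real nft register file):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REG32_SIZE 4

/* Copy 'len' bytes into 32-bit registers, zero-padding only a trailing
 * partial word.  Zeroing dest[len / 4] unconditionally would also clobber
 * the register *after* the data whenever len is a multiple of 4. */
static void load_payload(uint32_t *dest, const uint8_t *src, unsigned int len)
{
    if (len % REG32_SIZE)
        dest[len / REG32_SIZE] = 0;
    memcpy(dest, src, len);
}

int main(void)
{
    uint32_t regs[4] = { 0, 0, 0xdeadbeef, 0 };  /* regs[2] belongs to someone else */
    const uint8_t payload[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

    load_payload(regs, payload, 8);   /* len % 4 == 0: regs[2] stays untouched */
    printf("regs[2] = 0x%08x\n", (unsigned int)regs[2]);

    load_payload(regs, payload, 6);   /* len % 4 == 2: regs[1] is pre-cleared */
    printf("regs[1] = 0x%08x\n", (unsigned int)regs[1]);
    return 0;
}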
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 4b2834fd17b2..217ab3644c25 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -218,11 +218,11 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
struct nft_rbtree_elem *new,
struct nft_set_ext **ext)
{
+ bool overlap = false, dup_end_left = false, dup_end_right = false;
struct nft_rbtree *priv = nft_set_priv(set);
u8 genmask = nft_genmask_next(net);
struct nft_rbtree_elem *rbe;
struct rb_node *parent, **p;
- bool overlap = false;
int d;
/* Detect overlaps as we descend the tree. Set the flag in these cases:
@@ -238,24 +238,44 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
*
* b1. _ _ __>| !_ _ __| (insert end before existing start)
* b2. _ _ ___| !_ _ _>| (insert end after existing start)
- * b3. _ _ ___! >|_ _ __| (insert start after existing end)
+ * b3. _ _ ___! >|_ _ __| (insert start after existing end, as a leaf)
+ * '--' no nodes falling in this range
+ * b4. >|_ _ ! (insert start before existing start)
*
* Case a3. resolves to b3.:
* - if the inserted start element is the leftmost, because the '0'
* element in the tree serves as end element
- * - otherwise, if an existing end is found. Note that end elements are
- * always inserted after corresponding start elements.
+ * - otherwise, if an existing end is found immediately to the left. If
+ * there are existing nodes in between, we need to further descend the
+ * tree before we can conclude the new start isn't causing an overlap
+ *
+ * or to b4., which, preceded by a3., means we already traversed one or
+ * more existing intervals entirely, from the right.
*
* For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
* in that order.
*
* The flag is also cleared in two special cases:
*
- * b4. |__ _ _!|<_ _ _ (insert start right before existing end)
- * b5. |__ _ >|!__ _ _ (insert end right after existing start)
+ * b5. |__ _ _!|<_ _ _ (insert start right before existing end)
+ * b6. |__ _ >|!__ _ _ (insert end right after existing start)
*
* which always happen as last step and imply that no further
* overlapping is possible.
+ *
+ * Another special case comes from the fact that start elements matching
+ * an already existing start element are allowed: insertion is not
+ * performed but we return -EEXIST in that case, and the error will be
+ * cleared by the caller if NLM_F_EXCL is not present in the request.
+ * This way, request for insertion of an exact overlap isn't reported as
+ * error to userspace if not desired.
+ *
+ * However, if the existing start matches a pre-existing start, but the
+ * end element doesn't match the corresponding pre-existing end element,
+ * we need to report a partial overlap. This is a local condition that
+ * can be noticed without need for a tracking flag, by checking for a
+ * local duplicated end for a corresponding start, from left and right,
+ * separately.
*/
parent = NULL;
@@ -272,26 +292,41 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
if (nft_rbtree_interval_start(new)) {
if (nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext, genmask) &&
- !nft_set_elem_expired(&rbe->ext))
+ !nft_set_elem_expired(&rbe->ext) && !*p)
overlap = false;
} else {
+ if (dup_end_left && !*p)
+ return -ENOTEMPTY;
+
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
genmask) &&
!nft_set_elem_expired(&rbe->ext);
+
+ if (overlap) {
+ dup_end_right = true;
+ continue;
+ }
}
} else if (d > 0) {
p = &parent->rb_right;
if (nft_rbtree_interval_end(new)) {
+ if (dup_end_right && !*p)
+ return -ENOTEMPTY;
+
overlap = nft_rbtree_interval_end(rbe) &&
nft_set_elem_active(&rbe->ext,
genmask) &&
!nft_set_elem_expired(&rbe->ext);
- } else if (nft_rbtree_interval_end(rbe) &&
- nft_set_elem_active(&rbe->ext, genmask) &&
+
+ if (overlap) {
+ dup_end_left = true;
+ continue;
+ }
+ } else if (nft_set_elem_active(&rbe->ext, genmask) &&
!nft_set_elem_expired(&rbe->ext)) {
- overlap = true;
+ overlap = nft_rbtree_interval_end(rbe);
}
} else {
if (nft_rbtree_interval_end(rbe) &&
@@ -316,6 +351,8 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
p = &parent->rb_left;
}
}
+
+ dup_end_left = dup_end_right = false;
}
if (overlap)
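The rbtree hunk tracks duplicate end elements seen while descending (dup_end_left/dup_end_right) so that a new element whose start matches an existing start but whose end does not is rejected with -ENOTEMPTY as a partial overlap, while an exact duplicate is still reported as -EEXIST, which the caller clears unless NLM_F_EXCL was requested. A simplified model of that decision over a flat, sorted list of non-overlapping half-open intervals (no generation masks, no tree descent):

#include <stdio.h>

struct interval { unsigned int start, end; };   /* half-open [start, end) */

enum verdict { INSERT_OK, INSERT_EXISTS, INSERT_OVERLAP };

/* Decide whether [start, end) may be inserted next to existing,
 * non-overlapping intervals: exact duplicates are tolerated, partial
 * overlaps are rejected, anything disjoint is fine. */
static enum verdict check_insert(const struct interval *set, int n,
                                 unsigned int start, unsigned int end)
{
    for (int i = 0; i < n; i++) {
        if (start == set[i].start && end == set[i].end)
            return INSERT_EXISTS;               /* exact duplicate */
        if (start < set[i].end && set[i].start < end)
            return INSERT_OVERLAP;              /* partial overlap */
    }
    return INSERT_OK;
}

int main(void)
{
    const struct interval set[] = { { 10, 20 }, { 30, 40 } };

    printf("%d\n", check_insert(set, 2, 30, 40));  /* 1: INSERT_EXISTS */
    printf("%d\n", check_insert(set, 2, 30, 35));  /* 2: INSERT_OVERLAP */
    printf("%d\n", check_insert(set, 2, 20, 30));  /* 0: INSERT_OK */
    return 0;
}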
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 19bef176145e..606411869698 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -640,7 +640,7 @@ static void __net_exit recent_proc_net_exit(struct net *net)
struct recent_table *t;
/* recent_net_exit() is called before recent_mt_destroy(). Make sure
- * that the parent xt_recent proc entry is is empty before trying to
+ * that the parent xt_recent proc entry is empty before trying to
* remove it.
*/
spin_lock_bh(&recent_lock);
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 38aaeadec13d..dc8c39f51f7d 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -85,6 +85,7 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
kfree(netlbl_domhsh_addr6_entry(iter6));
}
#endif /* IPv6 */
+ kfree(ptr->def.addrsel);
}
kfree(ptr->domain);
kfree(ptr);
@@ -537,6 +538,8 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
goto add_return;
}
#endif /* IPv6 */
+ /* cleanup the new entry since we've moved everything over */
+ netlbl_domhsh_free_entry(&entry->rcu);
} else
ret_val = -EINVAL;
@@ -580,6 +583,12 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
{
int ret_val = 0;
struct audit_buffer *audit_buf;
+ struct netlbl_af4list *iter4;
+ struct netlbl_domaddr4_map *map4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct netlbl_af6list *iter6;
+ struct netlbl_domaddr6_map *map6;
+#endif /* IPv6 */
if (entry == NULL)
return -ENOENT;
@@ -597,6 +606,9 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
ret_val = -ENOENT;
spin_unlock(&netlbl_domhsh_lock);
+ if (ret_val)
+ return ret_val;
+
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf,
@@ -605,40 +617,29 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
audit_log_end(audit_buf);
}
- if (ret_val == 0) {
- struct netlbl_af4list *iter4;
- struct netlbl_domaddr4_map *map4;
-#if IS_ENABLED(CONFIG_IPV6)
- struct netlbl_af6list *iter6;
- struct netlbl_domaddr6_map *map6;
-#endif /* IPv6 */
-
- switch (entry->def.type) {
- case NETLBL_NLTYPE_ADDRSELECT:
- netlbl_af4list_foreach_rcu(iter4,
- &entry->def.addrsel->list4) {
- map4 = netlbl_domhsh_addr4_entry(iter4);
- cipso_v4_doi_putdef(map4->def.cipso);
- }
+ switch (entry->def.type) {
+ case NETLBL_NLTYPE_ADDRSELECT:
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
+ map4 = netlbl_domhsh_addr4_entry(iter4);
+ cipso_v4_doi_putdef(map4->def.cipso);
+ }
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->def.addrsel->list6) {
- map6 = netlbl_domhsh_addr6_entry(iter6);
- calipso_doi_putdef(map6->def.calipso);
- }
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
+ map6 = netlbl_domhsh_addr6_entry(iter6);
+ calipso_doi_putdef(map6->def.calipso);
+ }
#endif /* IPv6 */
- break;
- case NETLBL_NLTYPE_CIPSOV4:
- cipso_v4_doi_putdef(entry->def.cipso);
- break;
+ break;
+ case NETLBL_NLTYPE_CIPSOV4:
+ cipso_v4_doi_putdef(entry->def.cipso);
+ break;
#if IS_ENABLED(CONFIG_IPV6)
- case NETLBL_NLTYPE_CALIPSO:
- calipso_doi_putdef(entry->def.calipso);
- break;
+ case NETLBL_NLTYPE_CALIPSO:
+ calipso_doi_putdef(entry->def.calipso);
+ break;
#endif /* IPv6 */
- }
- call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
}
+ call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
return ret_val;
}
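The netlabel changes give netlbl_domhsh_free_entry() ownership of every sub-allocation, including the def.addrsel selector, and reuse that single release routine both when a new entry's contents have been merged into an existing entry on add and when an entry is removed. A minimal userspace sketch of the one-free-routine ownership pattern with hypothetical types (the removal path above defers the actual free through call_rcu(); RCU is omitted here):

#include <stdlib.h>
#include <string.h>

/* Hypothetical entry owning a couple of sub-allocations. */
struct entry {
    char *domain;
    struct addr_sel { int naddrs; } *addrsel;
};

/* The one routine that frees everything the entry owns.  Both the
 * "merged into an existing entry" path and the removal path call it,
 * so no sub-allocation can be leaked by one path or double-freed by
 * the other. */
static void entry_free(struct entry *e)
{
    if (!e)
        return;
    free(e->addrsel);
    free(e->domain);
    free(e);
}

static struct entry *entry_alloc(const char *domain)
{
    struct entry *e = calloc(1, sizeof(*e));

    if (!e)
        return NULL;
    e->domain = malloc(strlen(domain) + 1);
    e->addrsel = calloc(1, sizeof(*e->addrsel));
    if (!e->domain || !e->addrsel) {
        entry_free(e);                  /* partial allocation, same cleanup */
        return NULL;
    }
    strcpy(e->domain, domain);
    return e;
}

int main(void)
{
    entry_free(entry_alloc("example.org"));
    return 0;
}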
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5cee1d0eaebe..f9efd2c1cb50 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -353,7 +353,7 @@ static void netlink_rcv_wake(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
- if (skb_queue_empty(&sk->sk_receive_queue))
+ if (skb_queue_empty_lockless(&sk->sk_receive_queue))
clear_bit(NETLINK_S_CONGESTED, &nlk->state);
if (!test_bit(NETLINK_S_CONGESTED, &nlk->state))
wake_up_interruptible(&nlk->wait);
diff --git a/net/netlink/policy.c b/net/netlink/policy.c
index 5c9e7530865f..62f977fa645a 100644
--- a/net/netlink/policy.c
+++ b/net/netlink/policy.c
@@ -188,7 +188,7 @@ send_attribute:
goto next;
case NLA_NESTED:
type = NL_ATTR_TYPE_NESTED;
- /* fall through */
+ fallthrough;
case NLA_NESTED_ARRAY:
if (pt->type == NLA_NESTED_ARRAY)
type = NL_ATTR_TYPE_NESTED_ARRAY;
diff --git a/net/netrom/nr_in.c b/net/netrom/nr_in.c
index 2bef3779f893..69e58906c32b 100644
--- a/net/netrom/nr_in.c
+++ b/net/netrom/nr_in.c
@@ -122,7 +122,7 @@ static int nr_state2_machine(struct sock *sk, struct sk_buff *skb,
case NR_DISCREQ:
nr_write_internal(sk, NR_DISCACK);
- /* fall through */
+ fallthrough;
case NR_DISCACK:
nr_disconnect(sk, 0);
break;
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 0891ee02ca4f..78da5eab252a 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -263,7 +263,7 @@ static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
case 3:
re_sort_routes(nr_node, 0, 1);
re_sort_routes(nr_node, 1, 2);
- /* fall through */
+ fallthrough;
case 2:
re_sort_routes(nr_node, 0, 1);
case 1:
@@ -356,7 +356,7 @@ static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct n
switch (i) {
case 0:
nr_node->routes[0] = nr_node->routes[1];
- /* fall through */
+ fallthrough;
case 1:
nr_node->routes[1] = nr_node->routes[2];
case 2:
@@ -479,7 +479,7 @@ static int nr_dec_obs(void)
switch (i) {
case 0:
s->routes[0] = s->routes[1];
- /* Fallthrough */
+ fallthrough;
case 1:
s->routes[1] = s->routes[2];
case 2:
@@ -526,7 +526,7 @@ void nr_rt_device_down(struct net_device *dev)
switch (i) {
case 0:
t->routes[0] = t->routes[1];
- /* fall through */
+ fallthrough;
case 1:
t->routes[1] = t->routes[2];
case 2:
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 98d393e70de3..a3f1204f1ed2 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -778,7 +778,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
}
}
/* Non-ICMP, fall thru to initialize if needed. */
- /* fall through */
+ fallthrough;
case IP_CT_NEW:
/* Seen it before? This can happen for loopback, retrans,
* or local packets.
@@ -1540,7 +1540,7 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
switch (type) {
case OVS_CT_ATTR_FORCE_COMMIT:
info->force = true;
- /* fall through. */
+ fallthrough;
case OVS_CT_ATTR_COMMIT:
info->commit = true;
break;
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 03942c30d83e..b03d142ec82e 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -675,7 +675,7 @@ static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key)
case -EINVAL:
memset(&key->ip, 0, sizeof(key->ip));
memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
- /* fall-through */
+ fallthrough;
case -EPROTO:
skb->transport_header = skb->network_header;
error = 0;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 479c257ded73..2b33e977a905 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2170,7 +2170,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_USER;
- unsigned short macoff, netoff, hdrlen;
+ unsigned short macoff, hdrlen;
+ unsigned int netoff;
struct sk_buff *copy_skb = NULL;
struct timespec64 ts;
__u32 ts_status;
@@ -2239,6 +2240,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
macoff = netoff - maclen;
}
+ if (netoff > USHRT_MAX) {
+ atomic_inc(&po->tp_drops);
+ goto drop_n_restore;
+ }
if (po->tp_version <= TPACKET_V2) {
if (macoff + snaplen > po->rx_ring.frame_size) {
if (po->copy_thresh &&
@@ -4061,7 +4066,7 @@ static int packet_notifier(struct notifier_block *this,
case NETDEV_UNREGISTER:
if (po->mclist)
packet_dev_mclist_delete(dev, &po->mclist);
- /* fallthrough */
+ fallthrough;
case NETDEV_DOWN:
if (dev->ifindex == po->ifindex) {
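tpacket_rcv() now computes the network header offset in an unsigned int and drops the packet when the result exceeds USHRT_MAX, instead of letting the sum silently wrap in the old unsigned short and place the headers at a bogus offset inside the ring frame. A small sketch of the widen-then-bounds-check pattern with made-up header sizes (the real calculation also involves TPACKET_ALIGN() and the socket's reservation):

#include <limits.h>
#include <stdio.h>

/* Compute an offset in a wider type and range-check it before it is used
 * as a 16-bit field; returns -1 when it would not fit. */
static int compute_netoff(unsigned int hdrlen, unsigned int reserve,
                          unsigned int maclen)
{
    unsigned int netoff = hdrlen + reserve + maclen;   /* no u16 wrap-around */

    if (netoff > USHRT_MAX)
        return -1;                     /* drop instead of truncating */
    return (int)netoff;
}

int main(void)
{
    printf("%d\n", compute_netoff(48, 0, 14));       /* fits: 62 */
    printf("%d\n", compute_netoff(48, 65500, 64));   /* would wrap a u16: -1 */
    return 0;
}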
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index e47d09aca4af..a1525916885a 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -368,7 +368,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
err = -EINVAL;
goto out;
}
- /* fall through */
+ fallthrough;
case PNS_PEP_DISABLE_REQ:
atomic_set(&pn->tx_credits, 0);
pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
@@ -385,7 +385,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
case PNS_PIPE_ALIGNED_DATA:
__skb_pull(skb, 1);
- /* fall through */
+ fallthrough;
case PNS_PIPE_DATA:
__skb_pull(skb, 3); /* Pipe data header */
if (!pn_flow_safe(pn->rx_fc)) {
@@ -417,11 +417,11 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
err = pipe_rcv_created(sk, skb);
if (err)
break;
- /* fall through */
+ fallthrough;
case PNS_PIPE_RESET_IND:
if (!pn->init_enable)
break;
- /* fall through */
+ fallthrough;
case PNS_PIPE_ENABLED_IND:
if (!pn_flow_safe(pn->tx_fc)) {
atomic_set(&pn->tx_credits, 1);
@@ -555,7 +555,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
switch (hdr->message_id) {
case PNS_PIPE_ALIGNED_DATA:
__skb_pull(skb, 1);
- /* fall through */
+ fallthrough;
case PNS_PIPE_DATA:
__skb_pull(skb, 3); /* Pipe data header */
if (!pn_flow_safe(pn->rx_fc)) {
diff --git a/net/rds/send.c b/net/rds/send.c
index 9a529a01cdc6..985d0b7713ac 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -934,7 +934,7 @@ static int rds_rm_size(struct msghdr *msg, int num_sgs,
case RDS_CMSG_ZCOPY_COOKIE:
zcopy_cookie = true;
- /* fall through */
+ fallthrough;
case RDS_CMSG_RDMA_DEST:
case RDS_CMSG_RDMA_MAP:
diff --git a/net/rose/rose_in.c b/net/rose/rose_in.c
index 0d4fab2be82b..6af786d66b03 100644
--- a/net/rose/rose_in.c
+++ b/net/rose/rose_in.c
@@ -216,7 +216,7 @@ static int rose_state4_machine(struct sock *sk, struct sk_buff *skb, int framety
switch (frametype) {
case ROSE_RESET_REQUEST:
rose_write_internal(sk, ROSE_RESET_CONFIRMATION);
- /* fall through */
+ fallthrough;
case ROSE_RESET_CONFIRMATION:
rose_stop_timer(sk);
rose_start_idletimer(sk);
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 5277631fa14c..6e35703ff353 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -343,7 +343,7 @@ static int rose_del_node(struct rose_route_struct *rose_route,
case 0:
rose_node->neighbour[0] =
rose_node->neighbour[1];
- /* fall through */
+ fallthrough;
case 1:
rose_node->neighbour[1] =
rose_node->neighbour[2];
@@ -505,7 +505,7 @@ void rose_rt_device_down(struct net_device *dev)
switch (i) {
case 0:
t->neighbour[0] = t->neighbour[1];
- /* fall through */
+ fallthrough;
case 1:
t->neighbour[1] = t->neighbour[2];
case 2:
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index e6725a6de015..186c8a889b16 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -246,7 +246,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
ret = 0;
break;
}
- /* Fall through */
+ fallthrough;
default:
ret = -EBUSY;
break;
@@ -545,7 +545,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
rx->local = local;
rx->sk.sk_state = RXRPC_CLIENT_BOUND;
- /* Fall through */
+ fallthrough;
case RXRPC_CLIENT_BOUND:
if (!m->msg_name &&
@@ -553,7 +553,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
m->msg_name = &rx->connect_srx;
m->msg_namelen = sizeof(rx->connect_srx);
}
- /* Fall through */
+ fallthrough;
case RXRPC_SERVER_BOUND:
case RXRPC_SERVER_LISTENING:
ret = rxrpc_do_sendmsg(rx, m, len);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 6d29a3603a3e..884cff7bb169 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -488,7 +488,6 @@ enum rxrpc_call_flag {
RXRPC_CALL_RX_LAST, /* Received the last packet (at rxtx_top) */
RXRPC_CALL_TX_LAST, /* Last packet in Tx buffer (at rxtx_top) */
RXRPC_CALL_SEND_PING, /* A ping will need to be sent */
- RXRPC_CALL_PINGING, /* Ping in process */
RXRPC_CALL_RETRANS_TIMEOUT, /* Retransmission due to timeout occurred */
RXRPC_CALL_BEGAN_RX_TIMER, /* We began the expect_rx_by timer */
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
@@ -673,9 +672,13 @@ struct rxrpc_call {
rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
- /* ping management */
- rxrpc_serial_t ping_serial; /* Last ping sent */
- ktime_t ping_time; /* Time last ping sent */
+ /* RTT management */
+ rxrpc_serial_t rtt_serial[4]; /* Serial number of DATA or PING sent */
+ ktime_t rtt_sent_at[4]; /* Time packet sent */
+ unsigned long rtt_avail; /* Mask of available slots in bits 0-3,
+ * Mask of pending samples in 8-11 */
+#define RXRPC_CALL_RTT_AVAIL_MASK 0xf
+#define RXRPC_CALL_RTT_PEND_SHIFT 8
/* transmission-phase ACK management */
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
@@ -1037,7 +1040,7 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
/*
* rtt.c
*/
-void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace,
+void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, int,
rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t);
unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool);
void rxrpc_peer_init_rtt(struct rxrpc_peer *);
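The rxrpc changes replace the single ping_serial/ping_time pair with four RTT probe slots per call. rtt_avail packs two bitmaps into one word: bits 0-3 (RXRPC_CALL_RTT_AVAIL_MASK) mark free slots, and bits 8-11 (offset RXRPC_CALL_RTT_PEND_SHIFT) mark probes in flight. Below is a simplified, single-threaded sketch of that bookkeeping; the real code orders the bit flips against the slot data with smp_wmb()/smp_rmb(), compares serials wrap-safely with after(), and traces obsoleted probes separately:

#include <stdio.h>

#define RTT_SLOTS   4
#define AVAIL_MASK  0xfUL              /* bits 0-3: slot is free */
#define PEND_SHIFT  8                  /* bits 8-11: probe in flight */

struct call {
    unsigned long rtt_avail;
    unsigned int rtt_serial[RTT_SLOTS];
};

/* Claim a free slot for a probe carrying 'serial'; -1 if all four are busy. */
static int begin_probe(struct call *call, unsigned int serial)
{
    for (int i = 0; i < RTT_SLOTS; i++) {
        if (call->rtt_avail & (1UL << i)) {
            call->rtt_avail &= ~(1UL << i);                 /* no longer free */
            call->rtt_serial[i] = serial;
            call->rtt_avail |= 1UL << (i + PEND_SHIFT);     /* now pending */
            return i;
        }
    }
    return -1;
}

/* Complete (or obsolete) pending probes once 'acked' has come back. */
static void complete_probe(struct call *call, unsigned int acked)
{
    for (int i = 0; i < RTT_SLOTS; i++) {
        if (!(call->rtt_avail & (1UL << (i + PEND_SHIFT))))
            continue;
        if (acked >= call->rtt_serial[i]) {     /* matched or overtaken */
            call->rtt_avail &= ~(1UL << (i + PEND_SHIFT));
            call->rtt_avail |= 1UL << i;        /* slot is free again */
        }
    }
}

int main(void)
{
    struct call call = { .rtt_avail = AVAIL_MASK };
    int a = begin_probe(&call, 1000);
    int b = begin_probe(&call, 1001);

    complete_probe(&call, 1001);               /* frees both slots */
    printf("%d %d avail=%#lx\n", a, b, call.rtt_avail);
    return 0;
}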
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index 032ed76c0166..ef160566aa9a 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -622,7 +622,7 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
case RXRPC_CALL_SERVER_ACCEPTING:
__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
abort = true;
- /* fall through */
+ fallthrough;
case RXRPC_CALL_COMPLETE:
ret = call->error;
goto out_discard;
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 38a46167523f..a40fae013942 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -153,6 +153,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
call->rxnet = rxnet;
+ call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
atomic_inc(&rxnet->nr_calls);
return call;
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index f2a1a5dbb5a7..159e3eda7914 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -881,7 +881,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
rxrpc_activate_channels_locked(conn);
}
- /* fall through */
+ fallthrough;
case RXRPC_CONN_CLIENT_ACTIVE:
if (list_empty(&conn->waiting_calls)) {
rxrpc_deactivate_one_channel(conn, channel);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 767579328a06..667c44aa5a63 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -608,36 +608,57 @@ unlock:
}
/*
- * Process a requested ACK.
+ * See if there's a cached RTT probe to complete.
*/
-static void rxrpc_input_requested_ack(struct rxrpc_call *call,
- ktime_t resp_time,
- rxrpc_serial_t orig_serial,
- rxrpc_serial_t ack_serial)
+static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
+ ktime_t resp_time,
+ rxrpc_serial_t acked_serial,
+ rxrpc_serial_t ack_serial,
+ enum rxrpc_rtt_rx_trace type)
{
- struct rxrpc_skb_priv *sp;
- struct sk_buff *skb;
+ rxrpc_serial_t orig_serial;
+ unsigned long avail;
ktime_t sent_at;
- int ix;
+ bool matched = false;
+ int i;
- for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
- skb = call->rxtx_buffer[ix];
- if (!skb)
- continue;
+ avail = READ_ONCE(call->rtt_avail);
+ smp_rmb(); /* Read avail bits before accessing data. */
- sent_at = skb->tstamp;
- smp_rmb(); /* Read timestamp before serial. */
- sp = rxrpc_skb(skb);
- if (sp->hdr.serial != orig_serial)
+ for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
+ if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
continue;
- goto found;
- }
- return;
+ sent_at = call->rtt_sent_at[i];
+ orig_serial = call->rtt_serial[i];
+
+ if (orig_serial == acked_serial) {
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_mb(); /* Read data before setting avail bit */
+ set_bit(i, &call->rtt_avail);
+ if (type != rxrpc_rtt_rx_cancel)
+ rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
+ sent_at, resp_time);
+ else
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
+ orig_serial, acked_serial, 0, 0);
+ matched = true;
+ }
+
+ /* If a later serial is being acked, then mark this slot as
+ * being available.
+ */
+ if (after(acked_serial, orig_serial)) {
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
+ orig_serial, acked_serial, 0, 0);
+ clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_wmb();
+ set_bit(i, &call->rtt_avail);
+ }
+ }
-found:
- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
- orig_serial, ack_serial, sent_at, resp_time);
+ if (!matched)
+ trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
}
/*
@@ -682,27 +703,11 @@ static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
*/
static void rxrpc_input_ping_response(struct rxrpc_call *call,
ktime_t resp_time,
- rxrpc_serial_t orig_serial,
+ rxrpc_serial_t acked_serial,
rxrpc_serial_t ack_serial)
{
- rxrpc_serial_t ping_serial;
- ktime_t ping_time;
-
- ping_time = call->ping_time;
- smp_rmb();
- ping_serial = READ_ONCE(call->ping_serial);
-
- if (orig_serial == call->acks_lost_ping)
+ if (acked_serial == call->acks_lost_ping)
rxrpc_input_check_for_lost_ack(call);
-
- if (before(orig_serial, ping_serial) ||
- !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
- return;
- if (after(orig_serial, ping_serial))
- return;
-
- rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
- orig_serial, ack_serial, ping_time, resp_time);
}
/*
@@ -843,7 +848,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
struct rxrpc_ackinfo info;
u8 acks[RXRPC_MAXACKS];
} buf;
- rxrpc_serial_t acked_serial;
+ rxrpc_serial_t ack_serial, acked_serial;
rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
int nr_acks, offset, ioffset;
@@ -856,6 +861,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
}
offset += sizeof(buf.ack);
+ ack_serial = sp->hdr.serial;
acked_serial = ntohl(buf.ack.serial);
first_soft_ack = ntohl(buf.ack.firstPacket);
prev_pkt = ntohl(buf.ack.previousPacket);
@@ -864,31 +870,42 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
buf.ack.reason : RXRPC_ACK__INVALID);
- trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
+ trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
first_soft_ack, prev_pkt,
summary.ack_reason, nr_acks);
- if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
+ switch (buf.ack.reason) {
+ case RXRPC_ACK_PING_RESPONSE:
rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
- sp->hdr.serial);
- if (buf.ack.reason == RXRPC_ACK_REQUESTED)
- rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
- sp->hdr.serial);
+ ack_serial);
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_ping_response);
+ break;
+ case RXRPC_ACK_REQUESTED:
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_requested_ack);
+ break;
+ default:
+ if (acked_serial != 0)
+ rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
+ rxrpc_rtt_rx_cancel);
+ break;
+ }
if (buf.ack.reason == RXRPC_ACK_PING) {
- _proto("Rx ACK %%%u PING Request", sp->hdr.serial);
+ _proto("Rx ACK %%%u PING Request", ack_serial);
rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
- sp->hdr.serial, true, true,
+ ack_serial, true, true,
rxrpc_propose_ack_respond_to_ping);
} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
- sp->hdr.serial, true, true,
+ ack_serial, true, true,
rxrpc_propose_ack_respond_to_ack);
}
/* Discard any out-of-order or duplicate ACKs (outside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
return;
@@ -904,7 +921,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
/* Discard any out-of-order or duplicate ACKs (inside lock). */
if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
- trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
first_soft_ack, call->ackr_first_seq,
prev_pkt, call->ackr_prev_seq);
goto out;
@@ -964,7 +981,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
RXRPC_TX_ANNO_LAST &&
summary.nr_acks == call->tx_top - hard_ack &&
rxrpc_is_client_call(call))
- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
false, true,
rxrpc_propose_ack_ping_for_lost_reply);
@@ -1084,7 +1101,7 @@ static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
switch (READ_ONCE(call->state)) {
case RXRPC_CALL_SERVER_AWAIT_ACK:
rxrpc_call_completed(call);
- /* Fall through */
+ fallthrough;
case RXRPC_CALL_COMPLETE:
break;
default:
@@ -1243,12 +1260,12 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
case RXRPC_PACKET_TYPE_BUSY:
if (rxrpc_to_server(sp))
goto discard;
- /* Fall through */
+ fallthrough;
case RXRPC_PACKET_TYPE_ACK:
case RXRPC_PACKET_TYPE_ACKALL:
if (sp->hdr.callNumber == 0)
goto bad_message;
- /* Fall through */
+ fallthrough;
case RXRPC_PACKET_TYPE_ABORT:
break;
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index c8b2097f499c..ede058f9cc15 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -162,7 +162,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
/* Fall through and set IPv4 options too otherwise we don't get
* errors from IPv4 packets sent through the IPv6 socket.
*/
- /* Fall through */
+ fallthrough;
case AF_INET:
/* we want to receive ICMP errors */
ip_sock_set_recverr(local->socket->sk);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 1ba43c3df4ad..3cfff7922ba8 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -124,6 +124,49 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
}
/*
+ * Record the beginning of an RTT probe.
+ */
+static int rxrpc_begin_rtt_probe(struct rxrpc_call *call, rxrpc_serial_t serial,
+ enum rxrpc_rtt_tx_trace why)
+{
+ unsigned long avail = call->rtt_avail;
+ int rtt_slot = 9;
+
+ if (!(avail & RXRPC_CALL_RTT_AVAIL_MASK))
+ goto no_slot;
+
+ rtt_slot = __ffs(avail & RXRPC_CALL_RTT_AVAIL_MASK);
+ if (!test_and_clear_bit(rtt_slot, &call->rtt_avail))
+ goto no_slot;
+
+ call->rtt_serial[rtt_slot] = serial;
+ call->rtt_sent_at[rtt_slot] = ktime_get_real();
+ smp_wmb(); /* Write data before avail bit */
+ set_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+
+ trace_rxrpc_rtt_tx(call, why, rtt_slot, serial);
+ return rtt_slot;
+
+no_slot:
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_no_slot, rtt_slot, serial);
+ return -1;
+}
+
+/*
+ * Cancel an RTT probe.
+ */
+static void rxrpc_cancel_rtt_probe(struct rxrpc_call *call,
+ rxrpc_serial_t serial, int rtt_slot)
+{
+ if (rtt_slot != -1) {
+ clear_bit(rtt_slot + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
+ smp_wmb(); /* Clear pending bit before setting slot */
+ set_bit(rtt_slot, &call->rtt_avail);
+ trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_cancel, rtt_slot, serial);
+ }
+}
+
+/*
* Send an ACK call packet.
*/
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
@@ -136,7 +179,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
rxrpc_serial_t serial;
rxrpc_seq_t hard_ack, top;
size_t len, n;
- int ret;
+ int ret, rtt_slot = -1;
u8 reason;
if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
@@ -196,18 +239,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
if (_serial)
*_serial = serial;
- if (ping) {
- call->ping_serial = serial;
- smp_wmb();
- /* We need to stick a time in before we send the packet in case
- * the reply gets back before kernel_sendmsg() completes - but
- * asking UDP to send the packet can take a relatively long
- * time.
- */
- call->ping_time = ktime_get_real();
- set_bit(RXRPC_CALL_PINGING, &call->flags);
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
- }
+ if (ping)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_ping);
ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
conn->params.peer->last_tx_at = ktime_get_seconds();
@@ -221,8 +254,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
- if (ping)
- clear_bit(RXRPC_CALL_PINGING, &call->flags);
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohl(pkt->ack.serial),
false, true,
@@ -321,7 +353,7 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
struct kvec iov[2];
rxrpc_serial_t serial;
size_t len;
- int ret;
+ int ret, rtt_slot = -1;
_enter(",{%d}", skb->len);
@@ -397,6 +429,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
sp->hdr.serial = serial;
smp_wmb(); /* Set serial before timestamp */
skb->tstamp = ktime_get_real();
+ if (whdr.flags & RXRPC_REQUEST_ACK)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
/* send the packet by UDP
* - returns -EMSGSIZE if UDP would have to fragment the packet
@@ -408,12 +442,15 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
conn->params.peer->last_tx_at = ktime_get_seconds();
up_read(&conn->params.local->defrag_sem);
- if (ret < 0)
+ if (ret < 0) {
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_nofrag);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
+ }
+
rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;
@@ -422,7 +459,6 @@ done:
if (ret >= 0) {
if (whdr.flags & RXRPC_REQUEST_ACK) {
call->peer->rtt_last_req = skb->tstamp;
- trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
if (call->peer->rtt_count > 1) {
unsigned long nowj = jiffies, ack_lost_at;
@@ -469,6 +505,8 @@ send_fragmentable:
sp->hdr.serial = serial;
smp_wmb(); /* Set serial before timestamp */
skb->tstamp = ktime_get_real();
+ if (whdr.flags & RXRPC_REQUEST_ACK)
+ rtt_slot = rxrpc_begin_rtt_probe(call, serial, rxrpc_rtt_tx_data);
switch (conn->params.local->srx.transport.family) {
case AF_INET6:
@@ -487,12 +525,14 @@ send_fragmentable:
BUG();
}
- if (ret < 0)
+ if (ret < 0) {
+ rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
trace_rxrpc_tx_fail(call->debug_id, serial, ret,
rxrpc_tx_point_call_data_frag);
- else
+ } else {
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
+ }
rxrpc_tx_backoff(call, ret);
up_write(&conn->params.local->defrag_sem);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index a852f46d5234..be032850ae8c 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -273,7 +273,7 @@ static void rxrpc_store_error(struct rxrpc_peer *peer,
case SO_EE_ORIGIN_ICMP6:
if (err == EACCES)
err = EHOSTUNREACH;
- /* Fall through */
+ fallthrough;
default:
_proto("Rx Received error report { orig=%u }", ee->ee_origin);
break;
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index ca29976bb193..68396d052052 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -502,11 +502,21 @@ EXPORT_SYMBOL(rxrpc_kernel_get_peer);
* rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT
* @sock: The socket on which the call is in progress.
* @call: The call to query
+ * @_srtt: Where to store the SRTT value.
*
- * Get the call's peer smoothed RTT.
+ * Get the call's peer smoothed RTT in uS.
*/
-u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call)
+bool rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call,
+ u32 *_srtt)
{
- return call->peer->srtt_us >> 3;
+ struct rxrpc_peer *peer = call->peer;
+
+ if (peer->rtt_count == 0) {
+ *_srtt = 1000000; /* 1S */
+ return false;
+ }
+
+ *_srtt = call->peer->srtt_us >> 3;
+ return true;
}
EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index efecc5a8f67d..c4684dde1f16 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -776,7 +776,7 @@ out:
case RXRPC_ACK_DELAY:
if (ret != -EAGAIN)
break;
- /* Fall through */
+ fallthrough;
default:
rxrpc_send_ack_packet(call, false, NULL);
}
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index 928d8b34a3ee..1221b0637a7e 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -146,6 +146,7 @@ static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
* exclusive access to the peer RTT data.
*/
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ int rtt_slot,
rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
ktime_t send_time, ktime_t resp_time)
{
@@ -162,7 +163,7 @@ void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
peer->rtt_count++;
spin_unlock(&peer->rtt_input_lock);
- trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
+ trace_rxrpc_rtt_rx(call, why, rtt_slot, send_serial, resp_serial,
peer->srtt_us >> 3, peer->rto_j);
}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 52a24d4ef5d8..e08130e5746b 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1137,7 +1137,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
ret = -ENOMEM;
ticket = kmalloc(ticket_len, GFP_NOFS);
if (!ticket)
- goto temporary_error;
+ goto temporary_error_free_resp;
eproto = tracepoint_string("rxkad_tkt_short");
abort_code = RXKADPACKETSHORT;
@@ -1230,6 +1230,7 @@ protocol_error:
temporary_error_free_ticket:
kfree(ticket);
+temporary_error_free_resp:
kfree(response);
temporary_error:
/* Ignore the response packet if we got a temporary error such as
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index f3f6da6e4ad2..0824e103d037 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -241,7 +241,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
if (!last)
break;
- /* Fall through */
+ fallthrough;
case RXRPC_CALL_SERVER_SEND_REPLY:
call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
rxrpc_notify_end_tx(rx, call, notify_end_tx);
@@ -721,13 +721,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (p.call.timeouts.normal > 0 && j == 0)
j = 1;
WRITE_ONCE(call->next_rx_timo, j);
- /* Fall through */
+ fallthrough;
case 2:
j = msecs_to_jiffies(p.call.timeouts.idle);
if (p.call.timeouts.idle > 0 && j == 0)
j = 1;
WRITE_ONCE(call->next_req_timo, j);
- /* Fall through */
+ fallthrough;
case 1:
if (p.call.timeouts.hard > 0) {
j = msecs_to_jiffies(p.call.timeouts.hard);
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index 0618b63f87c4..7d37638ee1c7 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1670,7 +1670,7 @@ static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t,
case TC_ACT_QUEUED:
case TC_ACT_TRAP:
*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
- /* fall through */
+ fallthrough;
case TC_ACT_SHOT:
return 0;
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index deac82f3ad7b..e89fab6ccb34 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -353,23 +353,11 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt,
FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
tb[TCA_RED_EARLY_DROP_BLOCK], extack);
if (err)
- goto err_early_drop_init;
-
- err = tcf_qevent_init(&q->qe_mark, sch,
- FLOW_BLOCK_BINDER_TYPE_RED_MARK,
- tb[TCA_RED_MARK_BLOCK], extack);
- if (err)
- goto err_mark_init;
-
- return 0;
+ return err;
-err_mark_init:
- tcf_qevent_destroy(&q->qe_early_drop, sch);
-err_early_drop_init:
- del_timer_sync(&q->adapt_timer);
- red_offload(sch, false);
- qdisc_put(q->qdisc);
- return err;
+ return tcf_qevent_init(&q->qe_mark, sch,
+ FLOW_BLOCK_BINDER_TYPE_RED_MARK,
+ tb[TCA_RED_MARK_BLOCK], extack);
}
static int red_change(struct Qdisc *sch, struct nlattr *opt,
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index e981992634dd..fe53c1e38c7d 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1176,9 +1176,27 @@ static void taprio_offload_config_changed(struct taprio_sched *q)
spin_unlock(&q->current_entry_lock);
}
-static void taprio_sched_to_offload(struct taprio_sched *q,
+static u32 tc_map_to_queue_mask(struct net_device *dev, u32 tc_mask)
+{
+ u32 i, queue_mask = 0;
+
+ for (i = 0; i < dev->num_tc; i++) {
+ u32 offset, count;
+
+ if (!(tc_mask & BIT(i)))
+ continue;
+
+ offset = dev->tc_to_txq[i].offset;
+ count = dev->tc_to_txq[i].count;
+
+ queue_mask |= GENMASK(offset + count - 1, offset);
+ }
+
+ return queue_mask;
+}
+
+static void taprio_sched_to_offload(struct net_device *dev,
struct sched_gate_list *sched,
- const struct tc_mqprio_qopt *mqprio,
struct tc_taprio_qopt_offload *offload)
{
struct sched_entry *entry;
@@ -1193,7 +1211,8 @@ static void taprio_sched_to_offload(struct taprio_sched *q,
e->command = entry->command;
e->interval = entry->interval;
- e->gate_mask = entry->gate_mask;
+ e->gate_mask = tc_map_to_queue_mask(dev, entry->gate_mask);
+
i++;
}
@@ -1201,7 +1220,6 @@ static void taprio_sched_to_offload(struct taprio_sched *q,
}
static int taprio_enable_offload(struct net_device *dev,
- struct tc_mqprio_qopt *mqprio,
struct taprio_sched *q,
struct sched_gate_list *sched,
struct netlink_ext_ack *extack)
@@ -1223,7 +1241,7 @@ static int taprio_enable_offload(struct net_device *dev,
return -ENOMEM;
}
offload->enable = 1;
- taprio_sched_to_offload(q, sched, mqprio, offload);
+ taprio_sched_to_offload(dev, sched, offload);
err = ops->ndo_setup_tc(dev, TC_SETUP_QDISC_TAPRIO, offload);
if (err < 0) {
@@ -1485,7 +1503,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
}
if (FULL_OFFLOAD_IS_ENABLED(q->flags))
- err = taprio_enable_offload(dev, mqprio, q, new_admin, extack);
+ err = taprio_enable_offload(dev, q, new_admin, extack);
else
err = taprio_disable_offload(dev, q, extack);
if (err)
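taprio now converts each gate entry's traffic-class mask into a TX-queue mask via the device's tc_to_txq offset/count table before handing it to the driver, so the offload hook receives queue masks rather than traffic-class masks. A standalone sketch of the conversion with a worked example, using a 32-bit userspace stand-in for GENMASK():

#include <stdio.h>

#define GENMASK(h, l)  ((~0U << (l)) & (~0U >> (31 - (h))))

struct tc_txq { unsigned int offset, count; };

/* Convert a traffic-class bitmask into the matching TX-queue bitmask. */
static unsigned int tc_map_to_queue_mask(const struct tc_txq *tc_to_txq,
                                         unsigned int num_tc,
                                         unsigned int tc_mask)
{
    unsigned int queue_mask = 0;

    for (unsigned int i = 0; i < num_tc; i++) {
        if (!(tc_mask & (1U << i)))
            continue;
        queue_mask |= GENMASK(tc_to_txq[i].offset + tc_to_txq[i].count - 1,
                              tc_to_txq[i].offset);
    }
    return queue_mask;
}

int main(void)
{
    /* TC0 -> queues 0-1, TC1 -> queues 2-3 */
    const struct tc_txq map[] = { { 0, 2 }, { 2, 2 } };

    /* Gate mask 0x2 (TC1 only) becomes queue mask 0xc (queues 2 and 3). */
    printf("%#x\n", tc_map_to_queue_mask(map, 2, 0x2));
    return 0;
}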
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index aea2a982984d..8a58f42d6d19 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -875,7 +875,7 @@ static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp)
case AF_INET:
if (!__ipv6_only_sock(sctp_opt2sk(sp)))
return 1;
- /* fallthru */
+ fallthrough;
default:
return 0;
}
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 577e3bc4ee6f..3fd06a27105d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -912,7 +912,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
case SCTP_CID_ABORT:
if (sctp_test_T_bit(chunk))
ctx->packet->vtag = ctx->asoc->c.my_vtag;
- /* fallthru */
+ fallthrough;
/* The following chunks are "response" chunks, i.e.
* they are generated in response to something we
@@ -927,7 +927,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
case SCTP_CID_ECN_CWR:
case SCTP_CID_ASCONF_ACK:
one_packet = 1;
- /* Fall through */
+ fallthrough;
case SCTP_CID_SACK:
case SCTP_CID_HEARTBEAT:
@@ -1030,7 +1030,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
if (!ctx->packet || !ctx->packet->has_cookie_echo)
return;
- /* fall through */
+ fallthrough;
case SCTP_STATE_ESTABLISHED:
case SCTP_STATE_SHUTDOWN_PENDING:
case SCTP_STATE_SHUTDOWN_RECEIVED:
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 467bd77b6986..9a56ae2f3651 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2077,7 +2077,7 @@ static enum sctp_ierror sctp_process_unk_param(
break;
case SCTP_PARAM_ACTION_DISCARD_ERR:
retval = SCTP_IERROR_ERROR;
- /* Fall through */
+ fallthrough;
case SCTP_PARAM_ACTION_SKIP_ERR:
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9f36fe911d08..aa821e71f05e 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -1516,7 +1516,7 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type,
if (timer_pending(timer))
break;
- /* fall through */
+ fallthrough;
case SCTP_CMD_TIMER_START:
timer = &asoc->timers[cmd->obj.to];
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index e86620fbd90f..c669f8bd1eab 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -4315,7 +4315,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
- /* Fall Through */
+ fallthrough;
case SCTP_IERROR_AUTH_BAD_KEYID:
case SCTP_IERROR_BAD_SIG:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index ec1fba1fbe71..836615f71a7d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8060,8 +8060,6 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
pr_debug("%s: begins, snum:%d\n", __func__, snum);
- local_bh_disable();
-
if (snum == 0) {
/* Search for an available port. */
int low, high, remaining, index;
@@ -8079,20 +8077,21 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
continue;
index = sctp_phashfn(net, rover);
head = &sctp_port_hashtable[index];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain)
if ((pp->port == rover) &&
net_eq(net, pp->net))
goto next;
break;
next:
- spin_unlock(&head->lock);
+ spin_unlock_bh(&head->lock);
+ cond_resched();
} while (--remaining > 0);
/* Exhausted local port range during search? */
ret = 1;
if (remaining <= 0)
- goto fail;
+ return ret;
/* OK, here is the one we will use. HEAD (the port
* hash table list entry) is non-NULL and we hold its
@@ -8107,7 +8106,7 @@ static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
* port iterator, pp being NULL.
*/
head = &sctp_port_hashtable[sctp_phashfn(net, snum)];
- spin_lock(&head->lock);
+ spin_lock_bh(&head->lock);
sctp_for_each_hentry(pp, &head->chain) {
if ((pp->port == snum) && net_eq(pp->net, net))
goto pp_found;
@@ -8207,10 +8206,7 @@ success:
ret = 0;
fail_unlock:
- spin_unlock(&head->lock);
-
-fail:
- local_bh_enable();
+ spin_unlock_bh(&head->lock);
return ret;
}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 290270c821ca..0e7409e469c0 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -116,7 +116,6 @@ static void smc_close_cancel_work(struct smc_sock *smc)
cancel_work_sync(&smc->conn.close_work);
cancel_delayed_work_sync(&smc->conn.tx_work);
lock_sock(sk);
- sk->sk_state = SMC_CLOSED;
}
/* terminate smc socket abnormally - active abort
@@ -134,22 +133,22 @@ void smc_close_active_abort(struct smc_sock *smc)
}
switch (sk->sk_state) {
case SMC_ACTIVE:
- sk->sk_state = SMC_PEERABORTWAIT;
- smc_close_cancel_work(smc);
- sk->sk_state = SMC_CLOSED;
- sock_put(sk); /* passive closing */
- break;
case SMC_APPCLOSEWAIT1:
case SMC_APPCLOSEWAIT2:
+ sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
- sock_put(sk); /* postponed passive closing */
+ sock_put(sk); /* (postponed) passive closing */
break;
case SMC_PEERCLOSEWAIT1:
case SMC_PEERCLOSEWAIT2:
case SMC_PEERFINCLOSEWAIT:
sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
smc_conn_free(&smc->conn);
release_clcsock = true;
@@ -159,6 +158,8 @@ void smc_close_active_abort(struct smc_sock *smc)
case SMC_APPFINCLOSEWAIT:
sk->sk_state = SMC_PEERABORTWAIT;
smc_close_cancel_work(smc);
+ if (sk->sk_state != SMC_PEERABORTWAIT)
+ break;
sk->sk_state = SMC_CLOSED;
smc_conn_free(&smc->conn);
release_clcsock = true;
@@ -372,7 +373,7 @@ static void smc_close_passive_work(struct work_struct *work)
case SMC_PEERCLOSEWAIT1:
if (rxflags->peer_done_writing)
sk->sk_state = SMC_PEERCLOSEWAIT2;
- /* fall through */
+ fallthrough;
/* to check for closing */
case SMC_PEERCLOSEWAIT2:
if (!smc_cdc_rxed_any_close(conn))
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index b42fa3b00d00..a406627b1d55 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1356,6 +1356,8 @@ create:
if (ini->is_smcd) {
conn->rx_off = sizeof(struct smcd_cdc_msg);
smcd_cdc_rx_init(conn); /* init tasklet for this conn */
+ } else {
+ conn->rx_off = 0;
}
#ifndef KERNEL_HAS_ATOMIC64
spin_lock_init(&conn->acurs_lock);
@@ -1777,6 +1779,7 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
list_del(&smc->conn.sndbuf_desc->list);
mutex_unlock(&smc->conn.lgr->sndbufs_lock);
smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+ smc->conn.sndbuf_desc = NULL;
}
return rc;
}
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index df5b0a6ea848..3ea33466ebe9 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -841,6 +841,9 @@ int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
struct smc_init_info ini;
int lnk_idx, rc = 0;
+ if (!llc->qp_mtu)
+ goto out_reject;
+
ini.vlan_id = lgr->vlan_id;
smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
@@ -917,10 +920,20 @@ out:
kfree(qentry);
}
+static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
+ if (llc->raw.data[i])
+ return false;
+ return true;
+}
+
static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
{
if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
- !llc->add_link.qp_mtu && !llc->add_link.link_num)
+ smc_llc_is_empty_llc_message(llc))
return true;
return false;
}
diff --git a/net/socket.c b/net/socket.c
index e84a8e281b4c..82262e1922f9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -3612,7 +3612,7 @@ int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
EXPORT_SYMBOL(kernel_getsockname);
/**
- * kernel_peername - get the address which the socket is connected (kernel space)
+ * kernel_getpeername - get the address which the socket is connected (kernel space)
* @sock: socket
* @addr: address holder
*
@@ -3673,7 +3673,7 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
EXPORT_SYMBOL(kernel_sendpage_locked);
/**
- * kernel_shutdown - shut down part of a full-duplex connection (kernel space)
+ * kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
* @sock: socket
* @how: connection part
*
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 90b8329fef82..8b300b74a722 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -137,7 +137,7 @@ gss_krb5_make_confounder(char *p, u32 conflen)
switch (conflen) {
case 16:
*q++ = i++;
- /* fall through */
+ fallthrough;
case 8:
*q++ = i++;
break;
diff --git a/net/sunrpc/auth_gss/trace.c b/net/sunrpc/auth_gss/trace.c
index d26036a57443..76685abba60f 100644
--- a/net/sunrpc/auth_gss/trace.c
+++ b/net/sunrpc/auth_gss/trace.c
@@ -9,7 +9,6 @@
#include <linux/sunrpc/svc_xprt.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
-#include <linux/sunrpc/auth_gss.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rpcgss.h>
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index a91d1cdad9d7..62e0b6c1e8cf 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1702,7 +1702,7 @@ call_reserveresult(struct rpc_task *task)
switch (status) {
case -ENOMEM:
rpc_delay(task, HZ >> 2);
- /* fall through */
+ fallthrough;
case -EAGAIN: /* woken up; retry */
task->tk_action = call_retry_reserve;
return;
@@ -1759,13 +1759,13 @@ call_refreshresult(struct rpc_task *task)
/* Use rate-limiting and a max number of retries if refresh
* had status 0 but failed to update the cred.
*/
- /* fall through */
+ fallthrough;
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
- /* fall through */
+ fallthrough;
case -EAGAIN:
status = -EACCES;
- /* fall through */
+ fallthrough;
case -EKEYEXPIRED:
if (!task->tk_cred_retry)
break;
@@ -2132,7 +2132,7 @@ call_connect_status(struct rpc_task *task)
rpc_force_rebind(clnt);
goto out_retry;
}
- /* fall through */
+ fallthrough;
case -ECONNRESET:
case -ECONNABORTED:
case -ENETDOWN:
@@ -2146,7 +2146,7 @@ call_connect_status(struct rpc_task *task)
break;
/* retry with existing socket, after a delay */
rpc_delay(task, 3*HZ);
- /* fall through */
+ fallthrough;
case -EADDRINUSE:
case -ENOTCONN:
case -EAGAIN:
@@ -2228,7 +2228,7 @@ call_transmit_status(struct rpc_task *task)
*/
case -ENOBUFS:
rpc_delay(task, HZ>>2);
- /* fall through */
+ fallthrough;
case -EBADSLT:
case -EAGAIN:
task->tk_action = call_transmit;
@@ -2247,7 +2247,7 @@ call_transmit_status(struct rpc_task *task)
rpc_call_rpcerror(task, task->tk_status);
return;
}
- /* fall through */
+ fallthrough;
case -ECONNRESET:
case -ECONNABORTED:
case -EADDRINUSE:
@@ -2313,7 +2313,7 @@ call_bc_transmit_status(struct rpc_task *task)
break;
case -ENOBUFS:
rpc_delay(task, HZ>>2);
- /* fall through */
+ fallthrough;
case -EBADSLT:
case -EAGAIN:
task->tk_status = 0;
@@ -2380,7 +2380,7 @@ call_status(struct rpc_task *task)
* were a timeout.
*/
rpc_delay(task, 3*HZ);
- /* fall through */
+ fallthrough;
case -ETIMEDOUT:
break;
case -ECONNREFUSED:
@@ -2391,7 +2391,7 @@ call_status(struct rpc_task *task)
break;
case -EADDRINUSE:
rpc_delay(task, 3*HZ);
- /* fall through */
+ fallthrough;
case -EPIPE:
case -EAGAIN:
break;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 6ba9d5842629..5a8e47bbfb9f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1623,7 +1623,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
case -EAGAIN:
xprt_add_backlog(xprt, task);
dprintk("RPC: waiting for request slot\n");
- /* fall through */
+ fallthrough;
default:
task->tk_status = -EAGAIN;
}
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 75c646743df3..3f86d039875c 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -268,7 +268,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_DEVICE_REMOVAL:
pr_info("rpcrdma: removing device %s for %pISpc\n",
ep->re_id->device->name, sap);
- /* fall through */
+ fallthrough;
case RDMA_CM_EVENT_ADDR_CHANGE:
ep->re_connect_status = -ENODEV;
goto disconnected;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c57aef829403..554e1bb4c1c7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -885,7 +885,7 @@ static int xs_local_send_request(struct rpc_rqst *req)
default:
dprintk("RPC: sendmsg returned unrecognized error %d\n",
-status);
- /* fall through */
+ fallthrough;
case -EPIPE:
xs_close(xprt);
status = -ENOTCONN;
@@ -1436,7 +1436,7 @@ static void xs_tcp_state_change(struct sock *sk)
xprt->connect_cookie++;
clear_bit(XPRT_CONNECTED, &xprt->state);
xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
- /* fall through */
+ fallthrough;
case TCP_CLOSING:
/*
* If the server closed down the connection, make sure that
@@ -2202,7 +2202,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
switch (ret) {
case 0:
xs_set_srcport(transport, sock);
- /* fall through */
+ fallthrough;
case -EINPROGRESS:
/* SYN_SENT! */
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
@@ -2255,7 +2255,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
default:
printk("%s: connect returned unhandled error %d\n",
__func__, status);
- /* fall through */
+ fallthrough;
case -EADDRNOTAVAIL:
/* We're probably in TIME_WAIT. Get rid of existing socket,
* and retry
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index c38babaa4e57..7c523dc81575 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -326,7 +326,8 @@ static void tipc_aead_free(struct rcu_head *rp)
if (aead->cloned) {
tipc_aead_put(aead->cloned);
} else {
- head = *this_cpu_ptr(aead->tfm_entry);
+ head = *get_cpu_ptr(aead->tfm_entry);
+ put_cpu_ptr(aead->tfm_entry);
list_for_each_entry_safe(tfm_entry, tmp, &head->list, list) {
crypto_free_aead(tfm_entry->tfm);
list_del(&tfm_entry->list);
@@ -399,10 +400,15 @@ static void tipc_aead_users_set(struct tipc_aead __rcu *aead, int val)
*/
static struct crypto_aead *tipc_aead_tfm_next(struct tipc_aead *aead)
{
- struct tipc_tfm **tfm_entry = this_cpu_ptr(aead->tfm_entry);
+ struct tipc_tfm **tfm_entry;
+ struct crypto_aead *tfm;
+ tfm_entry = get_cpu_ptr(aead->tfm_entry);
*tfm_entry = list_next_entry(*tfm_entry, list);
- return (*tfm_entry)->tfm;
+ tfm = (*tfm_entry)->tfm;
+ put_cpu_ptr(tfm_entry);
+
+ return tfm;
}
/**
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 89257e2a980d..588c2d2b0c69 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -536,7 +536,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
update = true;
deliver = false;
}
- /* Fall thru */
+ fallthrough;
case TIPC_GRP_BCAST_MSG:
m->bc_rcv_nxt++;
ack = msg_grp_bc_ack_req(hdr);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index fd5bfaab8661..dd93e8ecb2f4 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2770,18 +2770,21 @@ static int tipc_shutdown(struct socket *sock, int how)
trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
__tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
- sk->sk_shutdown = SEND_SHUTDOWN;
+ if (tipc_sk_type_connectionless(sk))
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ else
+ sk->sk_shutdown = SEND_SHUTDOWN;
if (sk->sk_state == TIPC_DISCONNECTING) {
/* Discard any unreceived messages */
__skb_queue_purge(&sk->sk_receive_queue);
- /* Wake up anyone sleeping in poll */
- sk->sk_state_change(sk);
res = 0;
} else {
res = -ENOTCONN;
}
+ /* Wake up anyone sleeping in poll. */
+ sk->sk_state_change(sk);
release_sock(sk);
return res;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 181ea6fb56a6..92784e51ee7d 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -837,7 +837,7 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
*/
case SOCK_RAW:
sock->type = SOCK_DGRAM;
- /* fall through */
+ fallthrough;
case SOCK_DGRAM:
sock->ops = &unix_dgram_ops;
break;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index e97a4f0c32a3..6a6f2f214c10 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -10,6 +10,7 @@
*/
#include <linux/export.h>
+#include <linux/bitfield.h>
#include <net/cfg80211.h>
#include "core.h"
#include "rdev-ops.h"
@@ -912,6 +913,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
struct ieee80211_sta_vht_cap *vht_cap;
struct ieee80211_edmg *edmg_cap;
u32 width, control_freq, cap;
+ bool support_80_80 = false;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
return false;
@@ -979,9 +981,13 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
return false;
break;
case NL80211_CHAN_WIDTH_80P80:
- cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- if (chandef->chan->band != NL80211_BAND_6GHZ &&
- cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ cap = vht_cap->cap;
+ support_80_80 =
+ (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
+ (cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+ cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
+ u32_get_bits(cap, IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) > 1;
+ if (chandef->chan->band != NL80211_BAND_6GHZ && !support_80_80)
return false;
fallthrough;
case NL80211_CHAN_WIDTH_80:
@@ -1001,7 +1007,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
return false;
cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
- cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
+ cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ &&
+ !(vht_cap->cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK))
return false;
break;
default:
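
Editor's note on the chan.c hunks: the 80+80 MHz and 160 MHz checks are widened so a device also qualifies when it advertises 160 MHz support together with a non-zero VHT "extended NSS bandwidth" field. A standalone sketch of the resulting boolean test follows; the mask values are quoted from memory of include/linux/ieee80211.h and renamed to signal they are local stand-ins, and u32_get_bits() here is a local reimplementation of the helper from linux/bitfield.h.

/*
 * Standalone sketch of the 80+80 MHz support test introduced above.
 * Constants and helper are local stand-ins, not kernel headers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VHT_CAP_SUPP_CHAN_WIDTH_160MHZ		0x00000004u
#define VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ	0x00000008u
#define VHT_CAP_EXT_NSS_BW_MASK			0xc0000000u

static uint32_t u32_get_bits(uint32_t val, uint32_t mask)
{
	return (val & mask) >> __builtin_ctz(mask);
}

static bool supports_80_80(uint32_t cap)
{
	return (cap & VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
	       ((cap & VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) &&
		(cap & VHT_CAP_EXT_NSS_BW_MASK)) ||
	       u32_get_bits(cap, VHT_CAP_EXT_NSS_BW_MASK) > 1;
}

int main(void)
{
	/* 160 MHz only: no 80+80 */
	printf("%d\n", supports_80_80(VHT_CAP_SUPP_CHAN_WIDTH_160MHZ));
	/* 160 MHz plus a non-zero ext-NSS-BW field: 80+80 accepted */
	printf("%d\n", supports_80_80(VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
				      0x40000000u));
	return 0;
}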
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 201d029687cc..52a35e788547 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -6129,7 +6129,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY])
params.he_6ghz_capa =
- nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
+ nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT])
params.airtime_weight =
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index dcd3d39a5372..0ab7808fcec8 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -3105,6 +3105,9 @@ int regulatory_hint_user(const char *alpha2,
if (WARN_ON(!alpha2))
return -EINVAL;
+ if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
+ return -EINVAL;
+
request = kzalloc(sizeof(struct regulatory_request), GFP_KERNEL);
if (!request)
return -ENOMEM;
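
Editor's note on the reg.c hunk: regulatory_hint_user() now rejects hints that are neither the world regdomain ("00") nor a two-letter alphabetic code before allocating a request. A tiny userspace sketch of an equivalent guard follows; the two helpers are local reimplementations written from the names of the static helpers in net/wireless/reg.c, not copies of the kernel functions.

/*
 * Userspace sketch of the new validity guard.  Helpers are local
 * reimplementations, not the kernel's.
 */
#include <ctype.h>
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool is_world_regdom(const char *alpha2)
{
	return alpha2[0] == '0' && alpha2[1] == '0';
}

static bool is_an_alpha2(const char *alpha2)
{
	return isalpha((unsigned char)alpha2[0]) &&
	       isalpha((unsigned char)alpha2[1]);
}

static int regulatory_hint_check(const char *alpha2)
{
	if (!alpha2)
		return -EINVAL;
	if (!is_world_regdom(alpha2) && !is_an_alpha2(alpha2))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("\"US\" -> %d\n", regulatory_hint_check("US"));	/* 0 */
	printf("\"00\" -> %d\n", regulatory_hint_check("00"));	/* 0 */
	printf("\"1!\" -> %d\n", regulatory_hint_check("1!"));	/* -EINVAL */
	return 0;
}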
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7c5d5365a5eb..4a9ff9ef513f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -123,11 +123,13 @@ int ieee80211_freq_khz_to_channel(u32 freq)
return (freq - 2407) / 5;
else if (freq >= 4910 && freq <= 4980)
return (freq - 4000) / 5;
- else if (freq < 5945)
+ else if (freq < 5925)
return (freq - 5000) / 5;
+ else if (freq == 5935)
+ return 2;
else if (freq <= 45000) /* DMG band lower limit */
- /* see 802.11ax D4.1 27.3.22.2 */
- return (freq - 5940) / 5;
+ /* see 802.11ax D6.1 27.3.22.2 */
+ return (freq - 5950) / 5;
else if (freq >= 58320 && freq <= 70200)
return (freq - 56160) / 2160;
else
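
Editor's note on the util.c hunk: it moves the 6 GHz channelisation to the final 802.11ax numbering, with channels at 5950 + 5*N MHz and channel 2 pinned at 5935 MHz, instead of the earlier draft's 5940 MHz base. A small standalone mirror of the updated arithmetic follows for a quick sanity check; it takes MHz directly, reproduces only the mapping shown in the hunk, and the 2484 MHz branches above the hunk are filled in from the surrounding function.

/*
 * Standalone mirror of the updated frequency-to-channel arithmetic.
 * Returns 0 for frequencies this sketch does not map.
 */
#include <stdio.h>

static int freq_to_channel(unsigned int freq)
{
	if (freq == 2484)
		return 14;
	else if (freq < 2484)
		return (freq - 2407) / 5;
	else if (freq >= 4910 && freq <= 4980)
		return (freq - 4000) / 5;
	else if (freq < 5925)
		return (freq - 5000) / 5;
	else if (freq == 5935)
		return 2;
	else if (freq <= 45000)			/* DMG band lower limit */
		return (freq - 5950) / 5;	/* 802.11ax D6.1 27.3.22.2 */
	else if (freq >= 58320 && freq <= 70200)
		return (freq - 56160) / 2160;
	return 0;
}

int main(void)
{
	printf("5955 MHz -> channel %d\n", freq_to_channel(5955));	/* 1 */
	printf("5935 MHz -> channel %d\n", freq_to_channel(5935));	/* 2 */
	printf("5180 MHz -> channel %d\n", freq_to_channel(5180));	/* 36 */
	return 0;
}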
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 7fb327632272..8e1a49b0c0dc 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -98,7 +98,7 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
*vc_fac_mask |= X25_MASK_REVERSE;
break;
}
- /*fall through */
+ fallthrough;
case X25_FAC_THROUGHPUT:
facilities->throughput = p[1];
*vc_fac_mask |= X25_MASK_THROUGHPUT;
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 4d3bb46aaae0..e1c4197af468 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -349,7 +349,7 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp
case X25_RESET_REQUEST:
x25_write_internal(sk, X25_RESET_CONFIRMATION);
- /* fall through */
+ fallthrough;
case X25_RESET_CONFIRMATION: {
x25_stop_timer(sk);
x25->condition = 0x00;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index d5280fd6f9c1..d622c2548d22 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -3410,7 +3410,7 @@ decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
switch (nexthdr) {
case NEXTHDR_FRAGMENT:
onlyproto = 1;
- /* fall through */
+ fallthrough;
case NEXTHDR_ROUTING:
case NEXTHDR_HOP:
case NEXTHDR_DEST: