Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 10
-rw-r--r--  net/8021q/vlan_core.c | 18
-rw-r--r--  net/8021q/vlan_dev.c | 6
-rw-r--r--  net/Kconfig | 2
-rw-r--r--  net/batman-adv/bat_iv_ogm.c | 8
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 24
-rw-r--r--  net/batman-adv/debugfs.c | 8
-rw-r--r--  net/batman-adv/hash.h | 2
-rw-r--r--  net/batman-adv/main.h | 6
-rw-r--r--  net/batman-adv/originator.c | 9
-rw-r--r--  net/batman-adv/routing.c | 4
-rw-r--r--  net/batman-adv/send.c | 2
-rw-r--r--  net/batman-adv/soft-interface.c | 16
-rw-r--r--  net/batman-adv/translation-table.c | 73
-rw-r--r--  net/batman-adv/types.h | 7
-rw-r--r--  net/batman-adv/unicast.c | 2
-rw-r--r--  net/batman-adv/vis.c | 6
-rw-r--r--  net/bridge/br_device.c | 9
-rw-r--r--  net/bridge/br_if.c | 12
-rw-r--r--  net/bridge/br_netlink.c | 5
-rw-r--r--  net/bridge/br_notify.c | 2
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/core/dev.c | 595
-rw-r--r--  net/core/ethtool.c | 45
-rw-r--r--  net/core/filter.c | 7
-rw-r--r--  net/core/net-sysfs.c | 175
-rw-r--r--  net/core/netpoll.c | 631
-rw-r--r--  net/core/rtnetlink.c | 104
-rw-r--r--  net/core/skbuff.c | 94
-rw-r--r--  net/dsa/slave.c | 10
-rw-r--r--  net/ethernet/eth.c | 2
-rw-r--r--  net/ipv4/af_inet.c | 3
-rw-r--r--  net/ipv4/fib_frontend.c | 2
-rw-r--r--  net/ipv4/syncookies.c | 7
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c | 20
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 3
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv6/Makefile | 2
-rw-r--r--  net/ipv6/af_inet6.c | 3
-rw-r--r--  net/ipv6/datagram.c | 19
-rw-r--r--  net/ipv6/exthdrs.c | 3
-rw-r--r--  net/ipv6/ip6_checksum.c | 97
-rw-r--r--  net/ipv6/ip6_gre.c | 6
-rw-r--r--  net/ipv6/ip6_input.c | 26
-rw-r--r--  net/ipv6/ip6_output.c | 8
-rw-r--r--  net/ipv6/ip6_tunnel.c | 4
-rw-r--r--  net/ipv6/mcast.c | 27
-rw-r--r--  net/ipv6/ndisc.c | 29
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 2
-rw-r--r--  net/ipv6/route.c | 44
-rw-r--r--  net/ipv6/syncookies.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 8
-rw-r--r--  net/ipv6/udp.c | 34
-rw-r--r--  net/ipv6/xfrm6_tunnel.c | 2
-rw-r--r--  net/netlink/af_netlink.c | 3
-rw-r--r--  net/openvswitch/datapath.c | 3
-rw-r--r--  net/openvswitch/vport-internal_dev.c | 3
-rw-r--r--  net/rxrpc/af_rxrpc.c | 4
-rw-r--r--  net/sched/act_api.c | 18
-rw-r--r--  net/sched/act_csum.c | 2
-rw-r--r--  net/sched/act_gact.c | 5
-rw-r--r--  net/sched/act_ipt.c | 2
-rw-r--r--  net/sched/act_mirred.c | 7
-rw-r--r--  net/sched/act_nat.c | 2
-rw-r--r--  net/sched/act_pedit.c | 5
-rw-r--r--  net/sched/act_police.c | 5
-rw-r--r--  net/sched/act_simple.c | 5
-rw-r--r--  net/sched/act_skbedit.c | 5
-rw-r--r--  net/sched/cls_api.c | 11
-rw-r--r--  net/sched/cls_basic.c | 13
-rw-r--r--  net/sched/cls_cgroup.c | 5
-rw-r--r--  net/sched/cls_flow.c | 4
-rw-r--r--  net/sched/cls_fw.c | 10
-rw-r--r--  net/sched/cls_route.c | 15
-rw-r--r--  net/sched/cls_rsvp.h | 4
-rw-r--r--  net/sched/cls_tcindex.c | 14
-rw-r--r--  net/sched/cls_u32.c | 13
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/unix/af_unix.c | 3
-rw-r--r--  net/wireless/ethtool.c | 4
81 files changed, 1515 insertions(+), 903 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index a292e8050ef2..babfde9f734c 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -105,6 +105,8 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
*/
unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(real_dev, dev);
+
if (grp->nr_vlan_devs == 0)
vlan_gvrp_uninit_applicant(real_dev);
@@ -162,9 +164,13 @@ int register_vlan_dev(struct net_device *dev)
if (err < 0)
goto out_uninit_applicant;
+ err = netdev_upper_dev_link(real_dev, dev);
+ if (err)
+ goto out_uninit_applicant;
+
err = register_netdevice(dev);
if (err < 0)
- goto out_uninit_applicant;
+ goto out_upper_dev_unlink;
/* Account for reference in struct vlan_dev_priv */
dev_hold(real_dev);
@@ -180,6 +186,8 @@ int register_vlan_dev(struct net_device *dev)
return 0;
+out_upper_dev_unlink:
+ netdev_upper_dev_unlink(real_dev, dev);
out_uninit_applicant:
if (grp->nr_vlan_devs == 0)
vlan_gvrp_uninit_applicant(real_dev);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 65e06abe023f..380440b8ea89 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -60,21 +60,25 @@ bool vlan_do_receive(struct sk_buff **skbp)
return true;
}
-/* Must be invoked with rcu_read_lock or with RTNL. */
-struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
+/* Must be invoked with rcu_read_lock. */
+struct net_device *__vlan_find_dev_deep(struct net_device *dev,
u16 vlan_id)
{
- struct vlan_info *vlan_info = rcu_dereference_rtnl(real_dev->vlan_info);
+ struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
if (vlan_info) {
return vlan_group_get_device(&vlan_info->grp, vlan_id);
} else {
/*
- * Bonding slaves do not have grp assigned to themselves.
- * Grp is assigned to bonding master instead.
+ * Lower devices of master uppers (bonding, team) do not have
+ * grp assigned to themselves. Grp is assigned to upper device
+ * instead.
*/
- if (netif_is_bond_slave(real_dev))
- return __vlan_find_dev_deep(real_dev->master, vlan_id);
+ struct net_device *upper_dev;
+
+ upper_dev = netdev_master_upper_dev_get_rcu(dev);
+ if (upper_dev)
+ return __vlan_find_dev_deep(upper_dev, vlan_id);
}
return NULL;
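
The comment change above is a real contract change: __vlan_find_dev_deep() may now only be called under rcu_read_lock(), and it resolves VLANs through the master upper device (bonding, team) rather than the old dev->master pointer. A minimal caller-side sketch, assuming real_dev and vlan_id are already in hand:

        struct net_device *vlan_dev;

        rcu_read_lock();
        vlan_dev = __vlan_find_dev_deep(real_dev, vlan_id);
        if (vlan_dev)
                pr_info("vid %u is handled by %s\n", vlan_id, vlan_dev->name);
        rcu_read_unlock();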
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4a6d31a082b9..09f9108d4688 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -640,9 +640,9 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
static void vlan_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, vlan_fullname);
- strcpy(info->version, vlan_version);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, vlan_fullname, sizeof(info->driver));
+ strlcpy(info->version, vlan_version, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
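
The strcpy() to strlcpy() conversion matters because the ethtool_drvinfo members are fixed-size char arrays; strlcpy() truncates and always NUL-terminates instead of overrunning the buffer. A tiny illustration, not part of the patch:

        char driver[32];        /* same idea as info->driver */

        strlcpy(driver, "some-driver-name-that-may-be-too-long", sizeof(driver));
        /* driver[] now holds at most 31 characters plus the terminating NUL */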
diff --git a/net/Kconfig b/net/Kconfig
index 30b48f523135..3cc5be0fe420 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -232,7 +232,7 @@ config RFS_ACCEL
config XPS
boolean
- depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
+ depends on SMP && USE_GENERIC_SMP_HELPERS
default y
config NETPRIO_CGROUP
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 7d02ebd11a7f..8f2de43b7597 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -183,7 +183,6 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
/* adjust all flags and log packets */
while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
batadv_ogm_packet->tt_num_changes)) {
-
/* we might have aggregated direct link packets with an
* ordinary base packet
*/
@@ -261,7 +260,6 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
*/
if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming != primary_if))) {
-
/* FIXME: what about aggregated packets ? */
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
@@ -325,7 +323,6 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
if (time_before(send_time, forw_packet->send_time) &&
time_after_eq(aggregation_end_time, forw_packet->send_time) &&
(aggregated_bytes <= BATADV_MAX_AGGREGATION_BYTES)) {
-
/* check aggregation compatibility
* -> direct link packets are broadcasted on
* their interface only
@@ -815,7 +812,6 @@ static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_neigh_node->neigh_list, list) {
-
if (!batadv_compare_eth(tmp_neigh_node->addr,
orig_neigh_node->orig))
continue;
@@ -949,7 +945,6 @@ batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
rcu_read_lock();
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
-
is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
orig_node->last_real_seqno,
seqno);
@@ -1033,7 +1028,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
is_single_hop_neigh = true;
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
+ "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %#.4x, changes %u, tq %d, TTL %d, V %d, IDF %d)\n",
ethhdr->h_source, if_incoming->net_dev->name,
if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
batadv_ogm_packet->prev_sender,
@@ -1223,7 +1218,6 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
/* is single hop (direct) neighbor */
if (is_single_hop_neigh) {
-
/* mark direct link on incoming interface */
batadv_iv_ogm_forward(orig_node, ethhdr, batadv_ogm_packet,
is_single_hop_neigh,
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 5aebe9327d68..5e834c1df469 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -57,7 +57,7 @@ static inline uint32_t batadv_choose_claim(const void *data, uint32_t size)
static inline uint32_t batadv_choose_backbone_gw(const void *data,
uint32_t size)
{
- struct batadv_claim *claim = (struct batadv_claim *)data;
+ const struct batadv_claim *claim = (struct batadv_claim *)data;
uint32_t hash = 0;
hash = batadv_hash_bytes(hash, &claim->addr, sizeof(claim->addr));
@@ -235,7 +235,6 @@ batadv_bla_del_backbone_claims(struct batadv_backbone_gw *backbone_gw)
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(claim, node, node_tmp,
head, hash_entry) {
-
if (claim->backbone_gw != backbone_gw)
continue;
@@ -338,7 +337,6 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, uint8_t *mac,
"bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
ethhdr->h_source, ethhdr->h_dest, vid);
break;
-
}
if (vid != -1)
@@ -539,7 +537,6 @@ static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
BATADV_CLAIM_TYPE_ANNOUNCE);
-
}
/**
@@ -598,7 +595,6 @@ static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
batadv_backbone_gw_free_ref(claim->backbone_gw);
-
}
/* set (new) backbone gw */
atomic_inc(&backbone_gw->refcount);
@@ -661,12 +657,12 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv,
crc = ntohs(*((__be16 *)(&an_addr[4])));
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
+ "handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
vid, backbone_gw->orig, crc);
if (backbone_gw->crc != crc) {
batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
- "handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
+ "handle_announce(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
backbone_gw->orig, backbone_gw->vid,
backbone_gw->crc, crc);
@@ -835,7 +831,7 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
/* if our mesh friends mac is bigger, use it for ourselves. */
if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
batadv_dbg(BATADV_DBG_BLA, bat_priv,
- "taking other backbones claim group: %04x\n",
+ "taking other backbones claim group: %#.4x\n",
ntohs(bla_dst->group));
bla_dst_own->group = bla_dst->group;
}
@@ -1626,10 +1622,10 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
primary_addr = primary_if->net_dev->dev_addr;
seq_printf(seq,
- "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
+ "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
net_dev->name, primary_addr,
ntohs(bat_priv->bla.claim_dest.group));
- seq_printf(seq, " %-17s %-5s %-17s [o] (%-4s)\n",
+ seq_printf(seq, " %-17s %-5s %-17s [o] (%-6s)\n",
"Client", "VID", "Originator", "CRC");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -1638,7 +1634,7 @@ int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
is_own = batadv_compare_eth(claim->backbone_gw->orig,
primary_addr);
- seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
+ seq_printf(seq, " * %pM on % 5d by %pM [%c] (%#.4x)\n",
claim->addr, claim->vid,
claim->backbone_gw->orig,
(is_own ? 'x' : ' '),
@@ -1672,10 +1668,10 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
primary_addr = primary_if->net_dev->dev_addr;
seq_printf(seq,
- "Backbones announced for the mesh %s (orig %pM, group id %04x)\n",
+ "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
net_dev->name, primary_addr,
ntohs(bat_priv->bla.claim_dest.group));
- seq_printf(seq, " %-17s %-5s %-9s (%-4s)\n",
+ seq_printf(seq, " %-17s %-5s %-9s (%-6s)\n",
"Originator", "VID", "last seen", "CRC");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -1693,7 +1689,7 @@ int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
continue;
seq_printf(seq,
- " * %pM on % 5d % 4i.%03is (%04x)\n",
+ " * %pM on % 5d % 4i.%03is (%#.4x)\n",
backbone_gw->orig, backbone_gw->vid,
secs, msecs, backbone_gw->crc);
}
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 6f58ddd53bff..55a90078dc66 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -164,7 +164,6 @@ static ssize_t batadv_log_read(struct file *file, char __user *buf,
buf++;
i++;
-
}
spin_unlock_bh(&debug_log->lock);
@@ -230,7 +229,6 @@ static void batadv_debug_log_cleanup(struct batadv_priv *bat_priv)
#else /* CONFIG_BATMAN_ADV_DEBUG */
static int batadv_debug_log_setup(struct batadv_priv *bat_priv)
{
- bat_priv->debug_log = NULL;
return 0;
}
@@ -397,10 +395,8 @@ err:
void batadv_debugfs_destroy(void)
{
- if (batadv_debugfs) {
- debugfs_remove_recursive(batadv_debugfs);
- batadv_debugfs = NULL;
- }
+ debugfs_remove_recursive(batadv_debugfs);
+ batadv_debugfs = NULL;
}
int batadv_debugfs_add_meshif(struct net_device *dev)
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index e05333905afd..ea0214824327 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -89,7 +89,7 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash,
*
* Returns the new hash value.
*/
-static inline uint32_t batadv_hash_bytes(uint32_t hash, void *data,
+static inline uint32_t batadv_hash_bytes(uint32_t hash, const void *data,
uint32_t size)
{
const unsigned char *key = data;
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 2f85577086a7..d04b209a8295 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -41,7 +41,7 @@
* -> TODO: check influence on BATADV_TQ_LOCAL_WINDOW_SIZE
*/
#define BATADV_PURGE_TIMEOUT 200000 /* 200 seconds */
-#define BATADV_TT_LOCAL_TIMEOUT 3600000 /* in milliseconds */
+#define BATADV_TT_LOCAL_TIMEOUT 600000 /* in milliseconds */
#define BATADV_TT_CLIENT_ROAM_TIMEOUT 600000 /* in milliseconds */
#define BATADV_TT_CLIENT_TEMP_TIMEOUT 600000 /* in milliseconds */
#define BATADV_DAT_ENTRY_TIMEOUT (5*60000) /* 5 mins in milliseconds */
@@ -276,9 +276,7 @@ static inline bool batadv_has_timed_out(unsigned long timestamp,
static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
size_t count)
{
- int cpu = get_cpu();
- per_cpu_ptr(bat_priv->bat_counters, cpu)[idx] += count;
- put_cpu();
+ this_cpu_add(bat_priv->bat_counters[idx], count);
}
#define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
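
this_cpu_add() bumps the local CPU's slot of the per-cpu counter array in one step, so the explicit get_cpu()/put_cpu() preemption dance is no longer needed. Reading such a counter back still means summing over all CPUs; a sketch of that reader side (shown for context only; batman-adv carries a similar helper elsewhere, but treat the exact name as illustrative here):

        static uint64_t batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
        {
                uint64_t sum = 0;
                int cpu;

                for_each_possible_cpu(cpu)
                        sum += per_cpu_ptr(bat_priv->bat_counters, cpu)[idx];
                return sum;
        }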
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 8c32cf1c2dec..fa88b2bec986 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -29,6 +29,9 @@
#include "soft-interface.h"
#include "bridge_loop_avoidance.h"
+/* hash class keys */
+static struct lock_class_key batadv_orig_hash_lock_class_key;
+
static void batadv_purge_orig(struct work_struct *work);
static void batadv_start_purge_timer(struct batadv_priv *bat_priv)
@@ -57,6 +60,9 @@ int batadv_originator_init(struct batadv_priv *bat_priv)
if (!bat_priv->orig_hash)
goto err;
+ batadv_hash_set_lock_class(bat_priv->orig_hash,
+ &batadv_orig_hash_lock_class_key);
+
batadv_start_purge_timer(bat_priv);
return 0;
@@ -178,7 +184,6 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
spin_lock_bh(list_lock);
hlist_for_each_entry_safe(orig_node, node, node_tmp,
head, hash_entry) {
-
hlist_del_rcu(node);
batadv_orig_node_free_ref(orig_node);
}
@@ -285,7 +290,6 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
/* for all neighbors towards this originator ... */
hlist_for_each_entry_safe(neigh_node, node, node_tmp,
&orig_node->neigh_list, list) {
-
last_seen = neigh_node->last_seen;
if_incoming = neigh_node->if_incoming;
@@ -293,7 +297,6 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
(if_incoming->if_status == BATADV_IF_INACTIVE) ||
(if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
(if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
-
if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
(if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
(if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 1aa1722d0187..db89238d6767 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -80,7 +80,6 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
/* route added */
} else if ((!curr_router) && (neigh_node)) {
-
batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
"Adding route towards: %pM (via %pM)\n",
orig_node->orig, neigh_node->addr);
@@ -172,7 +171,6 @@ void batadv_bonding_candidate_add(struct batadv_orig_node *orig_node,
*/
hlist_for_each_entry_rcu(tmp_neigh_node, node,
&orig_node->neigh_list, list) {
-
if (tmp_neigh_node == neigh_node)
continue;
@@ -836,7 +834,6 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
if (unicast_packet->header.packet_type == BATADV_UNICAST_FRAG &&
batadv_frag_can_reassemble(skb,
neigh_node->if_incoming->net_dev->mtu)) {
-
ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
@@ -1103,7 +1100,6 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,
/* packet for me */
if (batadv_is_my_mac(unicast_packet->dest)) {
-
ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);
if (ret == NET_RX_DROP)
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 4425af9dad40..89810cea0530 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -330,7 +330,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
spin_lock_bh(&bat_priv->forw_bcast_list_lock);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&bat_priv->forw_bcast_list, list) {
-
/* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
@@ -357,7 +356,6 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
spin_lock_bh(&bat_priv->forw_bat_list_lock);
hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
&bat_priv->forw_bat_list, list) {
-
/* if purge_outstanding_packets() was called with an argument
* we delete only packets belonging to the given interface
*/
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6b548fde8e04..3d6816667bfc 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -124,7 +124,6 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
}
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
@@ -181,7 +180,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
goto dropped;
/* Register the client MAC in the transtable */
- batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
+ if (!is_multicast_ether_addr(ethhdr->h_source))
+ batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
/* don't accept stp packets. STP does not help in meshes.
* better use the bridge loop avoidance ...
@@ -480,7 +480,9 @@ struct net_device *batadv_softif_create(const char *name)
atomic_set(&bat_priv->aggregated_ogms, 1);
atomic_set(&bat_priv->bonding, 0);
+#ifdef CONFIG_BATMAN_ADV_BLA
atomic_set(&bat_priv->bridge_loop_avoidance, 0);
+#endif
#ifdef CONFIG_BATMAN_ADV_DAT
atomic_set(&bat_priv->distributed_arp_table, 1);
#endif
@@ -491,7 +493,9 @@ struct net_device *batadv_softif_create(const char *name)
atomic_set(&bat_priv->gw_bandwidth, 41);
atomic_set(&bat_priv->orig_interval, 1000);
atomic_set(&bat_priv->hop_penalty, 30);
+#ifdef CONFIG_BATMAN_ADV_DEBUG
atomic_set(&bat_priv->log_level, 0);
+#endif
atomic_set(&bat_priv->fragmentation, 1);
atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
@@ -581,10 +585,10 @@ static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void batadv_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "B.A.T.M.A.N. advanced");
- strcpy(info->version, BATADV_SOURCE_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, "batman");
+ strlcpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver));
+ strlcpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "batman", sizeof(info->bus_info));
}
static u32 batadv_get_msglevel(struct net_device *dev)
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 22457a7952ba..d4b27b65d6e9 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -29,6 +29,10 @@
#include <linux/crc16.h>
+/* hash class keys */
+static struct lock_class_key batadv_tt_local_hash_lock_class_key;
+static struct lock_class_key batadv_tt_global_hash_lock_class_key;
+
static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
struct batadv_orig_node *orig_node);
static void batadv_tt_purge(struct work_struct *work);
@@ -112,7 +116,6 @@ batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
struct batadv_tt_global_entry,
common);
return tt_global_entry;
-
}
static void
@@ -235,6 +238,9 @@ static int batadv_tt_local_init(struct batadv_priv *bat_priv)
if (!bat_priv->tt.local_hash)
return -ENOMEM;
+ batadv_hash_set_lock_class(bat_priv->tt.local_hash,
+ &batadv_tt_local_hash_lock_class_key);
+
return 0;
}
@@ -249,7 +255,6 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
batadv_choose_orig, tt_global->common.addr);
batadv_tt_global_entry_free_ref(tt_global);
-
}
void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
@@ -305,7 +310,11 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
(uint8_t)atomic_read(&bat_priv->tt.vn));
memcpy(tt_local->common.addr, addr, ETH_ALEN);
- tt_local->common.flags = BATADV_NO_FLAGS;
+ /* The local entry has to be marked as NEW to avoid to send it in
+ * a full table response going out before the next ttvn increment
+ * (consistency check)
+ */
+ tt_local->common.flags = BATADV_TT_CLIENT_NEW;
if (batadv_is_wifi_iface(ifindex))
tt_local->common.flags |= BATADV_TT_CLIENT_WIFI;
atomic_set(&tt_local->common.refcount, 2);
@@ -316,12 +325,6 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
if (batadv_compare_eth(addr, soft_iface->dev_addr))
tt_local->common.flags |= BATADV_TT_CLIENT_NOPURGE;
- /* The local entry has to be marked as NEW to avoid to send it in
- * a full table response going out before the next ttvn increment
- * (consistency check)
- */
- tt_local->common.flags |= BATADV_TT_CLIENT_NEW;
-
hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
batadv_choose_orig, &tt_local->common,
&tt_local->common.hash_entry);
@@ -472,18 +475,27 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
struct batadv_priv *bat_priv = netdev_priv(net_dev);
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common_entry;
+ struct batadv_tt_local_entry *tt_local;
struct batadv_hard_iface *primary_if;
struct hlist_node *node;
struct hlist_head *head;
uint32_t i;
+ int last_seen_secs;
+ int last_seen_msecs;
+ unsigned long last_seen_jiffies;
+ bool no_purge;
+ uint16_t np_flag = BATADV_TT_CLIENT_NOPURGE;
primary_if = batadv_seq_print_text_primary_if_get(seq);
if (!primary_if)
goto out;
seq_printf(seq,
- "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
- net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
+ "Locally retrieved addresses (from %s) announced via TT (TTVN: %u CRC: %#.4x):\n",
+ net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn),
+ bat_priv->tt.local_crc);
+ seq_printf(seq, " %-13s %-7s %-10s\n", "Client", "Flags",
+ "Last seen");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -491,18 +503,29 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
rcu_read_lock();
hlist_for_each_entry_rcu(tt_common_entry, node,
head, hash_entry) {
- seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
+ tt_local = container_of(tt_common_entry,
+ struct batadv_tt_local_entry,
+ common);
+ last_seen_jiffies = jiffies - tt_local->last_seen;
+ last_seen_msecs = jiffies_to_msecs(last_seen_jiffies);
+ last_seen_secs = last_seen_msecs / 1000;
+ last_seen_msecs = last_seen_msecs % 1000;
+
+ no_purge = tt_common_entry->flags & np_flag;
+
+ seq_printf(seq, " * %pM [%c%c%c%c%c] %3u.%03u\n",
tt_common_entry->addr,
(tt_common_entry->flags &
BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
- (tt_common_entry->flags &
- BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
+ no_purge ? 'P' : '.',
(tt_common_entry->flags &
BATADV_TT_CLIENT_NEW ? 'N' : '.'),
(tt_common_entry->flags &
BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
(tt_common_entry->flags &
- BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
+ BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
+ no_purge ? last_seen_secs : 0,
+ no_purge ? last_seen_msecs : 0);
}
rcu_read_unlock();
}
@@ -627,7 +650,6 @@ static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
batadv_tt_local_purge_list(bat_priv, head);
spin_unlock_bh(list_lock);
}
-
}
static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
@@ -676,6 +698,9 @@ static int batadv_tt_global_init(struct batadv_priv *bat_priv)
if (!bat_priv->tt.global_hash)
return -ENOMEM;
+ batadv_hash_set_lock_class(bat_priv->tt.global_hash,
+ &batadv_tt_global_hash_lock_class_key);
+
return 0;
}
@@ -967,10 +992,11 @@ batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
best_entry = batadv_transtable_best_orig(tt_global_entry);
if (best_entry) {
last_ttvn = atomic_read(&best_entry->orig_node->last_ttvn);
- seq_printf(seq, " %c %pM (%3u) via %pM (%3u) [%c%c%c]\n",
+ seq_printf(seq,
+ " %c %pM (%3u) via %pM (%3u) (%#.4x) [%c%c%c]\n",
'*', tt_global_entry->common.addr,
best_entry->ttvn, best_entry->orig_node->orig,
- last_ttvn,
+ last_ttvn, best_entry->orig_node->tt_crc,
(flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
(flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
(flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
@@ -1012,8 +1038,9 @@ int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
seq_printf(seq,
"Globally announced TT entries received via the mesh %s\n",
net_dev->name);
- seq_printf(seq, " %-13s %s %-15s %s %s\n",
- "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
+ seq_printf(seq, " %-13s %s %-15s %s (%-6s) %s\n",
+ "Client", "(TTVN)", "Originator", "(Curr TTVN)", "CRC",
+ "Flags");
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -1049,7 +1076,6 @@ batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
batadv_tt_orig_list_entry_free_ref(orig_entry);
}
spin_unlock_bh(&tt_global_entry->list_lock);
-
}
static void
@@ -1825,7 +1851,6 @@ out:
if (!ret)
kfree_skb(skb);
return ret;
-
}
static bool
@@ -2352,7 +2377,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
}
spin_unlock_bh(list_lock);
}
-
}
static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
@@ -2496,7 +2520,7 @@ void batadv_tt_update_orig(struct batadv_priv *bat_priv,
orig_node->tt_crc != tt_crc) {
request_table:
batadv_dbg(BATADV_DBG_TT, bat_priv,
- "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
+ "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %#.4x last_crc: %#.4x num_changes: %u)\n",
orig_node->orig, ttvn, orig_ttvn, tt_crc,
orig_node->tt_crc, tt_num_changes);
batadv_send_tt_request(bat_priv, orig_node, ttvn,
@@ -2549,7 +2573,6 @@ bool batadv_tt_local_client_is_roaming(struct batadv_priv *bat_priv,
batadv_tt_local_entry_free_ref(tt_local_entry);
out:
return ret;
-
}
bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ae9ac9aca8c5..d8061ac28409 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -119,7 +119,6 @@ struct batadv_orig_node {
spinlock_t ogm_cnt_lock;
/* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */
spinlock_t bcast_seqno_lock;
- spinlock_t tt_list_lock; /* protects tt_list */
atomic_t bond_candidates;
struct list_head bond_list;
};
@@ -273,7 +272,9 @@ struct batadv_priv {
atomic_t bonding; /* boolean */
atomic_t fragmentation; /* boolean */
atomic_t ap_isolation; /* boolean */
+#ifdef CONFIG_BATMAN_ADV_BLA
atomic_t bridge_loop_avoidance; /* boolean */
+#endif
#ifdef CONFIG_BATMAN_ADV_DAT
atomic_t distributed_arp_table; /* boolean */
#endif
@@ -283,12 +284,16 @@ struct batadv_priv {
atomic_t gw_bandwidth; /* gw bandwidth */
atomic_t orig_interval; /* uint */
atomic_t hop_penalty; /* uint */
+#ifdef CONFIG_BATMAN_ADV_DEBUG
atomic_t log_level; /* uint */
+#endif
atomic_t bcast_seqno;
atomic_t bcast_queue_left;
atomic_t batman_queue_left;
char num_ifaces;
+#ifdef CONFIG_BATMAN_ADV_DEBUG
struct batadv_debug_log *debug_log;
+#endif
struct kobject *mesh_obj;
struct dentry *debug_dir;
struct hlist_head forw_bat_list;
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 10aff49fcf25..f56bccf4229e 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -133,7 +133,6 @@ batadv_frag_search_packet(struct list_head *head,
is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD);
list_for_each_entry(tfp, head, list) {
-
if (!tfp->skb)
continue;
@@ -162,7 +161,6 @@ void batadv_frag_list_free(struct list_head *head)
struct batadv_frag_packet_list_entry *pf, *tmp_pf;
if (!list_empty(head)) {
-
list_for_each_entry_safe(pf, tmp_pf, head, list) {
kfree_skb(pf->skb);
list_del(&pf->list);
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 0f65a9de5f74..60eb9b7ca8d1 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -28,6 +28,9 @@
#define BATADV_MAX_VIS_PACKET_SIZE 1000
+/* hash class keys */
+static struct lock_class_key batadv_vis_hash_lock_class_key;
+
static void batadv_start_vis_timer(struct batadv_priv *bat_priv);
/* free the info */
@@ -852,6 +855,9 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
goto err;
}
+ batadv_hash_set_lock_class(bat_priv->vis.hash,
+ &batadv_vis_hash_lock_class_key);
+
bat_priv->vis.my_info = kmalloc(BATADV_MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
if (!bat_priv->vis.my_info)
goto err;
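
The batadv_hash_set_lock_class() calls added for the originator, translation-table and vis hashes all serve one purpose: each hash gets its per-bucket spinlocks assigned a distinct lockdep class, so legitimately nested locking across the different hashes is not flagged as a false-positive deadlock. Conceptually the helper amounts to something like this sketch (paraphrased, assuming one spinlock per bucket in list_locks[]):

        static void set_hash_lock_class(struct batadv_hashtable *hash,
                                        struct lock_class_key *key)
        {
                uint32_t i;

                for (i = 0; i < hash->size; i++)
                        lockdep_set_class(&hash->list_locks[i], key);
        }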
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 7c78e2640190..e1bc090bc00a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -172,7 +172,6 @@ static int br_set_mac_address(struct net_device *dev, void *p)
spin_lock_bh(&br->lock);
if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
br_fdb_change_mac_address(br, addr->sa_data);
br_stp_change_bridge_id(br, addr->sa_data);
@@ -185,10 +184,10 @@ static int br_set_mac_address(struct net_device *dev, void *p)
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "bridge");
- strcpy(info->version, BR_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, "N/A");
+ strlcpy(info->driver, "bridge", sizeof(info->driver));
+ strlcpy(info->version, BR_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
static netdev_features_t br_fix_features(struct net_device *dev,
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 37fe693471a8..2148d474a04f 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -66,14 +66,14 @@ void br_port_carrier_check(struct net_bridge_port *p)
struct net_device *dev = p->dev;
struct net_bridge *br = p->br;
- if (netif_running(dev) && netif_carrier_ok(dev))
+ if (netif_running(dev) && netif_oper_up(dev))
p->path_cost = port_cost(dev);
if (!netif_running(br->dev))
return;
spin_lock_bh(&br->lock);
- if (netif_running(dev) && netif_carrier_ok(dev)) {
+ if (netif_running(dev) && netif_oper_up(dev)) {
if (p->state == BR_STATE_DISABLED)
br_stp_enable_port(p);
} else {
@@ -148,7 +148,7 @@ static void del_nbp(struct net_bridge_port *p)
netdev_rx_handler_unregister(dev);
synchronize_net();
- netdev_set_master(dev, NULL);
+ netdev_upper_dev_unlink(dev, br->dev);
br_multicast_del_port(p);
@@ -364,7 +364,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
goto err3;
- err = netdev_set_master(dev, br->dev);
+ err = netdev_master_upper_dev_link(dev, br->dev);
if (err)
goto err4;
@@ -383,7 +383,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
- if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
+ if (netif_running(dev) && netif_oper_up(dev) &&
(br->dev->flags & IFF_UP))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
@@ -403,7 +403,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
return 0;
err5:
- netdev_set_master(dev, NULL);
+ netdev_upper_dev_unlink(dev, br->dev);
err4:
br_netpoll_disable(p);
err3:
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 5dc66abcc9e2..39ca9796f3f7 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -181,8 +181,11 @@ static int br_set_port_state(struct net_bridge_port *p, u8 state)
if (p->br->stp_enabled == BR_KERNEL_STP)
return -EBUSY;
+ /* if device is not up, change is not allowed
+ * if link is not present, only allowable state is disabled
+ */
if (!netif_running(p->dev) ||
- (!netif_carrier_ok(p->dev) && state != BR_STATE_DISABLED))
+ (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
return -ENETDOWN;
p->state = state;
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index a76b62135558..1644b3e1f947 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -82,7 +82,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
break;
case NETDEV_UP:
- if (netif_carrier_ok(dev) && (br->dev->flags & IFF_UP)) {
+ if (netif_running(br->dev) && netif_oper_up(dev)) {
spin_lock_bh(&br->lock);
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9d5a414a3943..7b5197c7de13 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -54,7 +54,7 @@ void br_stp_enable_bridge(struct net_bridge *br)
br_config_bpdu_generation(br);
list_for_each_entry(p, &br->port_list, list) {
- if ((p->dev->flags & IFF_UP) && netif_carrier_ok(p->dev))
+ if (netif_running(p->dev) && netif_oper_up(p->dev))
br_stp_enable_port(p);
}
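
All of the bridge hunks replace IFF_UP/netif_carrier_ok() checks with netif_running()/netif_oper_up(). The practical difference is that netif_oper_up() consults the RFC 2863 operational state, so a port whose carrier is up but which is administratively marked dormant is not moved into the STP machinery. Roughly, the helper is defined along these lines (paraphrased from netdevice.h):

        static inline bool netif_oper_up(const struct net_device *dev)
        {
                return dev->operstate == IF_OPER_UP ||
                       dev->operstate == IF_OPER_UNKNOWN; /* backward compat */
        }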
diff --git a/net/core/dev.c b/net/core/dev.c
index f64e439b4a00..862eaa744a54 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1857,6 +1857,228 @@ static void netif_setup_tc(struct net_device *dev, unsigned int txq)
}
}
+#ifdef CONFIG_XPS
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P) \
+ rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
+ int cpu, u16 index)
+{
+ struct xps_map *map = NULL;
+ int pos;
+
+ if (dev_maps)
+ map = xmap_dereference(dev_maps->cpu_map[cpu]);
+
+ for (pos = 0; map && pos < map->len; pos++) {
+ if (map->queues[pos] == index) {
+ if (map->len > 1) {
+ map->queues[pos] = map->queues[--map->len];
+ } else {
+ RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
+ kfree_rcu(map, rcu);
+ map = NULL;
+ }
+ break;
+ }
+ }
+
+ return map;
+}
+
+static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
+{
+ struct xps_dev_maps *dev_maps;
+ int cpu, i;
+ bool active = false;
+
+ mutex_lock(&xps_map_mutex);
+ dev_maps = xmap_dereference(dev->xps_maps);
+
+ if (!dev_maps)
+ goto out_no_maps;
+
+ for_each_possible_cpu(cpu) {
+ for (i = index; i < dev->num_tx_queues; i++) {
+ if (!remove_xps_queue(dev_maps, cpu, i))
+ break;
+ }
+ if (i == dev->num_tx_queues)
+ active = true;
+ }
+
+ if (!active) {
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
+ kfree_rcu(dev_maps, rcu);
+ }
+
+ for (i = index; i < dev->num_tx_queues; i++)
+ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
+ NUMA_NO_NODE);
+
+out_no_maps:
+ mutex_unlock(&xps_map_mutex);
+}
+
+static struct xps_map *expand_xps_map(struct xps_map *map,
+ int cpu, u16 index)
+{
+ struct xps_map *new_map;
+ int alloc_len = XPS_MIN_MAP_ALLOC;
+ int i, pos;
+
+ for (pos = 0; map && pos < map->len; pos++) {
+ if (map->queues[pos] != index)
+ continue;
+ return map;
+ }
+
+ /* Need to add queue to this CPU's existing map */
+ if (map) {
+ if (pos < map->alloc_len)
+ return map;
+
+ alloc_len = map->alloc_len * 2;
+ }
+
+ /* Need to allocate new map to store queue on this CPU's map */
+ new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
+ cpu_to_node(cpu));
+ if (!new_map)
+ return NULL;
+
+ for (i = 0; i < pos; i++)
+ new_map->queues[i] = map->queues[i];
+ new_map->alloc_len = alloc_len;
+ new_map->len = pos;
+
+ return new_map;
+}
+
+int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, u16 index)
+{
+ struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
+ struct xps_map *map, *new_map;
+ int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
+ int cpu, numa_node_id = -2;
+ bool active = false;
+
+ mutex_lock(&xps_map_mutex);
+
+ dev_maps = xmap_dereference(dev->xps_maps);
+
+ /* allocate memory for queue storage */
+ for_each_online_cpu(cpu) {
+ if (!cpumask_test_cpu(cpu, mask))
+ continue;
+
+ if (!new_dev_maps)
+ new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
+ if (!new_dev_maps)
+ return -ENOMEM;
+
+ map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
+ NULL;
+
+ map = expand_xps_map(map, cpu, index);
+ if (!map)
+ goto error;
+
+ RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
+ }
+
+ if (!new_dev_maps)
+ goto out_no_new_maps;
+
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
+ /* add queue to CPU maps */
+ int pos = 0;
+
+ map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
+ while ((pos < map->len) && (map->queues[pos] != index))
+ pos++;
+
+ if (pos == map->len)
+ map->queues[map->len++] = index;
+#ifdef CONFIG_NUMA
+ if (numa_node_id == -2)
+ numa_node_id = cpu_to_node(cpu);
+ else if (numa_node_id != cpu_to_node(cpu))
+ numa_node_id = -1;
+#endif
+ } else if (dev_maps) {
+ /* fill in the new device map from the old device map */
+ map = xmap_dereference(dev_maps->cpu_map[cpu]);
+ RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
+ }
+
+ }
+
+ rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+
+ /* Cleanup old maps */
+ if (dev_maps) {
+ for_each_possible_cpu(cpu) {
+ new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
+ map = xmap_dereference(dev_maps->cpu_map[cpu]);
+ if (map && map != new_map)
+ kfree_rcu(map, rcu);
+ }
+
+ kfree_rcu(dev_maps, rcu);
+ }
+
+ dev_maps = new_dev_maps;
+ active = true;
+
+out_no_new_maps:
+ /* update Tx queue numa node */
+ netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
+ (numa_node_id >= 0) ? numa_node_id :
+ NUMA_NO_NODE);
+
+ if (!dev_maps)
+ goto out_no_maps;
+
+ /* removes queue from unused CPUs */
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
+ continue;
+
+ if (remove_xps_queue(dev_maps, cpu, index))
+ active = true;
+ }
+
+ /* free map if not active */
+ if (!active) {
+ RCU_INIT_POINTER(dev->xps_maps, NULL);
+ kfree_rcu(dev_maps, rcu);
+ }
+
+out_no_maps:
+ mutex_unlock(&xps_map_mutex);
+
+ return 0;
+error:
+ /* remove any maps that we added */
+ for_each_possible_cpu(cpu) {
+ new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
+ map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
+ NULL;
+ if (new_map && new_map != map)
+ kfree(new_map);
+ }
+
+ mutex_unlock(&xps_map_mutex);
+
+ kfree(new_dev_maps);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(netif_set_xps_queue);
+
+#endif
/*
* Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
* greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
@@ -1880,8 +2102,12 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
if (dev->num_tc)
netif_setup_tc(dev, txq);
- if (txq < dev->real_num_tx_queues)
+ if (txq < dev->real_num_tx_queues) {
qdisc_reset_all_tx_gt(dev, txq);
+#ifdef CONFIG_XPS
+ netif_reset_xps_queues_gt(dev, txq);
+#endif
+ }
}
dev->real_num_tx_queues = txq;
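
The new netif_set_xps_queue() gives drivers a programmatic way to install XPS (transmit packet steering) maps instead of relying on the per-queue sysfs xps_cpus attribute. A hedged driver-side sketch; my_setup_xps() and the one-queue-per-online-CPU pairing are illustrative, not taken from this patch:

        static void my_setup_xps(struct net_device *dev)
        {
                unsigned int i;

                for (i = 0; i < dev->real_num_tx_queues; i++) {
                        cpumask_t mask;

                        cpumask_clear(&mask);
                        cpumask_set_cpu(i % num_online_cpus(), &mask);
                        netif_set_xps_queue(dev, &mask, i); /* steer queue i */
                }
        }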
@@ -2495,43 +2721,71 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
#endif
}
-struct netdev_queue *netdev_pick_tx(struct net_device *dev,
- struct sk_buff *skb)
+u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
- int queue_index;
- const struct net_device_ops *ops = dev->netdev_ops;
-
- if (dev->real_num_tx_queues == 1)
- queue_index = 0;
- else if (ops->ndo_select_queue) {
- queue_index = ops->ndo_select_queue(dev, skb);
- queue_index = dev_cap_txqueue(dev, queue_index);
- } else {
- struct sock *sk = skb->sk;
- queue_index = sk_tx_queue_get(sk);
-
- if (queue_index < 0 || skb->ooo_okay ||
- queue_index >= dev->real_num_tx_queues) {
- int old_index = queue_index;
+ struct sock *sk = skb->sk;
+ int queue_index = sk_tx_queue_get(sk);
- queue_index = get_xps_queue(dev, skb);
- if (queue_index < 0)
- queue_index = skb_tx_hash(dev, skb);
+ if (queue_index < 0 || skb->ooo_okay ||
+ queue_index >= dev->real_num_tx_queues) {
+ int new_index = get_xps_queue(dev, skb);
+ if (new_index < 0)
+ new_index = skb_tx_hash(dev, skb);
- if (queue_index != old_index && sk) {
- struct dst_entry *dst =
+ if (queue_index != new_index && sk) {
+ struct dst_entry *dst =
rcu_dereference_check(sk->sk_dst_cache, 1);
- if (dst && skb_dst(skb) == dst)
- sk_tx_queue_set(sk, queue_index);
- }
+ if (dst && skb_dst(skb) == dst)
+ sk_tx_queue_set(sk, queue_index);
+
}
+
+ queue_index = new_index;
+ }
+
+ return queue_index;
+}
+EXPORT_SYMBOL(__netdev_pick_tx);
+
+struct netdev_queue *netdev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ int queue_index = 0;
+
+ if (dev->real_num_tx_queues != 1) {
+ const struct net_device_ops *ops = dev->netdev_ops;
+ if (ops->ndo_select_queue)
+ queue_index = ops->ndo_select_queue(dev, skb);
+ else
+ queue_index = __netdev_pick_tx(dev, skb);
+ queue_index = dev_cap_txqueue(dev, queue_index);
}
skb_set_queue_mapping(skb, queue_index);
return netdev_get_tx_queue(dev, queue_index);
}
+static void qdisc_pkt_len_init(struct sk_buff *skb)
+{
+ const struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ qdisc_skb_cb(skb)->pkt_len = skb->len;
+
+ /* To get more precise estimation of bytes sent on wire,
+ * we add to pkt_len the headers size of all segments
+ */
+ if (shinfo->gso_size) {
+ unsigned int hdr_len = skb_transport_offset(skb);
+
+ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ hdr_len += tcp_hdrlen(skb);
+ else
+ hdr_len += sizeof(struct udphdr);
+ qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len;
+ }
+}
+
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
struct net_device *dev,
struct netdev_queue *txq)
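
A quick worked example of what qdisc_pkt_len_init() accounts for on a GSO skb (illustrative numbers): with skb->len = 60000, gso_segs = 42 and 54 bytes of Ethernet + IPv4 + TCP headers per segment (skb_transport_offset() = 34 plus tcp_hdrlen() = 20),

        pkt_len = 60000 + (42 - 1) * 54 = 62214 bytes

which is much closer to what actually hits the wire than the bare skb->len the qdisc saw before.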
@@ -2540,7 +2794,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
bool contended;
int rc;
- qdisc_skb_cb(skb)->pkt_len = skb->len;
+ qdisc_pkt_len_init(skb);
qdisc_calculate_pkt_len(skb, q);
/*
* Heuristic to force contended enqueues to serialize on a
@@ -3352,7 +3606,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
orig_dev = skb->dev;
skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
+ if (!skb_transport_header_was_set(skb))
+ skb_reset_transport_header(skb);
skb_reset_mac_len(skb);
pt_prev = NULL;
@@ -4600,64 +4855,231 @@ static int __init dev_proc_init(void)
#endif /* CONFIG_PROC_FS */
+struct netdev_upper {
+ struct net_device *dev;
+ bool master;
+ struct list_head list;
+ struct rcu_head rcu;
+ struct list_head search_list;
+};
+
+static void __append_search_uppers(struct list_head *search_list,
+ struct net_device *dev)
+{
+ struct netdev_upper *upper;
+
+ list_for_each_entry(upper, &dev->upper_dev_list, list) {
+ /* check if this upper is not already in search list */
+ if (list_empty(&upper->search_list))
+ list_add_tail(&upper->search_list, search_list);
+ }
+}
+
+static bool __netdev_search_upper_dev(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ LIST_HEAD(search_list);
+ struct netdev_upper *upper;
+ struct netdev_upper *tmp;
+ bool ret = false;
+
+ __append_search_uppers(&search_list, dev);
+ list_for_each_entry(upper, &search_list, search_list) {
+ if (upper->dev == upper_dev) {
+ ret = true;
+ break;
+ }
+ __append_search_uppers(&search_list, upper->dev);
+ }
+ list_for_each_entry_safe(upper, tmp, &search_list, search_list)
+ INIT_LIST_HEAD(&upper->search_list);
+ return ret;
+}
+
+static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ struct netdev_upper *upper;
+
+ list_for_each_entry(upper, &dev->upper_dev_list, list) {
+ if (upper->dev == upper_dev)
+ return upper;
+ }
+ return NULL;
+}
+
/**
- * netdev_set_master - set up master pointer
- * @slave: slave device
- * @master: new master device
- *
- * Changes the master device of the slave. Pass %NULL to break the
- * bonding. The caller must hold the RTNL semaphore. On a failure
- * a negative errno code is returned. On success the reference counts
- * are adjusted and the function returns zero.
+ * netdev_has_upper_dev - Check if device is linked to an upper device
+ * @dev: device
+ * @upper_dev: upper device to check
+ *
+ * Find out if a device is linked to specified upper device and return true
+ * in case it is. Note that this checks only immediate upper device,
+ * not through a complete stack of devices. The caller must hold the RTNL lock.
*/
-int netdev_set_master(struct net_device *slave, struct net_device *master)
+bool netdev_has_upper_dev(struct net_device *dev,
+ struct net_device *upper_dev)
{
- struct net_device *old = slave->master;
+ ASSERT_RTNL();
+ return __netdev_find_upper(dev, upper_dev);
+}
+EXPORT_SYMBOL(netdev_has_upper_dev);
+
+/**
+ * netdev_has_any_upper_dev - Check if device is linked to some device
+ * @dev: device
+ *
+ * Find out if a device is linked to an upper device and return true in case
+ * it is. The caller must hold the RTNL lock.
+ */
+bool netdev_has_any_upper_dev(struct net_device *dev)
+{
ASSERT_RTNL();
- if (master) {
- if (old)
- return -EBUSY;
- dev_hold(master);
- }
+ return !list_empty(&dev->upper_dev_list);
+}
+EXPORT_SYMBOL(netdev_has_any_upper_dev);
+
+/**
+ * netdev_master_upper_dev_get - Get master upper device
+ * @dev: device
+ *
+ * Find a master upper device and return pointer to it or NULL in case
+ * it's not there. The caller must hold the RTNL lock.
+ */
+struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
+{
+ struct netdev_upper *upper;
- slave->master = master;
+ ASSERT_RTNL();
- if (old)
- dev_put(old);
- return 0;
+ if (list_empty(&dev->upper_dev_list))
+ return NULL;
+
+ upper = list_first_entry(&dev->upper_dev_list,
+ struct netdev_upper, list);
+ if (likely(upper->master))
+ return upper->dev;
+ return NULL;
}
-EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_master_upper_dev_get);
/**
- * netdev_set_bond_master - set up bonding master/slave pair
- * @slave: slave device
- * @master: new master device
- *
- * Changes the master device of the slave. Pass %NULL to break the
- * bonding. The caller must hold the RTNL semaphore. On a failure
- * a negative errno code is returned. On success %RTM_NEWLINK is sent
- * to the routing socket and the function returns zero.
+ * netdev_master_upper_dev_get_rcu - Get master upper device
+ * @dev: device
+ *
+ * Find a master upper device and return pointer to it or NULL in case
+ * it's not there. The caller must hold the RCU read lock.
*/
-int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
+struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
- int err;
+ struct netdev_upper *upper;
+
+ upper = list_first_or_null_rcu(&dev->upper_dev_list,
+ struct netdev_upper, list);
+ if (upper && likely(upper->master))
+ return upper->dev;
+ return NULL;
+}
+EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
+
+static int __netdev_upper_dev_link(struct net_device *dev,
+ struct net_device *upper_dev, bool master)
+{
+ struct netdev_upper *upper;
ASSERT_RTNL();
- err = netdev_set_master(slave, master);
- if (err)
- return err;
+ if (dev == upper_dev)
+ return -EBUSY;
+
+ /* To prevent loops, check if dev is not upper device to upper_dev. */
+ if (__netdev_search_upper_dev(upper_dev, dev))
+ return -EBUSY;
+
+ if (__netdev_find_upper(dev, upper_dev))
+ return -EEXIST;
+
+ if (master && netdev_master_upper_dev_get(dev))
+ return -EBUSY;
+
+ upper = kmalloc(sizeof(*upper), GFP_KERNEL);
+ if (!upper)
+ return -ENOMEM;
+
+ upper->dev = upper_dev;
+ upper->master = master;
+ INIT_LIST_HEAD(&upper->search_list);
+
+ /* Ensure that master upper link is always the first item in list. */
if (master)
- slave->flags |= IFF_SLAVE;
+ list_add_rcu(&upper->list, &dev->upper_dev_list);
else
- slave->flags &= ~IFF_SLAVE;
+ list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
+ dev_hold(upper_dev);
- rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
return 0;
}
-EXPORT_SYMBOL(netdev_set_bond_master);
+
+/**
+ * netdev_upper_dev_link - Add a link to the upper device
+ * @dev: device
+ * @upper_dev: new upper device
+ *
+ * Adds a link to device which is upper to this one. The caller must hold
+ * the RTNL lock. On a failure a negative errno code is returned.
+ * On success the reference counts are adjusted and the function
+ * returns zero.
+ */
+int netdev_upper_dev_link(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ return __netdev_upper_dev_link(dev, upper_dev, false);
+}
+EXPORT_SYMBOL(netdev_upper_dev_link);
+
+/**
+ * netdev_master_upper_dev_link - Add a master link to the upper device
+ * @dev: device
+ * @upper_dev: new upper device
+ *
+ * Adds a link to device which is upper to this one. In this case, only
+ * one master upper device can be linked, although other non-master devices
+ * might be linked as well. The caller must hold the RTNL lock.
+ * On a failure a negative errno code is returned. On success the reference
+ * counts are adjusted and the function returns zero.
+ */
+int netdev_master_upper_dev_link(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ return __netdev_upper_dev_link(dev, upper_dev, true);
+}
+EXPORT_SYMBOL(netdev_master_upper_dev_link);
+
+/**
+ * netdev_upper_dev_unlink - Removes a link to upper device
+ * @dev: device
+ * @upper_dev: new upper device
+ *
+ * Removes a link to device which is upper to this one. The caller must hold
+ * the RTNL lock.
+ */
+void netdev_upper_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ struct netdev_upper *upper;
+
+ ASSERT_RTNL();
+
+ upper = __netdev_find_upper(dev, upper_dev);
+ if (!upper)
+ return;
+ list_del_rcu(&upper->list);
+ dev_put(upper_dev);
+ kfree_rcu(upper, rcu);
+}
+EXPORT_SYMBOL(netdev_upper_dev_unlink);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
@@ -5020,13 +5442,34 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
if (!netif_device_present(dev))
return -ENODEV;
err = ops->ndo_set_mac_address(dev, sa);
- if (!err)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ if (err)
+ return err;
+ dev->addr_assign_type = NET_ADDR_SET;
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
add_device_randomness(dev->dev_addr, dev->addr_len);
- return err;
+ return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
+/**
+ * dev_change_carrier - Change device carrier
+ * @dev: device
+ * @new_carrier: new value
+ *
+ * Change device carrier
+ */
+int dev_change_carrier(struct net_device *dev, bool new_carrier)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_change_carrier)
+ return -EOPNOTSUPP;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return ops->ndo_change_carrier(dev, new_carrier);
+}
+EXPORT_SYMBOL(dev_change_carrier);
+
/*
* Perform the SIOCxIFxxx calls, inside rcu_read_lock()
*/
@@ -5482,11 +5925,15 @@ static void rollback_registered_many(struct list_head *head)
if (dev->netdev_ops->ndo_uninit)
dev->netdev_ops->ndo_uninit(dev);
- /* Notifier chain MUST detach us from master device. */
- WARN_ON(dev->master);
+ /* Notifier chain MUST detach us from all upper devices. */
+ WARN_ON(netdev_has_any_upper_dev(dev));
/* Remove entries from kobject tree */
netdev_unregister_kobject(dev);
+#ifdef CONFIG_XPS
+ /* Remove XPS queueing entries */
+ netif_reset_xps_queues_gt(dev, 0);
+#endif
}
synchronize_net();
@@ -5815,6 +6262,13 @@ int register_netdevice(struct net_device *dev)
list_netdevice(dev);
add_device_randomness(dev->dev_addr, dev->addr_len);
+ /* If the device has permanent device address, driver should
+ * set dev_addr and also addr_assign_type should be set to
+ * NET_ADDR_PERM (default value).
+ */
+ if (dev->addr_assign_type == NET_ADDR_PERM)
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
ret = notifier_to_errno(ret);
@@ -6199,6 +6653,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
INIT_LIST_HEAD(&dev->napi_list);
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->link_watch_list);
+ INIT_LIST_HEAD(&dev->upper_dev_list);
dev->priv_flags = IFF_XMIT_DST_RELEASE;
setup(dev);
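
Taken together, the net/core/dev.c changes replace the single dev->master pointer with a list of upper devices, which is why netdev_set_master() and netdev_set_bond_master() go away. A hedged sketch of how a master-type driver is expected to use the new API; my_enslave()/my_release() are hypothetical names, and the IFF_SLAVE flag handling and netlink notifications that bonding performs are elided:

        static int my_enslave(struct net_device *master_dev,
                              struct net_device *slave_dev)
        {
                int err;

                err = netdev_master_upper_dev_link(slave_dev, master_dev);
                if (err)
                        return err;     /* e.g. -EBUSY if a master is already linked */
                /* ... driver-specific enslave work ... */
                return 0;
        }

        static void my_release(struct net_device *master_dev,
                               struct net_device *slave_dev)
        {
                netdev_upper_dev_unlink(slave_dev, master_dev);
        }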
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a8705432e4b1..d9d55209db67 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -175,7 +175,7 @@ static int __ethtool_get_sset_count(struct net_device *dev, int sset)
if (sset == ETH_SS_FEATURES)
return ARRAY_SIZE(netdev_features_strings);
- if (ops && ops->get_sset_count && ops->get_strings)
+ if (ops->get_sset_count && ops->get_strings)
return ops->get_sset_count(dev, sset);
else
return -EOPNOTSUPP;
@@ -311,7 +311,7 @@ int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
ASSERT_RTNL();
- if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
+ if (!dev->ethtool_ops->get_settings)
return -EOPNOTSUPP;
memset(cmd, 0, sizeof(struct ethtool_cmd));
@@ -355,7 +355,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
memset(&info, 0, sizeof(info));
info.cmd = ETHTOOL_GDRVINFO;
- if (ops && ops->get_drvinfo) {
+ if (ops->get_drvinfo) {
ops->get_drvinfo(dev, &info);
} else if (dev->dev.parent && dev->dev.parent->driver) {
strlcpy(info.bus_info, dev_name(dev->dev.parent),
@@ -370,7 +370,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
* this method of obtaining string set info is deprecated;
* Use ETHTOOL_GSSET_INFO instead.
*/
- if (ops && ops->get_sset_count) {
+ if (ops->get_sset_count) {
int rc;
rc = ops->get_sset_count(dev, ETH_SS_TEST);
@@ -383,9 +383,9 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
if (rc >= 0)
info.n_priv_flags = rc;
}
- if (ops && ops->get_regs_len)
+ if (ops->get_regs_len)
info.regdump_len = ops->get_regs_len(dev);
- if (ops && ops->get_eeprom_len)
+ if (ops->get_eeprom_len)
info.eedump_len = ops->get_eeprom_len(dev);
if (copy_to_user(useraddr, &info, sizeof(info)))
@@ -590,13 +590,14 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
struct ethtool_rxnfc rx_rings;
u32 user_size, dev_size, i;
u32 *indir;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
int ret;
- if (!dev->ethtool_ops->get_rxfh_indir_size ||
- !dev->ethtool_ops->set_rxfh_indir ||
- !dev->ethtool_ops->get_rxnfc)
+ if (!ops->get_rxfh_indir_size || !ops->set_rxfh_indir ||
+ !ops->get_rxnfc)
return -EOPNOTSUPP;
- dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev);
+
+ dev_size = ops->get_rxfh_indir_size(dev);
if (dev_size == 0)
return -EOPNOTSUPP;
@@ -613,7 +614,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
return -ENOMEM;
rx_rings.cmd = ETHTOOL_GRXRINGS;
- ret = dev->ethtool_ops->get_rxnfc(dev, &rx_rings, NULL);
+ ret = ops->get_rxnfc(dev, &rx_rings, NULL);
if (ret)
goto out;
@@ -639,7 +640,7 @@ static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
}
}
- ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
+ ret = ops->set_rxfh_indir(dev, indir);
out:
kfree(indir);
@@ -1082,9 +1083,10 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
{
struct ethtool_value id;
static bool busy;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
int rc;
- if (!dev->ethtool_ops->set_phys_id)
+ if (!ops->set_phys_id)
return -EOPNOTSUPP;
if (busy)
@@ -1093,7 +1095,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&id, useraddr, sizeof(id)))
return -EFAULT;
- rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
+ rc = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
if (rc < 0)
return rc;
@@ -1118,7 +1120,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
i = n;
do {
rtnl_lock();
- rc = dev->ethtool_ops->set_phys_id(dev,
+ rc = ops->set_phys_id(dev,
(i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
rtnl_unlock();
if (rc)
@@ -1133,7 +1135,7 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
dev_put(dev);
busy = false;
- (void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
+ (void) ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
return rc;
}
@@ -1275,7 +1277,7 @@ static int ethtool_get_dump_flag(struct net_device *dev,
struct ethtool_dump dump;
const struct ethtool_ops *ops = dev->ethtool_ops;
- if (!dev->ethtool_ops->get_dump_flag)
+ if (!ops->get_dump_flag)
return -EOPNOTSUPP;
if (copy_from_user(&dump, useraddr, sizeof(dump)))
@@ -1299,8 +1301,7 @@ static int ethtool_get_dump_data(struct net_device *dev,
const struct ethtool_ops *ops = dev->ethtool_ops;
void *data = NULL;
- if (!dev->ethtool_ops->get_dump_data ||
- !dev->ethtool_ops->get_dump_flag)
+ if (!ops->get_dump_data || !ops->get_dump_flag)
return -EOPNOTSUPP;
if (copy_from_user(&dump, useraddr, sizeof(dump)))
@@ -1346,13 +1347,9 @@ static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
info.cmd = ETHTOOL_GET_TS_INFO;
if (phydev && phydev->drv && phydev->drv->ts_info) {
-
err = phydev->drv->ts_info(phydev, &info);
-
- } else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) {
-
+ } else if (ops->get_ts_info) {
err = ops->get_ts_info(dev, &info);
-
} else {
info.so_timestamping =
SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/net/core/filter.c b/net/core/filter.c
index c23543cba132..2ead2a9b1859 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -532,6 +532,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
};
int pc;
+ bool anc_found;
if (flen == 0 || flen > BPF_MAXINSNS)
return -EINVAL;
@@ -592,8 +593,10 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
case BPF_S_LD_W_ABS:
case BPF_S_LD_H_ABS:
case BPF_S_LD_B_ABS:
+ anc_found = false;
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \
code = BPF_S_ANC_##CODE; \
+ anc_found = true; \
break
switch (ftest->k) {
ANCILLARY(PROTOCOL);
@@ -610,6 +613,10 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
ANCILLARY(VLAN_TAG);
ANCILLARY(VLAN_TAG_PRESENT);
}
+
+ /* ancillary operation unknown or unsupported */
+ if (anc_found == false && ftest->k >= SKF_AD_OFF)
+ return -EINVAL;
}
ftest->code = code;
}
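
For reference, the anc_found check added above only affects absolute loads whose offset falls in the ancillary range. A hedged sketch of a classic BPF program that still validates (illustrative only; an offset such as SKF_AD_OFF + 99, with no matching ANCILLARY() entry, would now make sk_chk_filter() return -EINVAL instead of being treated as a packet load):

#include <linux/filter.h>

static const struct sock_filter example_prog[] = {
	/* A = skb->protocol via the ancillary load recognized above */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_PROTOCOL),
	/* accept A bytes of the packet */
	BPF_STMT(BPF_RET | BPF_A, 0),
};
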
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 28c5f5aa7ca7..a5b89a6fec6d 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -126,6 +126,19 @@ static ssize_t show_broadcast(struct device *dev,
return -EINVAL;
}
+static int change_carrier(struct net_device *net, unsigned long new_carrier)
+{
+ if (!netif_running(net))
+ return -EINVAL;
+ return dev_change_carrier(net, (bool) new_carrier);
+}
+
+static ssize_t store_carrier(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_carrier);
+}
+
static ssize_t show_carrier(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -331,7 +344,7 @@ static struct device_attribute net_class_attributes[] = {
__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
__ATTR(address, S_IRUGO, show_address, NULL),
__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
- __ATTR(carrier, S_IRUGO, show_carrier, NULL),
+ __ATTR(carrier, S_IRUGO | S_IWUSR, show_carrier, store_carrier),
__ATTR(speed, S_IRUGO, show_speed, NULL),
__ATTR(duplex, S_IRUGO, show_duplex, NULL),
__ATTR(dormant, S_IRUGO, show_dormant, NULL),
@@ -989,68 +1002,14 @@ static ssize_t show_xps_map(struct netdev_queue *queue,
return len;
}
-static DEFINE_MUTEX(xps_map_mutex);
-#define xmap_dereference(P) \
- rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
-
-static void xps_queue_release(struct netdev_queue *queue)
-{
- struct net_device *dev = queue->dev;
- struct xps_dev_maps *dev_maps;
- struct xps_map *map;
- unsigned long index;
- int i, pos, nonempty = 0;
-
- index = get_netdev_queue_index(queue);
-
- mutex_lock(&xps_map_mutex);
- dev_maps = xmap_dereference(dev->xps_maps);
-
- if (dev_maps) {
- for_each_possible_cpu(i) {
- map = xmap_dereference(dev_maps->cpu_map[i]);
- if (!map)
- continue;
-
- for (pos = 0; pos < map->len; pos++)
- if (map->queues[pos] == index)
- break;
-
- if (pos < map->len) {
- if (map->len > 1)
- map->queues[pos] =
- map->queues[--map->len];
- else {
- RCU_INIT_POINTER(dev_maps->cpu_map[i],
- NULL);
- kfree_rcu(map, rcu);
- map = NULL;
- }
- }
- if (map)
- nonempty = 1;
- }
-
- if (!nonempty) {
- RCU_INIT_POINTER(dev->xps_maps, NULL);
- kfree_rcu(dev_maps, rcu);
- }
- }
- mutex_unlock(&xps_map_mutex);
-}
-
static ssize_t store_xps_map(struct netdev_queue *queue,
struct netdev_queue_attribute *attribute,
const char *buf, size_t len)
{
struct net_device *dev = queue->dev;
- cpumask_var_t mask;
- int err, i, cpu, pos, map_len, alloc_len, need_set;
unsigned long index;
- struct xps_map *map, *new_map;
- struct xps_dev_maps *dev_maps, *new_dev_maps;
- int nonempty = 0;
- int numa_node_id = -2;
+ cpumask_var_t mask;
+ int err;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -1066,105 +1025,11 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
return err;
}
- new_dev_maps = kzalloc(max_t(unsigned int,
- XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
- if (!new_dev_maps) {
- free_cpumask_var(mask);
- return -ENOMEM;
- }
-
- mutex_lock(&xps_map_mutex);
-
- dev_maps = xmap_dereference(dev->xps_maps);
-
- for_each_possible_cpu(cpu) {
- map = dev_maps ?
- xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
- new_map = map;
- if (map) {
- for (pos = 0; pos < map->len; pos++)
- if (map->queues[pos] == index)
- break;
- map_len = map->len;
- alloc_len = map->alloc_len;
- } else
- pos = map_len = alloc_len = 0;
-
- need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
-#ifdef CONFIG_NUMA
- if (need_set) {
- if (numa_node_id == -2)
- numa_node_id = cpu_to_node(cpu);
- else if (numa_node_id != cpu_to_node(cpu))
- numa_node_id = -1;
- }
-#endif
- if (need_set && pos >= map_len) {
- /* Need to add queue to this CPU's map */
- if (map_len >= alloc_len) {
- alloc_len = alloc_len ?
- 2 * alloc_len : XPS_MIN_MAP_ALLOC;
- new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
- GFP_KERNEL,
- cpu_to_node(cpu));
- if (!new_map)
- goto error;
- new_map->alloc_len = alloc_len;
- for (i = 0; i < map_len; i++)
- new_map->queues[i] = map->queues[i];
- new_map->len = map_len;
- }
- new_map->queues[new_map->len++] = index;
- } else if (!need_set && pos < map_len) {
- /* Need to remove queue from this CPU's map */
- if (map_len > 1)
- new_map->queues[pos] =
- new_map->queues[--new_map->len];
- else
- new_map = NULL;
- }
- RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
- }
-
- /* Cleanup old maps */
- for_each_possible_cpu(cpu) {
- map = dev_maps ?
- xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
- if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
- kfree_rcu(map, rcu);
- if (new_dev_maps->cpu_map[cpu])
- nonempty = 1;
- }
-
- if (nonempty) {
- rcu_assign_pointer(dev->xps_maps, new_dev_maps);
- } else {
- kfree(new_dev_maps);
- RCU_INIT_POINTER(dev->xps_maps, NULL);
- }
-
- if (dev_maps)
- kfree_rcu(dev_maps, rcu);
-
- netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
- NUMA_NO_NODE);
-
- mutex_unlock(&xps_map_mutex);
+ err = netif_set_xps_queue(dev, mask, index);
free_cpumask_var(mask);
- return len;
-error:
- mutex_unlock(&xps_map_mutex);
-
- if (new_dev_maps)
- for_each_possible_cpu(i)
- kfree(rcu_dereference_protected(
- new_dev_maps->cpu_map[i],
- 1));
- kfree(new_dev_maps);
- free_cpumask_var(mask);
- return -ENOMEM;
+ return err ? : len;
}
static struct netdev_queue_attribute xps_cpus_attribute =
@@ -1183,10 +1048,6 @@ static void netdev_queue_release(struct kobject *kobj)
{
struct netdev_queue *queue = to_netdev_queue(kobj);
-#ifdef CONFIG_XPS
- xps_queue_release(queue);
-#endif
-
memset(kobj, 0, sizeof(*kobj));
dev_put(queue->dev);
}
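
The store_xps_map() simplification above moves all of the map rebuilding into netif_set_xps_queue(), which drivers can also call directly. A hedged sketch (the helper name and CPU choice are made up for illustration):

static int example_pin_queue0(struct net_device *dev)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Steer transmits issued on CPUs 0 and 1 to TX queue 0. */
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);
	err = netif_set_xps_queue(dev, mask, 0);

	free_cpumask_var(mask);
	return err;
}
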
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3151acf5ec13..9f0506726101 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -29,6 +29,9 @@
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
+#include <net/addrconf.h>
+#include <net/ndisc.h>
+#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
@@ -55,7 +58,7 @@ static atomic_t trapped;
MAX_UDP_CHUNK)
static void zap_completion_queue(void);
-static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
+static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);
@@ -181,13 +184,13 @@ static void poll_napi(struct net_device *dev)
}
}
-static void service_arp_queue(struct netpoll_info *npi)
+static void service_neigh_queue(struct netpoll_info *npi)
{
if (npi) {
struct sk_buff *skb;
- while ((skb = skb_dequeue(&npi->arp_tx)))
- netpoll_arp_reply(skb, npi);
+ while ((skb = skb_dequeue(&npi->neigh_tx)))
+ netpoll_neigh_reply(skb, npi);
}
}
@@ -210,17 +213,20 @@ static void netpoll_poll_dev(struct net_device *dev)
if (dev->flags & IFF_SLAVE) {
if (ni) {
- struct net_device *bond_dev = dev->master;
+ struct net_device *bond_dev;
struct sk_buff *skb;
- struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
- while ((skb = skb_dequeue(&ni->arp_tx))) {
+ struct netpoll_info *bond_ni;
+
+ bond_dev = netdev_master_upper_dev_get_rcu(dev);
+ bond_ni = rcu_dereference_bh(bond_dev->npinfo);
+ while ((skb = skb_dequeue(&ni->neigh_tx))) {
skb->dev = bond_dev;
- skb_queue_tail(&bond_ni->arp_tx, skb);
+ skb_queue_tail(&bond_ni->neigh_tx, skb);
}
}
}
- service_arp_queue(ni);
+ service_neigh_queue(ni);
zap_completion_queue();
}
@@ -381,9 +387,14 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
struct iphdr *iph;
struct ethhdr *eth;
static atomic_t ip_ident;
+ struct ipv6hdr *ip6h;
udp_len = len + sizeof(*udph);
- ip_len = udp_len + sizeof(*iph);
+ if (np->ipv6)
+ ip_len = udp_len + sizeof(*ip6h);
+ else
+ ip_len = udp_len + sizeof(*iph);
+
total_len = ip_len + LL_RESERVED_SPACE(np->dev);
skb = find_skb(np, total_len + np->dev->needed_tailroom,
@@ -400,34 +411,66 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
udph->source = htons(np->local_port);
udph->dest = htons(np->remote_port);
udph->len = htons(udp_len);
- udph->check = 0;
- udph->check = csum_tcpudp_magic(np->local_ip,
- np->remote_ip,
- udp_len, IPPROTO_UDP,
- csum_partial(udph, udp_len, 0));
- if (udph->check == 0)
- udph->check = CSUM_MANGLED_0;
-
- skb_push(skb, sizeof(*iph));
- skb_reset_network_header(skb);
- iph = ip_hdr(skb);
-
- /* iph->version = 4; iph->ihl = 5; */
- put_unaligned(0x45, (unsigned char *)iph);
- iph->tos = 0;
- put_unaligned(htons(ip_len), &(iph->tot_len));
- iph->id = htons(atomic_inc_return(&ip_ident));
- iph->frag_off = 0;
- iph->ttl = 64;
- iph->protocol = IPPROTO_UDP;
- iph->check = 0;
- put_unaligned(np->local_ip, &(iph->saddr));
- put_unaligned(np->remote_ip, &(iph->daddr));
- iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
-
- eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
- skb_reset_mac_header(skb);
- skb->protocol = eth->h_proto = htons(ETH_P_IP);
+
+ if (np->ipv6) {
+ udph->check = 0;
+ udph->check = csum_ipv6_magic(&np->local_ip.in6,
+ &np->remote_ip.in6,
+ udp_len, IPPROTO_UDP,
+ csum_partial(udph, udp_len, 0));
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+
+ skb_push(skb, sizeof(*ip6h));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+
+ /* ip6h->version = 6; ip6h->priority = 0; */
+ put_unaligned(0x60, (unsigned char *)ip6h);
+ ip6h->flow_lbl[0] = 0;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+
+ ip6h->payload_len = htons(sizeof(struct udphdr) + len);
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->hop_limit = 32;
+ ip6h->saddr = np->local_ip.in6;
+ ip6h->daddr = np->remote_ip.in6;
+
+ eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
+ } else {
+ udph->check = 0;
+ udph->check = csum_tcpudp_magic(np->local_ip.ip,
+ np->remote_ip.ip,
+ udp_len, IPPROTO_UDP,
+ csum_partial(udph, udp_len, 0));
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+
+ skb_push(skb, sizeof(*iph));
+ skb_reset_network_header(skb);
+ iph = ip_hdr(skb);
+
+ /* iph->version = 4; iph->ihl = 5; */
+ put_unaligned(0x45, (unsigned char *)iph);
+ iph->tos = 0;
+ put_unaligned(htons(ip_len), &(iph->tot_len));
+ iph->id = htons(atomic_inc_return(&ip_ident));
+ iph->frag_off = 0;
+ iph->ttl = 64;
+ iph->protocol = IPPROTO_UDP;
+ iph->check = 0;
+ put_unaligned(np->local_ip.ip, &(iph->saddr));
+ put_unaligned(np->remote_ip.ip, &(iph->daddr));
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ skb->protocol = eth->h_proto = htons(ETH_P_IP);
+ }
+
memcpy(eth->h_source, np->dev->dev_addr, ETH_ALEN);
memcpy(eth->h_dest, np->remote_mac, ETH_ALEN);
@@ -437,18 +480,16 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
}
EXPORT_SYMBOL(netpoll_send_udp);
-static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
+static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
{
- struct arphdr *arp;
- unsigned char *arp_ptr;
- int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
+ int size, type = ARPOP_REPLY;
__be32 sip, tip;
unsigned char *sha;
struct sk_buff *send_skb;
struct netpoll *np, *tmp;
unsigned long flags;
int hlen, tlen;
- int hits = 0;
+ int hits = 0, proto;
if (list_empty(&npinfo->rx_np))
return;
@@ -466,94 +507,214 @@ static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
if (!hits)
return;
- /* No arp on this interface */
- if (skb->dev->flags & IFF_NOARP)
- return;
+ proto = ntohs(eth_hdr(skb)->h_proto);
+ if (proto == ETH_P_IP) {
+ struct arphdr *arp;
+ unsigned char *arp_ptr;
+ /* No arp on this interface */
+ if (skb->dev->flags & IFF_NOARP)
+ return;
- if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
- return;
+ if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
+ return;
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
- arp = arp_hdr(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ arp = arp_hdr(skb);
- if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
- arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
- arp->ar_pro != htons(ETH_P_IP) ||
- arp->ar_op != htons(ARPOP_REQUEST))
- return;
-
- arp_ptr = (unsigned char *)(arp+1);
- /* save the location of the src hw addr */
- sha = arp_ptr;
- arp_ptr += skb->dev->addr_len;
- memcpy(&sip, arp_ptr, 4);
- arp_ptr += 4;
- /* If we actually cared about dst hw addr,
- it would get copied here */
- arp_ptr += skb->dev->addr_len;
- memcpy(&tip, arp_ptr, 4);
-
- /* Should we ignore arp? */
- if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
- return;
+ if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
+ arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
+ arp->ar_pro != htons(ETH_P_IP) ||
+ arp->ar_op != htons(ARPOP_REQUEST))
+ return;
- size = arp_hdr_len(skb->dev);
+ arp_ptr = (unsigned char *)(arp+1);
+ /* save the location of the src hw addr */
+ sha = arp_ptr;
+ arp_ptr += skb->dev->addr_len;
+ memcpy(&sip, arp_ptr, 4);
+ arp_ptr += 4;
+ /* If we actually cared about dst hw addr,
+ it would get copied here */
+ arp_ptr += skb->dev->addr_len;
+ memcpy(&tip, arp_ptr, 4);
- spin_lock_irqsave(&npinfo->rx_lock, flags);
- list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
- if (tip != np->local_ip)
- continue;
+ /* Should we ignore arp? */
+ if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
+ return;
- hlen = LL_RESERVED_SPACE(np->dev);
- tlen = np->dev->needed_tailroom;
- send_skb = find_skb(np, size + hlen + tlen, hlen);
- if (!send_skb)
- continue;
+ size = arp_hdr_len(skb->dev);
- skb_reset_network_header(send_skb);
- arp = (struct arphdr *) skb_put(send_skb, size);
- send_skb->dev = skb->dev;
- send_skb->protocol = htons(ETH_P_ARP);
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
+ if (tip != np->local_ip.ip)
+ continue;
+
+ hlen = LL_RESERVED_SPACE(np->dev);
+ tlen = np->dev->needed_tailroom;
+ send_skb = find_skb(np, size + hlen + tlen, hlen);
+ if (!send_skb)
+ continue;
+
+ skb_reset_network_header(send_skb);
+ arp = (struct arphdr *) skb_put(send_skb, size);
+ send_skb->dev = skb->dev;
+ send_skb->protocol = htons(ETH_P_ARP);
+
+ /* Fill the device header for the ARP frame */
+ if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
+ sha, np->dev->dev_addr,
+ send_skb->len) < 0) {
+ kfree_skb(send_skb);
+ continue;
+ }
- /* Fill the device header for the ARP frame */
- if (dev_hard_header(send_skb, skb->dev, ptype,
- sha, np->dev->dev_addr,
- send_skb->len) < 0) {
- kfree_skb(send_skb);
- continue;
+ /*
+ * Fill out the arp protocol part.
+ *
+ * We only support the Ethernet device type,
+ * which (according to RFC 1390) should
+ * always equal 1 (Ethernet).
+ */
+
+ arp->ar_hrd = htons(np->dev->type);
+ arp->ar_pro = htons(ETH_P_IP);
+ arp->ar_hln = np->dev->addr_len;
+ arp->ar_pln = 4;
+ arp->ar_op = htons(type);
+
+ arp_ptr = (unsigned char *)(arp + 1);
+ memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
+ arp_ptr += np->dev->addr_len;
+ memcpy(arp_ptr, &tip, 4);
+ arp_ptr += 4;
+ memcpy(arp_ptr, sha, np->dev->addr_len);
+ arp_ptr += np->dev->addr_len;
+ memcpy(arp_ptr, &sip, 4);
+
+ netpoll_send_skb(np, send_skb);
+
+ /* If there are several rx_hooks for the same address,
+ we're fine with sending a single reply */
+ break;
}
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+ } else if (proto == ETH_P_IPV6) {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct nd_msg *msg;
+ u8 *lladdr = NULL;
+ struct ipv6hdr *hdr;
+ struct icmp6hdr *icmp6h;
+ const struct in6_addr *saddr;
+ const struct in6_addr *daddr;
+ struct inet6_dev *in6_dev = NULL;
+ struct in6_addr *target;
+
+ in6_dev = in6_dev_get(skb->dev);
+ if (!in6_dev || !in6_dev->cnf.accept_ra)
+ return;
- /*
- * Fill out the arp protocol part.
- *
- * we only support ethernet device type,
- * which (according to RFC 1390) should
- * always equal 1 (Ethernet).
- */
+ if (!pskb_may_pull(skb, skb->len))
+ return;
- arp->ar_hrd = htons(np->dev->type);
- arp->ar_pro = htons(ETH_P_IP);
- arp->ar_hln = np->dev->addr_len;
- arp->ar_pln = 4;
- arp->ar_op = htons(type);
+ msg = (struct nd_msg *)skb_transport_header(skb);
- arp_ptr = (unsigned char *)(arp + 1);
- memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
- arp_ptr += np->dev->addr_len;
- memcpy(arp_ptr, &tip, 4);
- arp_ptr += 4;
- memcpy(arp_ptr, sha, np->dev->addr_len);
- arp_ptr += np->dev->addr_len;
- memcpy(arp_ptr, &sip, 4);
+ __skb_push(skb, skb->data - skb_transport_header(skb));
- netpoll_send_skb(np, send_skb);
+ if (ipv6_hdr(skb)->hop_limit != 255)
+ return;
+ if (msg->icmph.icmp6_code != 0)
+ return;
+ if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+ return;
- /* If there are several rx_hooks for the same address,
- we're fine by sending a single reply */
- break;
+ saddr = &ipv6_hdr(skb)->saddr;
+ daddr = &ipv6_hdr(skb)->daddr;
+
+ size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
+
+ spin_lock_irqsave(&npinfo->rx_lock, flags);
+ list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
+ if (memcmp(daddr, &np->local_ip, sizeof(*daddr)))
+ continue;
+
+ hlen = LL_RESERVED_SPACE(np->dev);
+ tlen = np->dev->needed_tailroom;
+ send_skb = find_skb(np, size + hlen + tlen, hlen);
+ if (!send_skb)
+ continue;
+
+ send_skb->protocol = htons(ETH_P_IPV6);
+ send_skb->dev = skb->dev;
+
+ skb_reset_network_header(send_skb);
+ skb_put(send_skb, sizeof(struct ipv6hdr));
+ hdr = ipv6_hdr(send_skb);
+
+ *(__be32*)hdr = htonl(0x60000000);
+
+ hdr->payload_len = htons(size);
+ hdr->nexthdr = IPPROTO_ICMPV6;
+ hdr->hop_limit = 255;
+ hdr->saddr = *saddr;
+ hdr->daddr = *daddr;
+
+ send_skb->transport_header = send_skb->tail;
+ skb_put(send_skb, size);
+
+ icmp6h = (struct icmp6hdr *)skb_transport_header(send_skb);
+ icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+ icmp6h->icmp6_router = 0;
+ icmp6h->icmp6_solicited = 1;
+ target = (struct in6_addr *)(skb_transport_header(send_skb) + sizeof(struct icmp6hdr));
+ *target = msg->target;
+ icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
+ IPPROTO_ICMPV6,
+ csum_partial(icmp6h,
+ size, 0));
+
+ if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
+ lladdr, np->dev->dev_addr,
+ send_skb->len) < 0) {
+ kfree_skb(send_skb);
+ continue;
+ }
+
+ netpoll_send_skb(np, send_skb);
+
+ /* If there are several rx_hooks for the same address,
+ we're fine with sending a single reply */
+ break;
+ }
+ spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+#endif
}
- spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+}
+
+static bool pkt_is_ns(struct sk_buff *skb)
+{
+ struct nd_msg *msg;
+ struct ipv6hdr *hdr;
+
+ if (skb->protocol != htons(ETH_P_IPV6))
+ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
+ return false;
+
+ msg = (struct nd_msg *)skb_transport_header(skb);
+ __skb_push(skb, skb->data - skb_transport_header(skb));
+ hdr = ipv6_hdr(skb);
+
+ if (hdr->nexthdr != IPPROTO_ICMPV6)
+ return false;
+ if (hdr->hop_limit != 255)
+ return false;
+ if (msg->icmph.icmp6_code != 0)
+ return false;
+ if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+ return false;
+
+ return true;
}
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
@@ -571,9 +732,11 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
goto out;
/* check if netpoll clients need ARP */
- if (skb->protocol == htons(ETH_P_ARP) &&
- atomic_read(&trapped)) {
- skb_queue_tail(&npinfo->arp_tx, skb);
+ if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
+ skb_queue_tail(&npinfo->neigh_tx, skb);
+ return 1;
+ } else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
+ skb_queue_tail(&npinfo->neigh_tx, skb);
return 1;
}
@@ -584,60 +747,100 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
}
proto = ntohs(eth_hdr(skb)->h_proto);
- if (proto != ETH_P_IP)
+ if (proto != ETH_P_IP && proto != ETH_P_IPV6)
goto out;
if (skb->pkt_type == PACKET_OTHERHOST)
goto out;
if (skb_shared(skb))
goto out;
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- goto out;
- iph = (struct iphdr *)skb->data;
- if (iph->ihl < 5 || iph->version != 4)
- goto out;
- if (!pskb_may_pull(skb, iph->ihl*4))
- goto out;
- iph = (struct iphdr *)skb->data;
- if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
- goto out;
-
- len = ntohs(iph->tot_len);
- if (skb->len < len || len < iph->ihl*4)
- goto out;
+ if (proto == ETH_P_IP) {
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ goto out;
+ iph = (struct iphdr *)skb->data;
+ if (iph->ihl < 5 || iph->version != 4)
+ goto out;
+ if (!pskb_may_pull(skb, iph->ihl*4))
+ goto out;
+ iph = (struct iphdr *)skb->data;
+ if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
+ goto out;
- /*
- * Our transport medium may have padded the buffer out.
- * Now We trim to the true length of the frame.
- */
- if (pskb_trim_rcsum(skb, len))
- goto out;
+ len = ntohs(iph->tot_len);
+ if (skb->len < len || len < iph->ihl*4)
+ goto out;
- iph = (struct iphdr *)skb->data;
- if (iph->protocol != IPPROTO_UDP)
- goto out;
+ /*
+ * Our transport medium may have padded the buffer out.
+ * Now we trim to the true length of the frame.
+ */
+ if (pskb_trim_rcsum(skb, len))
+ goto out;
- len -= iph->ihl*4;
- uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
- ulen = ntohs(uh->len);
+ iph = (struct iphdr *)skb->data;
+ if (iph->protocol != IPPROTO_UDP)
+ goto out;
- if (ulen != len)
- goto out;
- if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
- goto out;
+ len -= iph->ihl*4;
+ uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
+ ulen = ntohs(uh->len);
- list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
- if (np->local_ip && np->local_ip != iph->daddr)
- continue;
- if (np->remote_ip && np->remote_ip != iph->saddr)
- continue;
- if (np->local_port && np->local_port != ntohs(uh->dest))
- continue;
+ if (ulen != len)
+ goto out;
+ if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
+ goto out;
+ list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
+ if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
+ continue;
+ if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
+ continue;
+ if (np->local_port && np->local_port != ntohs(uh->dest))
+ continue;
+
+ np->rx_hook(np, ntohs(uh->source),
+ (char *)(uh+1),
+ ulen - sizeof(struct udphdr));
+ hits++;
+ }
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ const struct ipv6hdr *ip6h;
- np->rx_hook(np, ntohs(uh->source),
- (char *)(uh+1),
- ulen - sizeof(struct udphdr));
- hits++;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ goto out;
+ ip6h = (struct ipv6hdr *)skb->data;
+ if (ip6h->version != 6)
+ goto out;
+ len = ntohs(ip6h->payload_len);
+ if (!len)
+ goto out;
+ if (len + sizeof(struct ipv6hdr) > skb->len)
+ goto out;
+ if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
+ goto out;
+ ip6h = ipv6_hdr(skb);
+ if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+ goto out;
+ uh = udp_hdr(skb);
+ ulen = ntohs(uh->len);
+ if (ulen != skb->len)
+ goto out;
+ if (udp6_csum_init(skb, uh, IPPROTO_UDP))
+ goto out;
+ list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
+ if (memcmp(&np->local_ip.in6, &ip6h->daddr, sizeof(struct in6_addr)) != 0)
+ continue;
+ if (memcmp(&np->remote_ip.in6, &ip6h->saddr, sizeof(struct in6_addr)) != 0)
+ continue;
+ if (np->local_port && np->local_port != ntohs(uh->dest))
+ continue;
+
+ np->rx_hook(np, ntohs(uh->source),
+ (char *)(uh+1),
+ ulen - sizeof(struct udphdr));
+ hits++;
+ }
+#endif
}
if (!hits)
@@ -658,17 +861,44 @@ out:
void netpoll_print_options(struct netpoll *np)
{
np_info(np, "local port %d\n", np->local_port);
- np_info(np, "local IP %pI4\n", &np->local_ip);
+ if (np->ipv6)
+ np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
+ else
+ np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
np_info(np, "interface '%s'\n", np->dev_name);
np_info(np, "remote port %d\n", np->remote_port);
- np_info(np, "remote IP %pI4\n", &np->remote_ip);
+ if (np->ipv6)
+ np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
+ else
+ np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
+static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
+{
+ const char *end;
+
+ if (!strchr(str, ':') &&
+ in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
+ if (!*end)
+ return 0;
+ }
+ if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!*end)
+ return 1;
+#else
+ return -1;
+#endif
+ }
+ return -1;
+}
+
int netpoll_parse_options(struct netpoll *np, char *opt)
{
char *cur=opt, *delim;
+ int ipv6;
if (*cur != '@') {
if ((delim = strchr(cur, '@')) == NULL)
@@ -684,7 +914,11 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
*delim = 0;
- np->local_ip = in_aton(cur);
+ ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
+ if (ipv6 < 0)
+ goto parse_failed;
+ else
+ np->ipv6 = (bool)ipv6;
cur = delim;
}
cur++;
@@ -716,7 +950,13 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
*delim = 0;
- np->remote_ip = in_aton(cur);
+ ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
+ if (ipv6 < 0)
+ goto parse_failed;
+ else if (np->ipv6 != (bool)ipv6)
+ goto parse_failed;
+ else
+ np->ipv6 = (bool)ipv6;
cur = delim + 1;
if (*cur != 0) {
@@ -764,7 +1004,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
INIT_LIST_HEAD(&npinfo->rx_np);
spin_lock_init(&npinfo->rx_lock);
- skb_queue_head_init(&npinfo->arp_tx);
+ skb_queue_head_init(&npinfo->neigh_tx);
skb_queue_head_init(&npinfo->txq);
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
@@ -815,7 +1055,7 @@ int netpoll_setup(struct netpoll *np)
return -ENODEV;
}
- if (ndev->master) {
+ if (netdev_master_upper_dev_get(ndev)) {
np_err(np, "%s is a slave device, aborting\n", np->dev_name);
err = -EBUSY;
goto put;
@@ -856,21 +1096,56 @@ int netpoll_setup(struct netpoll *np)
}
}
- if (!np->local_ip) {
- rcu_read_lock();
- in_dev = __in_dev_get_rcu(ndev);
+ if (!np->local_ip.ip) {
+ if (!np->ipv6) {
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(ndev);
+
- if (!in_dev || !in_dev->ifa_list) {
+ if (!in_dev || !in_dev->ifa_list) {
+ rcu_read_unlock();
+ np_err(np, "no IP address for %s, aborting\n",
+ np->dev_name);
+ err = -EDESTADDRREQ;
+ goto put;
+ }
+
+ np->local_ip.ip = in_dev->ifa_list->ifa_local;
rcu_read_unlock();
- np_err(np, "no IP address for %s, aborting\n",
- np->dev_name);
+ np_info(np, "local IP %pI4\n", &np->local_ip.ip);
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct inet6_dev *idev;
+
err = -EDESTADDRREQ;
+ rcu_read_lock();
+ idev = __in6_dev_get(ndev);
+ if (idev) {
+ struct inet6_ifaddr *ifp;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+ continue;
+ np->local_ip.in6 = ifp->addr;
+ err = 0;
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+ }
+ rcu_read_unlock();
+ if (err) {
+ np_err(np, "no IPv6 address for %s, aborting\n",
+ np->dev_name);
+ goto put;
+ } else
+ np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
+#else
+ np_err(np, "IPv6 is not supported %s, aborting\n",
+ np->dev_name);
goto put;
+#endif
}
-
- np->local_ip = in_dev->ifa_list->ifa_local;
- rcu_read_unlock();
- np_info(np, "local IP %pI4\n", &np->local_ip);
}
/* fill up the skb queue */
@@ -903,7 +1178,7 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
struct netpoll_info *npinfo =
container_of(rcu_head, struct netpoll_info, rcu);
- skb_queue_purge(&npinfo->arp_tx);
+ skb_queue_purge(&npinfo->neigh_tx);
skb_queue_purge(&npinfo->txq);
/* we can't call cancel_delayed_work_sync here, as we are in softirq */
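
To make the dual-stack parsing above concrete: netpoll_parse_ip_addr() returns 0 for an IPv4 literal, 1 for an IPv6 literal (when CONFIG_IPV6 is enabled) and -1 otherwise, and netpoll_parse_options() rejects a mixed local/remote pair. The function is static to netpoll.c; the snippet below (with placeholder addresses) only illustrates its return convention:

union inet_addr a;

netpoll_parse_ip_addr("10.0.0.1", &a);   /*  0 -> a.ip holds the IPv4 address  */
netpoll_parse_ip_addr("fd00::1", &a);    /*  1 -> a.in6 holds the IPv6 address */
netpoll_parse_ip_addr("not-an-ip", &a);  /* -1 -> parse failure                */
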
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1868625af25e..9a419b099482 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -780,6 +780,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(4) /* IFLA_MASTER */
+ + nla_total_size(1) /* IFLA_CARRIER */
+ nla_total_size(4) /* IFLA_PROMISCUITY */
+ nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
+ nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
@@ -879,6 +880,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
const struct rtnl_link_stats64 *stats;
struct nlattr *attr, *af_spec;
struct rtnl_af_ops *af_ops;
+ struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
@@ -907,8 +909,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
#endif
(dev->ifindex != dev->iflink &&
nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
- (dev->master &&
- nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
+ (upper_dev &&
+ nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
+ nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
(dev->qdisc &&
nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
(dev->ifalias &&
@@ -1108,6 +1111,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_MTU] = { .type = NLA_U32 },
[IFLA_LINK] = { .type = NLA_U32 },
[IFLA_MASTER] = { .type = NLA_U32 },
+ [IFLA_CARRIER] = { .type = NLA_U8 },
[IFLA_TXQLEN] = { .type = NLA_U32 },
[IFLA_WEIGHT] = { .type = NLA_U32 },
[IFLA_OPERSTATE] = { .type = NLA_U8 },
@@ -1270,16 +1274,16 @@ static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
static int do_set_master(struct net_device *dev, int ifindex)
{
- struct net_device *master_dev;
+ struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops;
int err;
- if (dev->master) {
- if (dev->master->ifindex == ifindex)
+ if (upper_dev) {
+ if (upper_dev->ifindex == ifindex)
return 0;
- ops = dev->master->netdev_ops;
+ ops = upper_dev->netdev_ops;
if (ops->ndo_del_slave) {
- err = ops->ndo_del_slave(dev->master, dev);
+ err = ops->ndo_del_slave(upper_dev, dev);
if (err)
return err;
} else {
@@ -1288,12 +1292,12 @@ static int do_set_master(struct net_device *dev, int ifindex)
}
if (ifindex) {
- master_dev = __dev_get_by_index(dev_net(dev), ifindex);
- if (!master_dev)
+ upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
+ if (!upper_dev)
return -EINVAL;
- ops = master_dev->netdev_ops;
+ ops = upper_dev->netdev_ops;
if (ops->ndo_add_slave) {
- err = ops->ndo_add_slave(master_dev, dev);
+ err = ops->ndo_add_slave(upper_dev, dev);
if (err)
return err;
} else {
@@ -1307,7 +1311,6 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
const struct net_device_ops *ops = dev->netdev_ops;
- int send_addr_notify = 0;
int err;
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
@@ -1360,16 +1363,6 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct sockaddr *sa;
int len;
- if (!ops->ndo_set_mac_address) {
- err = -EOPNOTSUPP;
- goto errout;
- }
-
- if (!netif_device_present(dev)) {
- err = -ENODEV;
- goto errout;
- }
-
len = sizeof(sa_family_t) + dev->addr_len;
sa = kmalloc(len, GFP_KERNEL);
if (!sa) {
@@ -1379,13 +1372,11 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
sa->sa_family = dev->type;
memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
dev->addr_len);
- err = ops->ndo_set_mac_address(dev, sa);
+ err = dev_set_mac_address(dev, sa);
kfree(sa);
if (err)
goto errout;
- send_addr_notify = 1;
modified = 1;
- add_device_randomness(dev->dev_addr, dev->addr_len);
}
if (tb[IFLA_MTU]) {
@@ -1422,7 +1413,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
if (tb[IFLA_BROADCAST]) {
nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
- send_addr_notify = 1;
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
}
if (ifm->ifi_flags || ifm->ifi_change) {
@@ -1438,6 +1429,13 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
modified = 1;
}
+ if (tb[IFLA_CARRIER]) {
+ err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
+ if (err)
+ goto errout;
+ modified = 1;
+ }
+
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
@@ -1536,9 +1534,6 @@ errout:
net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
dev->name);
- if (send_addr_notify)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
-
return err;
}
@@ -1672,9 +1667,11 @@ struct net_device *rtnl_create_link(struct net *net,
if (tb[IFLA_MTU])
dev->mtu = nla_get_u32(tb[IFLA_MTU]);
- if (tb[IFLA_ADDRESS])
+ if (tb[IFLA_ADDRESS]) {
memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
nla_len(tb[IFLA_ADDRESS]));
+ dev->addr_assign_type = NET_ADDR_SET;
+ }
if (tb[IFLA_BROADCAST])
memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
nla_len(tb[IFLA_BROADCAST]));
@@ -1992,6 +1989,7 @@ errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
+EXPORT_SYMBOL(rtmsg_ifinfo);
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
struct net_device *dev,
@@ -2054,7 +2052,6 @@ errout:
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
- struct net_device *master = NULL;
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct net_device *dev;
@@ -2096,10 +2093,10 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
/* Support fdb on master device the net/bridge default case */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
(dev->priv_flags & IFF_BRIDGE_PORT)) {
- master = dev->master;
- err = master->netdev_ops->ndo_fdb_add(ndm, tb,
- dev, addr,
- nlh->nlmsg_flags);
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ const struct net_device_ops *ops = br_dev->netdev_ops;
+
+ err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags);
if (err)
goto out;
else
@@ -2160,10 +2157,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
/* Support fdb on master device the net/bridge default case */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
(dev->priv_flags & IFF_BRIDGE_PORT)) {
- struct net_device *master = dev->master;
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ const struct net_device_ops *ops = br_dev->netdev_ops;
- if (master->netdev_ops->ndo_fdb_del)
- err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr);
+ if (ops->ndo_fdb_del)
+ err = ops->ndo_fdb_del(ndm, dev, addr);
if (err)
goto out;
@@ -2247,9 +2245,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (dev->priv_flags & IFF_BRIDGE_PORT) {
- struct net_device *master = dev->master;
- const struct net_device_ops *ops = master->netdev_ops;
+ struct net_device *br_dev;
+ const struct net_device_ops *ops;
+ br_dev = netdev_master_upper_dev_get(dev);
+ ops = br_dev->netdev_ops;
if (ops->ndo_fdb_dump)
idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
}
@@ -2270,6 +2270,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct ifinfomsg *ifm;
struct nlattr *br_afspec;
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
if (nlh == NULL)
@@ -2287,8 +2288,8 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
- (dev->master &&
- nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
+ (br_dev &&
+ nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
(dev->ifindex != dev->iflink &&
@@ -2324,11 +2325,11 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
const struct net_device_ops *ops = dev->netdev_ops;
- struct net_device *master = dev->master;
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
- if (master && master->netdev_ops->ndo_bridge_getlink) {
+ if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
if (idx >= cb->args[0] &&
- master->netdev_ops->ndo_bridge_getlink(
+ br_dev->netdev_ops->ndo_bridge_getlink(
skb, portid, seq, dev) < 0)
break;
idx++;
@@ -2365,7 +2366,7 @@ static inline size_t bridge_nlmsg_size(void)
static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
{
struct net *net = dev_net(dev);
- struct net_device *master = dev->master;
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
struct sk_buff *skb;
int err = -EOPNOTSUPP;
@@ -2376,8 +2377,8 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
}
if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
- master && master->netdev_ops->ndo_bridge_getlink) {
- err = master->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
+ br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
+ err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev);
if (err < 0)
goto errout;
}
@@ -2436,13 +2437,14 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
oflags = flags;
if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
- if (!dev->master ||
- !dev->master->netdev_ops->ndo_bridge_setlink) {
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+
+ if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
err = -EOPNOTSUPP;
goto out;
}
- err = dev->master->netdev_ops->ndo_bridge_setlink(dev, nlh);
+ err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
if (err)
goto out;
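
Both the new "carrier" sysfs store and the IFLA_CARRIER attribute handled above funnel into dev_change_carrier(). A hedged sketch of a direct caller (hypothetical helper; dev_change_carrier() needs RTNL held and a driver that implements ndo_change_carrier):

static int example_drop_carrier(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_carrier(dev, false);	/* force "no carrier" */
	rtnl_unlock();

	return err;
}
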
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3ab989b0de42..2568c449fe36 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -155,8 +155,9 @@ static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
*/
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
-void *__kmalloc_reserve(size_t size, gfp_t flags, int node, unsigned long ip,
- bool *pfmemalloc)
+
+static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
+ unsigned long ip, bool *pfmemalloc)
{
void *obj;
bool ret_pfmemalloc = false;
@@ -259,6 +260,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb->mac_header = ~0U;
+ skb->transport_header = ~0U;
#endif
/* make sure we initialize shinfo sequentially */
@@ -327,6 +329,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
skb->end = skb->tail + size;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
skb->mac_header = ~0U;
+ skb->transport_header = ~0U;
#endif
/* make sure we initialize shinfo sequentially */
@@ -1649,7 +1652,7 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
static struct page *linear_to_page(struct page *page, unsigned int *len,
unsigned int *offset,
- struct sk_buff *skb, struct sock *sk)
+ struct sock *sk)
{
struct page_frag *pfrag = sk_page_frag(sk);
@@ -1682,14 +1685,14 @@ static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
static bool spd_fill_page(struct splice_pipe_desc *spd,
struct pipe_inode_info *pipe, struct page *page,
unsigned int *len, unsigned int offset,
- struct sk_buff *skb, bool linear,
+ bool linear,
struct sock *sk)
{
if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
return true;
if (linear) {
- page = linear_to_page(page, len, &offset, skb, sk);
+ page = linear_to_page(page, len, &offset, sk);
if (!page)
return true;
}
@@ -1706,23 +1709,9 @@ static bool spd_fill_page(struct splice_pipe_desc *spd,
return false;
}
-static inline void __segment_seek(struct page **page, unsigned int *poff,
- unsigned int *plen, unsigned int off)
-{
- unsigned long n;
-
- *poff += off;
- n = *poff / PAGE_SIZE;
- if (n)
- *page = nth_page(*page, n);
-
- *poff = *poff % PAGE_SIZE;
- *plen -= off;
-}
-
static bool __splice_segment(struct page *page, unsigned int poff,
unsigned int plen, unsigned int *off,
- unsigned int *len, struct sk_buff *skb,
+ unsigned int *len,
struct splice_pipe_desc *spd, bool linear,
struct sock *sk,
struct pipe_inode_info *pipe)
@@ -1737,23 +1726,19 @@ static bool __splice_segment(struct page *page, unsigned int poff,
}
/* ignore any bits we already processed */
- if (*off) {
- __segment_seek(&page, &poff, &plen, *off);
- *off = 0;
- }
+ poff += *off;
+ plen -= *off;
+ *off = 0;
do {
unsigned int flen = min(*len, plen);
- /* the linear region may spread across several pages */
- flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
-
- if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
+ if (spd_fill_page(spd, pipe, page, &flen, poff,
+ linear, sk))
return true;
-
- __segment_seek(&page, &poff, &plen, flen);
+ poff += flen;
+ plen -= flen;
*len -= flen;
-
} while (*len && plen);
return false;
@@ -1777,7 +1762,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
if (__splice_segment(virt_to_page(skb->data),
(unsigned long) skb->data & (PAGE_SIZE - 1),
skb_headlen(skb),
- offset, len, skb, spd,
+ offset, len, spd,
skb_head_is_locked(skb),
sk, pipe))
return true;
@@ -1790,7 +1775,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
if (__splice_segment(skb_frag_page(f),
f->page_offset, skb_frag_size(f),
- offset, len, skb, spd, false, sk, pipe))
+ offset, len, spd, false, sk, pipe))
return true;
}
@@ -2686,48 +2671,37 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
int len, int odd, struct sk_buff *skb),
void *from, int length)
{
- int frg_cnt = 0;
- skb_frag_t *frag = NULL;
- struct page *page = NULL;
- int copy, left;
+ int frg_cnt = skb_shinfo(skb)->nr_frags;
+ int copy;
int offset = 0;
int ret;
+ struct page_frag *pfrag = &current->task_frag;
do {
/* Return error if we don't have space for new frag */
- frg_cnt = skb_shinfo(skb)->nr_frags;
if (frg_cnt >= MAX_SKB_FRAGS)
- return -EFAULT;
-
- /* allocate a new page for next frag */
- page = alloc_pages(sk->sk_allocation, 0);
+ return -EMSGSIZE;
- /* If alloc_page fails just return failure and caller will
- * free previous allocated pages by doing kfree_skb()
- */
- if (page == NULL)
+ if (!sk_page_frag_refill(sk, pfrag))
return -ENOMEM;
- /* initialize the next frag */
- skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
- skb->truesize += PAGE_SIZE;
- atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
-
- /* get the new initialized frag */
- frg_cnt = skb_shinfo(skb)->nr_frags;
- frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
-
/* copy the user data to page */
- left = PAGE_SIZE - frag->page_offset;
- copy = (length > left)? left : length;
+ copy = min_t(int, length, pfrag->size - pfrag->offset);
- ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag),
- offset, copy, 0, skb);
+ ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
+ offset, copy, 0, skb);
if (ret < 0)
return -EFAULT;
/* copy was successful so update the size parameters */
- skb_frag_size_add(frag, copy);
+ skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
+ copy);
+ frg_cnt++;
+ pfrag->offset += copy;
+ get_page(pfrag->page);
+
+ skb->truesize += copy;
+ atomic_add(copy, &sk->sk_wmem_alloc);
skb->len += copy;
skb->data_len += copy;
offset += copy;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index e32083d5d8f8..f4345582a6b9 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -203,10 +203,10 @@ dsa_slave_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void dsa_slave_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, "dsa", 32);
- strncpy(drvinfo->version, dsa_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "platform", 32);
+ strlcpy(drvinfo->driver, "dsa", sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, dsa_driver_version, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int dsa_slave_nway_reset(struct net_device *dev)
@@ -391,7 +391,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
if (p->phy != NULL) {
phy_attach(slave_dev, dev_name(&p->phy->dev),
- 0, PHY_INTERFACE_MODE_GMII);
+ PHY_INTERFACE_MODE_GMII);
p->phy->autoneg = AUTONEG_ENABLE;
p->phy->speed = 0;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 4efad533e5f6..bc39c8c8f589 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -290,8 +290,6 @@ int eth_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
- /* if device marked as NET_ADDR_RANDOM, reset it */
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
return 0;
}
EXPORT_SYMBOL(eth_mac_addr);
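
The eth_mac_addr() change above, together with the register_netdevice() and rtnl_create_link() hunks earlier, hinges on addr_assign_type. A hedged sketch of the intended driver-side flow, assumed to run under RTNL (example_probe() and mac_from_eeprom are hypothetical):

static int example_probe(struct net_device *dev, const u8 *mac_from_eeprom)
{
	if (is_valid_ether_addr(mac_from_eeprom)) {
		/* Permanent address: leave addr_assign_type at NET_ADDR_PERM
		 * so register_netdevice() mirrors dev_addr into perm_addr. */
		memcpy(dev->dev_addr, mac_from_eeprom, ETH_ALEN);
	} else {
		/* No usable hardware address: this marks the device
		 * NET_ADDR_RANDOM. */
		eth_hw_addr_random(dev);
	}

	return register_netdevice(dev);
}
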
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 24b384b7903e..4fdf96710d45 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1705,12 +1705,11 @@ static struct packet_type ip_packet_type __read_mostly = {
static int __init inet_init(void)
{
- struct sk_buff *dummy_skb;
struct inet_protosw *q;
struct list_head *r;
int rc = -EINVAL;
- BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
+ BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
if (!sysctl_local_reserved_ports)
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 5cd75e2dab2c..99f00d39d10b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -974,7 +974,7 @@ static void nl_fib_input(struct sk_buff *skb)
nl_fib_lookup(frn, tb);
- portid = NETLINK_CB(skb).portid; /* pid of sending process */
+ portid = NETLINK_CB(skb).portid; /* netlink portid */
NETLINK_CB(skb).portid = 0; /* from kernel */
NETLINK_CB(skb).dst_group = 0; /* unicast */
netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b236ef04914f..ef54377fb11c 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -232,7 +232,8 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
*
* return false if we decode an option that should not be.
*/
-bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
+bool cookie_check_timestamp(struct tcp_options_received *tcp_opt,
+ struct net *net, bool *ecn_ok)
{
/* echoed timestamp, lowest bits contain options */
u32 options = tcp_opt->rcv_tsecr & TSMASK;
@@ -247,7 +248,7 @@ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok)
tcp_opt->sack_ok = (options & (1 << 4)) ? TCP_SACK_SEEN : 0;
*ecn_ok = (options >> 5) & 1;
- if (*ecn_ok && !sysctl_tcp_ecn)
+ if (*ecn_ok && !net->ipv4.sysctl_tcp_ecn)
return false;
if (tcp_opt->sack_ok && !sysctl_tcp_sack)
@@ -295,7 +296,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
- if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
+ if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
goto out;
ret = NULL;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d84400b65049..a25e1d286b99 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -232,8 +232,8 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
return 0;
}
-int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
struct tcp_fastopen_context *ctxt;
@@ -538,13 +538,6 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
- .procname = "tcp_ecn",
- .data = &sysctl_tcp_ecn,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "tcp_dsack",
.data = &sysctl_tcp_dsack,
.maxlen = sizeof(int),
@@ -850,6 +843,13 @@ static struct ctl_table ipv4_net_table[] = {
.proc_handler = ipv4_ping_group_range,
},
{
+ .procname = "tcp_ecn",
+ .data = &init_net.ipv4.sysctl_tcp_ecn,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
.procname = "tcp_mem",
.maxlen = sizeof(init_net.ipv4.sysctl_tcp_mem),
.mode = 0644,
@@ -882,6 +882,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
&net->ipv4.sysctl_icmp_ratemask;
table[6].data =
&net->ipv4.sysctl_ping_group_range;
+ table[7].data =
+ &net->ipv4.sysctl_tcp_ecn;
/* Don't export sysctls to unprivileged users */
if (net->user_ns != &init_user_ns)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 18f97ca76b00..0905997e5873 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -81,8 +81,6 @@ int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
EXPORT_SYMBOL(sysctl_tcp_reordering);
-int sysctl_tcp_ecn __read_mostly = 2;
-EXPORT_SYMBOL(sysctl_tcp_ecn);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 54139fa514e6..c6ce9ca98d23 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1568,7 +1568,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;
if (!want_cookie || tmp_opt.tstamp_ok)
- TCP_ECN_create_request(req, skb);
+ TCP_ECN_create_request(req, skb, sock_net(sk));
if (want_cookie) {
isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -2888,6 +2888,7 @@ EXPORT_SYMBOL(tcp_prot);
static int __net_init tcp_sk_init(struct net *net)
{
+ net->ipv4.sysctl_tcp_ecn = 2;
return 0;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5d451593ef16..667a6adfccf8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -314,7 +314,7 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
tp->ecn_flags = 0;
- if (sysctl_tcp_ecn == 1) {
+ if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
tp->ecn_flags = TCP_ECN_OK;
}
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 4ea244891b58..309af19a0a0a 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -40,7 +40,7 @@ obj-$(CONFIG_IPV6_SIT) += sit.o
obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o
obj-$(CONFIG_IPV6_GRE) += ip6_gre.o
-obj-y += addrconf_core.o exthdrs_core.o
+obj-y += addrconf_core.o exthdrs_core.o ip6_checksum.o
obj-$(CONFIG_INET) += output_core.o protocol.o $(ipv6-offload)
obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b043c60429bd..6b793bfc0e10 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -811,11 +811,10 @@ static struct pernet_operations inet6_net_ops = {
static int __init inet6_init(void)
{
- struct sk_buff *dummy_skb;
struct list_head *r;
int err = 0;
- BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb));
+ BUILD_BUG_ON(sizeof(struct inet6_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
/* Register the socket-side information for inet6_create. */
for (r = &inetsw6[0]; r < &inetsw6[SOCK_MAX]; ++r)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 8edf2601065a..33be36398a78 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -30,6 +30,7 @@
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/tcp_states.h>
+#include <net/dsfield.h>
#include <linux/errqueue.h>
#include <asm/uaccess.h>
@@ -356,12 +357,11 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
sin->sin6_port = serr->port;
sin->sin6_scope_id = 0;
if (skb->protocol == htons(ETH_P_IPV6)) {
- sin->sin6_addr =
- *(struct in6_addr *)(nh + serr->addr_offset);
+ const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset),
+ struct ipv6hdr, daddr);
+ sin->sin6_addr = ip6h->daddr;
if (np->sndflow)
- sin->sin6_flowinfo =
- (*(__be32 *)(nh + serr->addr_offset - 24) &
- IPV6_FLOWINFO_MASK);
+ sin->sin6_flowinfo = ip6_flowinfo(ip6h);
if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin->sin6_scope_id = IP6CB(skb)->iif;
} else {
@@ -488,13 +488,14 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
}
if (np->rxopt.bits.rxtclass) {
- int tclass = ipv6_tclass(ipv6_hdr(skb));
+ int tclass = ipv6_get_dsfield(ipv6_hdr(skb));
put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), &tclass);
}
- if (np->rxopt.bits.rxflow && (*(__be32 *)nh & IPV6_FLOWINFO_MASK)) {
- __be32 flowinfo = *(__be32 *)nh & IPV6_FLOWINFO_MASK;
- put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
+ if (np->rxopt.bits.rxflow) {
+ __be32 flowinfo = ip6_flowinfo((struct ipv6hdr *)nh);
+ if (flowinfo)
+ put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo);
}
/* HbH is allowed only once */
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 473f628f9f20..07a7d65a7cb6 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -553,7 +553,8 @@ static bool ipv6_hop_ra(struct sk_buff *skb, int optoff)
const unsigned char *nh = skb_network_header(skb);
if (nh[optoff + 1] == 2) {
- IP6CB(skb)->ra = optoff;
+ IP6CB(skb)->flags |= IP6SKB_ROUTERALERT;
+ memcpy(&IP6CB(skb)->ra, nh + optoff + 2, sizeof(IP6CB(skb)->ra));
return true;
}
LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n",
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
new file mode 100644
index 000000000000..72d198b8e4d2
--- /dev/null
+++ b/net/ipv6/ip6_checksum.c
@@ -0,0 +1,97 @@
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udplite.h>
+#include <asm/checksum.h>
+
+#ifndef _HAVE_ARCH_IPV6_CSUM
+__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
+ const struct in6_addr *daddr,
+ __u32 len, unsigned short proto,
+ __wsum csum)
+{
+
+ int carry;
+ __u32 ulen;
+ __u32 uproto;
+ __u32 sum = (__force u32)csum;
+
+ sum += (__force u32)saddr->s6_addr32[0];
+ carry = (sum < (__force u32)saddr->s6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)saddr->s6_addr32[1];
+ carry = (sum < (__force u32)saddr->s6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)saddr->s6_addr32[2];
+ carry = (sum < (__force u32)saddr->s6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)saddr->s6_addr32[3];
+ carry = (sum < (__force u32)saddr->s6_addr32[3]);
+ sum += carry;
+
+ sum += (__force u32)daddr->s6_addr32[0];
+ carry = (sum < (__force u32)daddr->s6_addr32[0]);
+ sum += carry;
+
+ sum += (__force u32)daddr->s6_addr32[1];
+ carry = (sum < (__force u32)daddr->s6_addr32[1]);
+ sum += carry;
+
+ sum += (__force u32)daddr->s6_addr32[2];
+ carry = (sum < (__force u32)daddr->s6_addr32[2]);
+ sum += carry;
+
+ sum += (__force u32)daddr->s6_addr32[3];
+ carry = (sum < (__force u32)daddr->s6_addr32[3]);
+ sum += carry;
+
+ ulen = (__force u32)htonl((__u32) len);
+ sum += ulen;
+ carry = (sum < ulen);
+ sum += carry;
+
+ uproto = (__force u32)htonl(proto);
+ sum += uproto;
+ carry = (sum < uproto);
+ sum += carry;
+
+ return csum_fold((__force __wsum)sum);
+}
+EXPORT_SYMBOL(csum_ipv6_magic);
+#endif
+
+int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
+{
+ int err;
+
+ UDP_SKB_CB(skb)->partial_cov = 0;
+ UDP_SKB_CB(skb)->cscov = skb->len;
+
+ if (proto == IPPROTO_UDPLITE) {
+ err = udplite_checksum_init(skb, uh);
+ if (err)
+ return err;
+ }
+
+ if (uh->check == 0) {
+ /* RFC 2460 section 8.1 says that we SHOULD log
+ this error. Well, it is reasonable.
+ */
+ LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
+ return 1;
+ }
+ if (skb->ip_summed == CHECKSUM_COMPLETE &&
+ !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
+ skb->len, proto, skb->csum))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!skb_csum_unnecessary(skb))
+ skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len, proto, 0));
+
+ return 0;
+}
+EXPORT_SYMBOL(udp6_csum_init);
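
csum_ipv6_magic() above accumulates 32-bit words into a 32-bit sum, re-adding each carry by hand, and the result is finally folded down to 16 bits by csum_fold(). A compact userspace equivalent of that accumulate-and-fold step (a portable sketch, not the arch-optimized kernel code):

#include <stdint.h>
#include <stdio.h>

/* Add a 32-bit word to the running sum, propagating the carry back in,
 * exactly as the generic csum_ipv6_magic() does for each address word. */
static uint32_t csum_add32(uint32_t sum, uint32_t word)
{
    sum += word;
    return sum + (sum < word);  /* (sum < word) is 1 iff the addition wrapped */
}

/* Fold the 32-bit one's-complement accumulator down to the final 16-bit
 * checksum (the role csum_fold() plays above). */
static uint16_t csum_fold32(uint32_t sum)
{
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint32_t sum = 0;

    sum = csum_add32(sum, 0xffffffffu);   /* forces a carry */
    sum = csum_add32(sum, 0x00000001u);
    printf("folded checksum = 0x%04x\n", csum_fold32(sum));
    return 0;
}
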
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c727e4712751..db91fe3466a3 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -772,9 +772,7 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
* Push down and install the IP header.
*/
ipv6h = ipv6_hdr(skb);
- *(__be32 *)ipv6h = fl6->flowlabel | htonl(0x60000000);
- dsfield = INET_ECN_encapsulate(0, dsfield);
- ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
+ ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
ipv6h->hop_limit = tunnel->parms.hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
@@ -1240,7 +1238,7 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
__be16 *p = (__be16 *)(ipv6h+1);
- *(__be32 *)ipv6h = t->fl.u.ip6.flowlabel | htonl(0x60000000);
+ ip6_flow_hdr(ipv6h, 0, t->fl.u.ip6.flowlabel);
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = NEXTHDR_GRE;
ipv6h->saddr = t->parms.laddr;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index a52d864d562b..4ac5bf30e16a 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -212,7 +212,7 @@ resubmit:
if (ipv6_addr_is_multicast(&hdr->daddr) &&
!ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
&hdr->saddr) &&
- !ipv6_is_mld(skb, nexthdr))
+ !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
goto discard;
}
if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
@@ -280,10 +280,8 @@ int ip6_mc_input(struct sk_buff *skb)
struct inet6_skb_parm *opt = IP6CB(skb);
/* Check for MLD */
- if (unlikely(opt->ra)) {
+ if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
/* Check if this is a mld message */
- u8 *ptr = skb_network_header(skb) + opt->ra;
- struct icmp6hdr *icmp6;
u8 nexthdr = hdr->nexthdr;
__be16 frag_off;
int offset;
@@ -291,7 +289,7 @@ int ip6_mc_input(struct sk_buff *skb)
/* Check if the value of Router Alert
* is for MLD (0x0000).
*/
- if ((ptr[2] | ptr[3]) == 0) {
+ if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
deliver = false;
if (!ipv6_ext_hdr(nexthdr)) {
@@ -303,24 +301,10 @@ int ip6_mc_input(struct sk_buff *skb)
if (offset < 0)
goto out;
- if (nexthdr != IPPROTO_ICMPV6)
+ if (!ipv6_is_mld(skb, nexthdr, offset))
goto out;
- if (!pskb_may_pull(skb, (skb_network_header(skb) +
- offset + 1 - skb->data)))
- goto out;
-
- icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
-
- switch (icmp6->icmp6_type) {
- case ICMPV6_MGM_QUERY:
- case ICMPV6_MGM_REPORT:
- case ICMPV6_MGM_REDUCTION:
- case ICMPV6_MLD2_REPORT:
- deliver = true;
- break;
- }
- goto out;
+ deliver = true;
}
/* unknown RA - process it normally */
}
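
Together with the exthdrs.c change earlier, the hop-by-hop parser now stores the 16-bit router-alert value itself (plus a flag) in the skb control block, so ip6_mc_input() only has to compare it against the MLD value (0) instead of re-walking the option. A small sketch of that parse-and-compare, assuming a well-formed router-alert option and using model types in place of IP6CB():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IPV6_OPT_ROUTERALERT_MLD 0      /* RFC 2711: value 0 = MLD message */
#define IP6SKB_ROUTERALERT_MODEL 1

/* Model of the relevant IP6CB() fields after this patch. */
struct inet6_skb_parm_model {
    uint16_t ra;        /* router-alert value, network byte order */
    uint16_t flags;
};

/* Sketch of the ipv6_hop_ra() side: opt points at the option type byte of a
 * router-alert option (type 5, data length 2, 2-byte value). */
static void parse_router_alert(struct inet6_skb_parm_model *cb, const uint8_t *opt)
{
    if (opt[1] == 2) {  /* option data length must be 2 */
        cb->flags |= IP6SKB_ROUTERALERT_MODEL;
        memcpy(&cb->ra, opt + 2, sizeof(cb->ra));
    }
}

int main(void)
{
    const uint8_t ra_option[4] = { 5, 2, 0x00, 0x00 };  /* MLD router alert */
    struct inet6_skb_parm_model cb = { 0 };

    parse_router_alert(&cb, ra_option);
    if ((cb.flags & IP6SKB_ROUTERALERT_MODEL) &&
        cb.ra == htons(IPV6_OPT_ROUTERALERT_MLD))
        printf("router alert present, value is MLD\n");
    return 0;
}
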
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 5552d13ae92f..9250c696de9d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -216,7 +216,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
- *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;
+ ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
hdr->payload_len = htons(seg_len);
hdr->nexthdr = proto;
@@ -267,7 +267,7 @@ int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
skb_put(skb, sizeof(struct ipv6hdr));
hdr = ipv6_hdr(skb);
- *(__be32*)hdr = htonl(0x60000000);
+ ip6_flow_hdr(hdr, 0, 0);
hdr->payload_len = htons(len);
hdr->nexthdr = proto;
@@ -1548,9 +1548,7 @@ int ip6_push_pending_frames(struct sock *sk)
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
- *(__be32*)hdr = fl6->flowlabel |
- htonl(0x60000000 | ((int)np->cork.tclass << 20));
-
+ ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
hdr->hop_limit = np->cork.hop_limit;
hdr->nexthdr = proto;
hdr->saddr = fl6->saddr;
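
Several call sites in this series stop open-coding *(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel in favour of an ip6_flow_hdr() helper. A userspace sketch of the packing it performs on the first word of the IPv6 header:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch: write version (6), traffic class and flow label into the first
 * 32 bits of an IPv6 header, mirroring the expression the helper replaces. */
static void ip6_flow_hdr_sketch(uint8_t *hdr, unsigned int tclass,
                                uint32_t flowlabel_be)
{
    uint32_t word = htonl(0x60000000u | (tclass << 20)) | flowlabel_be;

    memcpy(hdr, &word, sizeof(word));
}

int main(void)
{
    uint8_t hdr[40] = { 0 };

    ip6_flow_hdr_sketch(hdr, 0x2e, htonl(0x12345));  /* EF class, label 0x12345 */
    printf("first byte = 0x%02x (version 6 in the high nibble)\n", hdr[0]);
    return 0;
}
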
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index a14f28b280f5..fff83cbc197f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1030,9 +1030,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
- *(__be32*)ipv6h = fl6->flowlabel | htonl(0x60000000);
- dsfield = INET_ECN_encapsulate(0, dsfield);
- ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
+ ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
ipv6h->hop_limit = t->parms.hop_limit;
ipv6h->nexthdr = proto;
ipv6h->saddr = fl6->saddr;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 28dfa5f3801f..8237ee15eafd 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -935,33 +935,6 @@ int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
}
/*
- * identify MLD packets for MLD filter exceptions
- */
-bool ipv6_is_mld(struct sk_buff *skb, int nexthdr)
-{
- struct icmp6hdr *pic;
-
- if (nexthdr != IPPROTO_ICMPV6)
- return false;
-
- if (!pskb_may_pull(skb, sizeof(struct icmp6hdr)))
- return false;
-
- pic = icmp6_hdr(skb);
-
- switch (pic->icmp6_type) {
- case ICMPV6_MGM_QUERY:
- case ICMPV6_MGM_REPORT:
- case ICMPV6_MGM_REDUCTION:
- case ICMPV6_MLD2_REPORT:
- return true;
- default:
- break;
- }
- return false;
-}
-
-/*
* check if the interface/address pair is valid
*/
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 6574175795df..5733cd27f4a3 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -395,7 +395,7 @@ static struct sk_buff *ndisc_build_skb(struct net_device *dev,
len += ndisc_opt_addr_space(dev);
skb = sock_alloc_send_skb(sk,
- (MAX_HEADER + sizeof(struct ipv6hdr) +
+ (sizeof(struct ipv6hdr) +
len + hlen + tlen),
1, &err);
if (!skb) {
@@ -1355,12 +1355,11 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
struct sock *sk = net->ipv6.ndisc_sk;
- int len = sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
+ int len = sizeof(struct rd_msg);
struct inet_peer *peer;
struct sk_buff *buff;
- struct icmp6hdr *icmph;
+ struct rd_msg *msg;
struct in6_addr saddr_buf;
- struct in6_addr *addrp;
struct rt6_info *rt;
struct dst_entry *dst;
struct inet6_dev *idev;
@@ -1439,7 +1438,7 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
buff = sock_alloc_send_skb(sk,
- (MAX_HEADER + sizeof(struct ipv6hdr) +
+ (sizeof(struct ipv6hdr) +
len + hlen + tlen),
1, &err);
if (buff == NULL) {
@@ -1455,21 +1454,19 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
skb_set_transport_header(buff, skb_tail_pointer(buff) - buff->data);
skb_put(buff, len);
- icmph = icmp6_hdr(buff);
+ msg = (struct rd_msg *)icmp6_hdr(buff);
- memset(icmph, 0, sizeof(struct icmp6hdr));
- icmph->icmp6_type = NDISC_REDIRECT;
+ memset(&msg->icmph, 0, sizeof(struct icmp6hdr));
+ msg->icmph.icmp6_type = NDISC_REDIRECT;
/*
* copy target and destination addresses
*/
- addrp = (struct in6_addr *)(icmph + 1);
- *addrp = *target;
- addrp++;
- *addrp = ipv6_hdr(skb)->daddr;
+ msg->target = *target;
+ msg->dest = ipv6_hdr(skb)->daddr;
- opt = (u8*) (addrp + 1);
+ opt = msg->opt;
/*
* include target_address option
@@ -1490,9 +1487,9 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
memcpy(opt, ipv6_hdr(skb), rd_len - 8);
- icmph->icmp6_cksum = csum_ipv6_magic(&saddr_buf, &ipv6_hdr(skb)->saddr,
- len, IPPROTO_ICMPV6,
- csum_partial(icmph, len, 0));
+ msg->icmph.icmp6_cksum = csum_ipv6_magic(&saddr_buf, &ipv6_hdr(skb)->saddr,
+ len, IPPROTO_ICMPV6,
+ csum_partial(msg, len, 0));
skb_dst_set(buff, dst);
rcu_read_lock();
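
ndisc_send_redirect() here (and rt6_do_redirect() below) switch from pointer arithmetic past the ICMPv6 header to a dedicated rd_msg layout: the redirect message is the ICMPv6 header followed by the target and destination addresses, then the options. A model of that layout, showing the offsets the old code computed by hand via (icmph + 1) and (addrp + 1):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the ND redirect message (RFC 4861, section 4.5). */
struct icmp6hdr_model {
    uint8_t  icmp6_type;
    uint8_t  icmp6_code;
    uint16_t icmp6_cksum;
    uint32_t icmp6_reserved;
};

struct in6_addr_model {
    uint8_t s6_addr[16];
};

struct rd_msg_model {
    struct icmp6hdr_model icmph;
    struct in6_addr_model target;
    struct in6_addr_model dest;
    uint8_t opt[];              /* TLV options start here */
};

int main(void)
{
    printf("target at offset %zu, dest at %zu, options at %zu\n",
           offsetof(struct rd_msg_model, target),
           offsetof(struct rd_msg_model, dest),
           offsetof(struct rd_msg_model, opt));
    return 0;
}
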
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 029623dbd411..ed3b427b2841 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -126,7 +126,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
skb_put(nskb, sizeof(struct ipv6hdr));
skb_reset_network_header(nskb);
ip6h = ipv6_hdr(nskb);
- *(__be32 *)ip6h = htonl(0x60000000 | (tclass << 20));
+ ip6_flow_hdr(ip6h, tclass, 0);
ip6h->hop_limit = ip6_dst_hoplimit(dst);
ip6h->nexthdr = IPPROTO_TCP;
ip6h->saddr = oip6h->daddr;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e229a3bc345d..7c34c01b515b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -388,15 +388,8 @@ static int rt6_info_hash_nhsfn(unsigned int candidate_count,
{
unsigned int val = fl6->flowi6_proto;
- val ^= (__force u32)fl6->daddr.s6_addr32[0];
- val ^= (__force u32)fl6->daddr.s6_addr32[1];
- val ^= (__force u32)fl6->daddr.s6_addr32[2];
- val ^= (__force u32)fl6->daddr.s6_addr32[3];
-
- val ^= (__force u32)fl6->saddr.s6_addr32[0];
- val ^= (__force u32)fl6->saddr.s6_addr32[1];
- val ^= (__force u32)fl6->saddr.s6_addr32[2];
- val ^= (__force u32)fl6->saddr.s6_addr32[3];
+ val ^= ipv6_addr_hash(&fl6->daddr);
+ val ^= ipv6_addr_hash(&fl6->saddr);
/* Work only if this not encapsulated */
switch (fl6->flowi6_proto) {
@@ -994,7 +987,7 @@ void ip6_route_input(struct sk_buff *skb)
.flowi6_iif = skb->dev->ifindex,
.daddr = iph->daddr,
.saddr = iph->saddr,
- .flowlabel = (* (__be32 *) iph) & IPV6_FLOWINFO_MASK,
+ .flowlabel = ip6_flowinfo(iph),
.flowi6_mark = skb->mark,
.flowi6_proto = iph->nexthdr,
};
@@ -1159,7 +1152,7 @@ void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
fl6.flowi6_flags = 0;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
- fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
+ fl6.flowlabel = ip6_flowinfo(iph);
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
@@ -1187,7 +1180,7 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
fl6.flowi6_flags = 0;
fl6.daddr = iph->daddr;
fl6.saddr = iph->saddr;
- fl6.flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK;
+ fl6.flowlabel = ip6_flowinfo(iph);
dst = ip6_route_output(net, NULL, &fl6);
if (!dst->error)
@@ -1705,37 +1698,33 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
struct net *net = dev_net(skb->dev);
struct netevent_redirect netevent;
struct rt6_info *rt, *nrt = NULL;
- const struct in6_addr *target;
struct ndisc_options ndopts;
- const struct in6_addr *dest;
struct neighbour *old_neigh;
struct inet6_dev *in6_dev;
struct neighbour *neigh;
- struct icmp6hdr *icmph;
+ struct rd_msg *msg;
int optlen, on_link;
u8 *lladdr;
optlen = skb->tail - skb->transport_header;
- optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr);
+ optlen -= sizeof(*msg);
if (optlen < 0) {
net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
return;
}
- icmph = icmp6_hdr(skb);
- target = (const struct in6_addr *) (icmph + 1);
- dest = target + 1;
+ msg = (struct rd_msg *)icmp6_hdr(skb);
- if (ipv6_addr_is_multicast(dest)) {
+ if (ipv6_addr_is_multicast(&msg->dest)) {
net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
return;
}
on_link = 0;
- if (ipv6_addr_equal(dest, target)) {
+ if (ipv6_addr_equal(&msg->dest, &msg->target)) {
on_link = 1;
- } else if (ipv6_addr_type(target) !=
+ } else if (ipv6_addr_type(&msg->target) !=
(IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
return;
@@ -1752,7 +1741,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
* first-hop router for the specified ICMP Destination Address.
*/
- if (!ndisc_parse_options((u8*)(dest + 1), optlen, &ndopts)) {
+ if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
return;
}
@@ -1779,7 +1768,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
*/
dst_confirm(&rt->dst);
- neigh = __neigh_lookup(&nd_tbl, target, skb->dev, 1);
+ neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
if (!neigh)
return;
@@ -1799,7 +1788,7 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
NEIGH_UPDATE_F_ISROUTER))
);
- nrt = ip6_rt_copy(rt, dest);
+ nrt = ip6_rt_copy(rt, &msg->dest);
if (!nrt)
goto out;
@@ -1814,10 +1803,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
goto out;
netevent.old = &rt->dst;
- netevent.old_neigh = old_neigh;
netevent.new = &nrt->dst;
- netevent.new_neigh = neigh;
- netevent.daddr = dest;
+ netevent.daddr = &msg->dest;
+ netevent.neigh = neigh;
call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
if (rt->rt6i_flags & RTF_CACHE) {
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 40161977f7cf..8a0848b60b35 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -179,7 +179,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
memset(&tcp_opt, 0, sizeof(tcp_opt));
tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL);
- if (!cookie_check_timestamp(&tcp_opt, &ecn_ok))
+ if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
goto out;
ret = NULL;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 93825dd3a7c0..3701c3c6e2eb 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1027,7 +1027,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
treq->rmt_addr = ipv6_hdr(skb)->saddr;
treq->loc_addr = ipv6_hdr(skb)->daddr;
if (!want_cookie || tmp_opt.tstamp_ok)
- TCP_ECN_create_request(req, skb);
+ TCP_ECN_create_request(req, skb, sock_net(sk));
treq->iif = sk->sk_bound_dev_if;
@@ -1163,7 +1163,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
- newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
+ newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -1243,7 +1243,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
- newnp->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
+ newnp->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
/* Clone native IPv6 options from listening socket (if any)
@@ -1456,7 +1456,7 @@ ipv6_pktoptions:
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
if (np->rxopt.bits.rxtclass)
- np->rcv_tclass = ipv6_tclass(ipv6_hdr(skb));
+ np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
if (ipv6_opt_accepted(sk, opt_skb)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index dfaa29b8b293..1afb635d9b57 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -752,40 +752,6 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
return 0;
}
-static inline int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh,
- int proto)
-{
- int err;
-
- UDP_SKB_CB(skb)->partial_cov = 0;
- UDP_SKB_CB(skb)->cscov = skb->len;
-
- if (proto == IPPROTO_UDPLITE) {
- err = udplite_checksum_init(skb, uh);
- if (err)
- return err;
- }
-
- if (uh->check == 0) {
- /* RFC 2460 section 8.1 says that we SHOULD log
- this error. Well, it is reasonable.
- */
- LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
- return 1;
- }
- if (skb->ip_summed == CHECKSUM_COMPLETE &&
- !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
- skb->len, proto, skb->csum))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- if (!skb_csum_unnecessary(skb))
- skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- skb->len, proto, 0));
-
- return 0;
-}
-
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index ee5a7065aacc..babd1674388a 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -72,7 +72,7 @@ static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *ad
{
unsigned int h;
- h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
+ h = ipv6_addr_hash((const struct in6_addr *)addr);
h ^= h >> 16;
h ^= h >> 8;
h &= XFRM6_TUNNEL_SPI_BYADDR_HSIZE - 1;
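
Both the rt6 flow hash earlier and the xfrm6 SPI hash here now go through ipv6_addr_hash(), which XOR-folds the four 32-bit words of the address into one. A userspace sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the helper used above: XOR the four 32-bit words of an IPv6
 * address; byte order is irrelevant for a pure XOR fold. */
static uint32_t ipv6_addr_hash_sketch(const uint8_t addr[16])
{
    uint32_t w[4];

    memcpy(w, addr, sizeof(w));
    return w[0] ^ w[1] ^ w[2] ^ w[3];
}

int main(void)
{
    uint8_t loopback[16] = { 0 };

    loopback[15] = 1;   /* ::1 */
    printf("hash(::1) = 0x%08x\n", (unsigned int)ipv6_addr_hash_sketch(loopback));
    return 0;
}
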
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c0353d55d56f..74827e3b26a1 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2185,7 +2185,6 @@ static struct pernet_operations __net_initdata netlink_net_ops = {
static int __init netlink_proto_init(void)
{
- struct sk_buff *dummy_skb;
int i;
unsigned long limit;
unsigned int order;
@@ -2194,7 +2193,7 @@ static int __init netlink_proto_init(void)
if (err != 0)
goto out;
- BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));
+ BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
if (!nl_table)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f996db343247..d8c13a965459 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1989,10 +1989,9 @@ static struct pernet_operations ovs_net_ops = {
static int __init dp_init(void)
{
- struct sk_buff *dummy_skb;
int err;
- BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
+ BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
pr_info("Open vSwitch switching datapath\n");
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 5d460c37df07..0531de6c7a4a 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -69,7 +69,6 @@ static int internal_dev_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
return 0;
}
@@ -98,7 +97,7 @@ static int internal_dev_stop(struct net_device *netdev)
static void internal_dev_getinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "openvswitch");
+ strlcpy(info->driver, "openvswitch", sizeof(info->driver));
}
static const struct ethtool_ops internal_dev_ethtool_ops = {
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 05996d0dd828..5b0fd291babb 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/kernel.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
@@ -792,10 +793,9 @@ static const struct net_proto_family rxrpc_family_ops = {
*/
static int __init af_rxrpc_init(void)
{
- struct sk_buff *dummy_skb;
int ret = -1;
- BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof(dummy_skb->cb));
+ BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb));
rxrpc_epoch = htonl(get_seconds());
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 65d240cbf74b..8579c4bb20c9 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -485,8 +485,9 @@ errout:
return err;
}
-struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
- char *name, int ovr, int bind)
+struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
+ struct nlattr *est, char *name, int ovr,
+ int bind)
{
struct tc_action *a;
struct tc_action_ops *a_o;
@@ -542,9 +543,9 @@ struct tc_action *tcf_action_init_1(struct nlattr *nla, struct nlattr *est,
/* backward compatibility for policer */
if (name == NULL)
- err = a_o->init(tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
+ err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
else
- err = a_o->init(nla, est, a, ovr, bind);
+ err = a_o->init(net, nla, est, a, ovr, bind);
if (err < 0)
goto err_free;
@@ -566,8 +567,9 @@ err_out:
return ERR_PTR(err);
}
-struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
- char *name, int ovr, int bind)
+struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, char *name, int ovr,
+ int bind)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
struct tc_action *head = NULL, *act, *act_prev = NULL;
@@ -579,7 +581,7 @@ struct tc_action *tcf_action_init(struct nlattr *nla, struct nlattr *est,
return ERR_PTR(err);
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
- act = tcf_action_init_1(tb[i], est, name, ovr, bind);
+ act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
if (IS_ERR(act))
goto err;
act->order = i;
@@ -960,7 +962,7 @@ tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
struct tc_action *a;
u32 seq = n->nlmsg_seq;
- act = tcf_action_init(nla, NULL, NULL, ovr, 0);
+ act = tcf_action_init(net, nla, NULL, NULL, ovr, 0);
if (act == NULL)
goto done;
if (IS_ERR(act)) {
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 2c8ad7c86e43..08fa1e8a4ca4 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -51,7 +51,7 @@ static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
};
-static int tcf_csum_init(struct nlattr *nla, struct nlattr *est,
+static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind)
{
struct nlattr *tb[TCA_CSUM_MAX + 1];
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 05d60859d8e3..fd2b3cff5fa2 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -58,8 +58,9 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
[TCA_GACT_PROB] = { .len = sizeof(struct tc_gact_p) },
};
-static int tcf_gact_init(struct nlattr *nla, struct nlattr *est,
- struct tc_action *a, int ovr, int bind)
+static int tcf_gact_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
{
struct nlattr *tb[TCA_GACT_MAX + 1];
struct tc_gact *parm;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 58fb3c7aab9e..0fb9e3f567e6 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -102,7 +102,7 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
[TCA_IPT_TARG] = { .len = sizeof(struct xt_entry_target) },
};
-static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est,
+static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind)
{
struct nlattr *tb[TCA_IPT_MAX + 1];
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 9c0fd0c78814..5d676edc22a6 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -62,8 +62,9 @@ static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
[TCA_MIRRED_PARMS] = { .len = sizeof(struct tc_mirred) },
};
-static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
- struct tc_action *a, int ovr, int bind)
+static int tcf_mirred_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a, int ovr,
+ int bind)
{
struct nlattr *tb[TCA_MIRRED_MAX + 1];
struct tc_mirred *parm;
@@ -88,7 +89,7 @@ static int tcf_mirred_init(struct nlattr *nla, struct nlattr *est,
return -EINVAL;
}
if (parm->ifindex) {
- dev = __dev_get_by_index(&init_net, parm->ifindex);
+ dev = __dev_get_by_index(net, parm->ifindex);
if (dev == NULL)
return -ENODEV;
switch (dev->type) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index b5d029eb44f2..876f0ef29694 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -44,7 +44,7 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
[TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
};
-static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
+static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_action *a, int ovr, int bind)
{
struct nlattr *tb[TCA_NAT_MAX + 1];
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 45c53ab067a6..0c3faddf3f2c 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -38,8 +38,9 @@ static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
[TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) },
};
-static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est,
- struct tc_action *a, int ovr, int bind)
+static int tcf_pedit_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
{
struct nlattr *tb[TCA_PEDIT_MAX + 1];
struct tc_pedit *parm;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index a9de23297d47..8dbd695c160b 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -130,8 +130,9 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
[TCA_POLICE_RESULT] = { .type = NLA_U32 },
};
-static int tcf_act_police_locate(struct nlattr *nla, struct nlattr *est,
- struct tc_action *a, int ovr, int bind)
+static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
{
unsigned int h;
int ret = 0, err;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 3714f60f0b3c..7725eb4ab756 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -95,8 +95,9 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
[TCA_DEF_DATA] = { .type = NLA_STRING, .len = SIMP_MAX_DATA },
};
-static int tcf_simp_init(struct nlattr *nla, struct nlattr *est,
- struct tc_action *a, int ovr, int bind)
+static int tcf_simp_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
{
struct nlattr *tb[TCA_DEF_MAX + 1];
struct tc_defact *parm;
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 476e0fac6712..cb4221171f93 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -67,8 +67,9 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
[TCA_SKBEDIT_MARK] = { .len = sizeof(u32) },
};
-static int tcf_skbedit_init(struct nlattr *nla, struct nlattr *est,
- struct tc_action *a, int ovr, int bind)
+static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action *a,
+ int ovr, int bind)
{
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
struct tc_skbedit *parm;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ff55ed6c49b2..964f5e4f4b8a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -321,7 +321,7 @@ replay:
}
}
- err = tp->ops->change(skb, tp, cl, t->tcm_handle, tca, &fh);
+ err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh);
if (err == 0) {
if (tp_created) {
spin_lock_bh(root_lock);
@@ -508,7 +508,7 @@ void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
}
EXPORT_SYMBOL(tcf_exts_destroy);
-int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb,
+int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
struct nlattr *rate_tlv, struct tcf_exts *exts,
const struct tcf_ext_map *map)
{
@@ -519,7 +519,7 @@ int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb,
struct tc_action *act;
if (map->police && tb[map->police]) {
- act = tcf_action_init_1(tb[map->police], rate_tlv,
+ act = tcf_action_init_1(net, tb[map->police], rate_tlv,
"police", TCA_ACT_NOREPLACE,
TCA_ACT_BIND);
if (IS_ERR(act))
@@ -528,8 +528,9 @@ int tcf_exts_validate(struct tcf_proto *tp, struct nlattr **tb,
act->type = TCA_OLD_COMPAT;
exts->action = act;
} else if (map->action && tb[map->action]) {
- act = tcf_action_init(tb[map->action], rate_tlv, NULL,
- TCA_ACT_NOREPLACE, TCA_ACT_BIND);
+ act = tcf_action_init(net, tb[map->action], rate_tlv,
+ NULL, TCA_ACT_NOREPLACE,
+ TCA_ACT_BIND);
if (IS_ERR(act))
return PTR_ERR(act);
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index 344a11b342e5..d76a35d0dc85 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -132,15 +132,16 @@ static const struct nla_policy basic_policy[TCA_BASIC_MAX + 1] = {
[TCA_BASIC_EMATCHES] = { .type = NLA_NESTED },
};
-static int basic_set_parms(struct tcf_proto *tp, struct basic_filter *f,
- unsigned long base, struct nlattr **tb,
+static int basic_set_parms(struct net *net, struct tcf_proto *tp,
+ struct basic_filter *f, unsigned long base,
+ struct nlattr **tb,
struct nlattr *est)
{
int err = -EINVAL;
struct tcf_exts e;
struct tcf_ematch_tree t;
- err = tcf_exts_validate(tp, tb, est, &e, &basic_ext_map);
+ err = tcf_exts_validate(net, tp, tb, est, &e, &basic_ext_map);
if (err < 0)
return err;
@@ -162,7 +163,7 @@ errout:
return err;
}
-static int basic_change(struct sk_buff *in_skb,
+static int basic_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, unsigned long *arg)
{
@@ -182,7 +183,7 @@ static int basic_change(struct sk_buff *in_skb,
if (f != NULL) {
if (handle && f->handle != handle)
return -EINVAL;
- return basic_set_parms(tp, f, base, tb, tca[TCA_RATE]);
+ return basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
}
err = -ENOBUFS;
@@ -208,7 +209,7 @@ static int basic_change(struct sk_buff *in_skb,
f->handle = head->hgenerator;
}
- err = basic_set_parms(tp, f, base, tb, tca[TCA_RATE]);
+ err = basic_set_parms(net, tp, f, base, tb, tca[TCA_RATE]);
if (err < 0)
goto errout;
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 6db7855b9029..3a294eb98d61 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -178,7 +178,7 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
[TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED },
};
-static int cls_cgroup_change(struct sk_buff *in_skb,
+static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
@@ -215,7 +215,8 @@ static int cls_cgroup_change(struct sk_buff *in_skb,
if (err < 0)
return err;
- err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e,
+ &cgroup_ext_map);
if (err < 0)
return err;
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index ce82d0cb1b47..aa36a8c8b33b 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -351,7 +351,7 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_PERTURB] = { .type = NLA_U32 },
};
-static int flow_change(struct sk_buff *in_skb,
+static int flow_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle, struct nlattr **tca,
unsigned long *arg)
@@ -397,7 +397,7 @@ static int flow_change(struct sk_buff *in_skb,
return -EOPNOTSUPP;
}
- err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
if (err < 0)
return err;
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 4075a0aef2aa..1135d8227f9b 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -192,7 +192,7 @@ static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
};
static int
-fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f,
+fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,
struct nlattr **tb, struct nlattr **tca, unsigned long base)
{
struct fw_head *head = (struct fw_head *)tp->root;
@@ -200,7 +200,7 @@ fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f,
u32 mask;
int err;
- err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &fw_ext_map);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &fw_ext_map);
if (err < 0)
return err;
@@ -233,7 +233,7 @@ errout:
return err;
}
-static int fw_change(struct sk_buff *in_skb,
+static int fw_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
@@ -255,7 +255,7 @@ static int fw_change(struct sk_buff *in_skb,
if (f != NULL) {
if (f->id != handle && handle)
return -EINVAL;
- return fw_change_attrs(tp, f, tb, tca, base);
+ return fw_change_attrs(net, tp, f, tb, tca, base);
}
if (!handle)
@@ -282,7 +282,7 @@ static int fw_change(struct sk_buff *in_skb,
f->id = handle;
- err = fw_change_attrs(tp, f, tb, tca, base);
+ err = fw_change_attrs(net, tp, f, tb, tca, base);
if (err < 0)
goto errout;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index c10d57bf98f2..37da567d833e 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -335,9 +335,10 @@ static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
[TCA_ROUTE4_IIF] = { .type = NLA_U32 },
};
-static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
- struct route4_filter *f, u32 handle, struct route4_head *head,
- struct nlattr **tb, struct nlattr *est, int new)
+static int route4_set_parms(struct net *net, struct tcf_proto *tp,
+ unsigned long base, struct route4_filter *f,
+ u32 handle, struct route4_head *head,
+ struct nlattr **tb, struct nlattr *est, int new)
{
int err;
u32 id = 0, to = 0, nhandle = 0x8000;
@@ -346,7 +347,7 @@ static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
struct route4_bucket *b;
struct tcf_exts e;
- err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
+ err = tcf_exts_validate(net, tp, tb, est, &e, &route_ext_map);
if (err < 0)
return err;
@@ -427,7 +428,7 @@ errout:
return err;
}
-static int route4_change(struct sk_buff *in_skb,
+static int route4_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
@@ -457,7 +458,7 @@ static int route4_change(struct sk_buff *in_skb,
if (f->bkt)
old_handle = f->handle;
- err = route4_set_parms(tp, base, f, handle, head, tb,
+ err = route4_set_parms(net, tp, base, f, handle, head, tb,
tca[TCA_RATE], 0);
if (err < 0)
return err;
@@ -480,7 +481,7 @@ static int route4_change(struct sk_buff *in_skb,
if (f == NULL)
goto errout;
- err = route4_set_parms(tp, base, f, handle, head, tb,
+ err = route4_set_parms(net, tp, base, f, handle, head, tb,
tca[TCA_RATE], 1);
if (err < 0)
goto errout;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index 494bbb90924a..252d8b05872e 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -416,7 +416,7 @@ static const struct nla_policy rsvp_policy[TCA_RSVP_MAX + 1] = {
[TCA_RSVP_PINFO] = { .len = sizeof(struct tc_rsvp_pinfo) },
};
-static int rsvp_change(struct sk_buff *in_skb,
+static int rsvp_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base,
u32 handle,
struct nlattr **tca,
@@ -440,7 +440,7 @@ static int rsvp_change(struct sk_buff *in_skb,
if (err < 0)
return err;
- err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
+ err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, &rsvp_ext_map);
if (err < 0)
return err;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index a1293b4ab7a1..b86535a40169 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -197,9 +197,10 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
};
static int
-tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
- struct tcindex_data *p, struct tcindex_filter_result *r,
- struct nlattr **tb, struct nlattr *est)
+tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ u32 handle, struct tcindex_data *p,
+ struct tcindex_filter_result *r, struct nlattr **tb,
+ struct nlattr *est)
{
int err, balloc = 0;
struct tcindex_filter_result new_filter_result, *old_r = r;
@@ -208,7 +209,7 @@ tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
struct tcindex_filter *f = NULL; /* make gcc behave */
struct tcf_exts e;
- err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map);
+ err = tcf_exts_validate(net, tp, tb, est, &e, &tcindex_ext_map);
if (err < 0)
return err;
@@ -332,7 +333,7 @@ errout:
}
static int
-tcindex_change(struct sk_buff *in_skb,
+tcindex_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca, unsigned long *arg)
{
@@ -353,7 +354,8 @@ tcindex_change(struct sk_buff *in_skb,
if (err < 0)
return err;
- return tcindex_set_parms(tp, base, handle, p, r, tb, tca[TCA_RATE]);
+ return tcindex_set_parms(net, tp, base, handle, p, r, tb,
+ tca[TCA_RATE]);
}
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c7c27bc91b5a..eb07a1e536e6 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -488,15 +488,15 @@ static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
[TCA_U32_MARK] = { .len = sizeof(struct tc_u32_mark) },
};
-static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
- struct tc_u_hnode *ht,
+static int u32_set_parms(struct net *net, struct tcf_proto *tp,
+ unsigned long base, struct tc_u_hnode *ht,
struct tc_u_knode *n, struct nlattr **tb,
struct nlattr *est)
{
int err;
struct tcf_exts e;
- err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
+ err = tcf_exts_validate(net, tp, tb, est, &e, &u32_ext_map);
if (err < 0)
return err;
@@ -544,7 +544,7 @@ errout:
return err;
}
-static int u32_change(struct sk_buff *in_skb,
+static int u32_change(struct net *net, struct sk_buff *in_skb,
struct tcf_proto *tp, unsigned long base, u32 handle,
struct nlattr **tca,
unsigned long *arg)
@@ -570,7 +570,8 @@ static int u32_change(struct sk_buff *in_skb,
if (TC_U32_KEY(n->handle) == 0)
return -EINVAL;
- return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE]);
+ return u32_set_parms(net, tp, base, n->ht_up, n, tb,
+ tca[TCA_RATE]);
}
if (tb[TCA_U32_DIVISOR]) {
@@ -656,7 +657,7 @@ static int u32_change(struct sk_buff *in_skb,
}
#endif
- err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE]);
+ err = u32_set_parms(net, tp, base, ht, n, tb, tca[TCA_RATE]);
if (err == 0) {
struct tc_u_knode **ins;
for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index f898b1c58bd2..1c2e46cb9191 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -595,7 +595,7 @@ static void sctp_v4_ecn_capable(struct sock *sk)
INET_ECN_xmit(sk);
}
-void sctp_addr_wq_timeout_handler(unsigned long arg)
+static void sctp_addr_wq_timeout_handler(unsigned long arg)
{
struct net *net = (struct net *)arg;
struct sctp_sockaddr_entry *addrw, *temp;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 5b5c876c80e9..0c612361c153 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2426,9 +2426,8 @@ static struct pernet_operations unix_net_ops = {
static int __init af_unix_init(void)
{
int rc = -1;
- struct sk_buff *dummy_skb;
- BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
+ BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
rc = proto_register(&unix_proto, 1);
if (rc != 0) {
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index 48c48ffafa1d..e37862f1b127 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -15,10 +15,10 @@ static void cfg80211_get_drvinfo(struct net_device *dev,
strlcpy(info->version, init_utsname()->release, sizeof(info->version));
if (wdev->wiphy->fw_version[0])
- strncpy(info->fw_version, wdev->wiphy->fw_version,
+ strlcpy(info->fw_version, wdev->wiphy->fw_version,
sizeof(info->fw_version));
else
- strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
+ strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
strlcpy(info->bus_info, dev_name(wiphy_dev(wdev->wiphy)),
sizeof(info->bus_info));
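
The strncpy -> strlcpy conversions here (and the strcpy -> strlcpy one in openvswitch above) are about guaranteed NUL-termination when the source string may fill the destination. strlcpy is a kernel/BSD function, so this userspace sketch supplies its own minimal stand-in to show the behaviour:

#include <stdio.h>
#include <string.h>

/* Minimal stand-in for strlcpy(): copy at most size-1 bytes and always
 * NUL-terminate (strncpy would leave the buffer unterminated on truncation). */
static size_t strlcpy_sketch(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t copy = len >= size ? size - 1 : len;

        memcpy(dst, src, copy);
        dst[copy] = '\0';
    }
    return len;     /* like strlcpy: length of the source string */
}

int main(void)
{
    char fw_version[8];

    strlcpy_sketch(fw_version, "1.2.3-very-long-build-id", sizeof(fw_version));
    printf("fw_version = \"%s\"\n", fw_version);    /* truncated but terminated */
    return 0;
}
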