Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/bitops/lock.h      |  14
-rw-r--r--  include/linux/kernel.h                 |   6
-rw-r--r--  include/linux/mISDNif.h                |   2
-rw-r--r--  include/linux/netdevice.h              | 225
-rw-r--r--  include/linux/nmi.h                    |   1
-rw-r--r--  include/linux/perf_event.h             |   5
-rw-r--r--  include/linux/sunrpc/auth.h            |   7
-rw-r--r--  include/linux/sunrpc/svc_rdma.h        |  20
-rw-r--r--  include/linux/thermal.h                |  20
-rw-r--r--  include/linux/trace_events.h           |   7
-rw-r--r--  include/net/flow.h                     |   2
-rw-r--r--  include/net/inet6_connection_sock.h    |   1
-rw-r--r--  include/net/ip_tunnels.h               |  16
-rw-r--r--  include/net/ipv6.h                     |   6
-rw-r--r--  include/net/ping.h                     |   1
-rw-r--r--  include/net/sctp/sctp.h                |  10
-rw-r--r--  include/net/sctp/structs.h             |   2
-rw-r--r--  include/net/vxlan.h                    |  16
-rw-r--r--  include/trace/events/fib6.h            |   2
-rw-r--r--  include/trace/events/kmem.h            |  42
-rw-r--r--  include/trace/events/thermal.h         |  16
-rw-r--r--  include/trace/events/tlb.h             |   4
-rw-r--r--  include/trace/events/writeback.h       | 121
-rw-r--r--  include/uapi/linux/ethtool.h           |   6
-rw-r--r--  include/uapi/linux/if_link.h           |   2
25 files changed, 282 insertions, 272 deletions
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index c30266e94806..8ef0ccbf8167 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -29,16 +29,16 @@ do { \
* @nr: the bit to set
* @addr: the address to start counting from
*
- * This operation is like clear_bit_unlock, however it is not atomic.
- * It does provide release barrier semantics so it can be used to unlock
- * a bit lock, however it would only be used if no other CPU can modify
- * any bits in the memory until the lock is released (a good example is
- * if the bit lock itself protects access to the other bits in the word).
+ * A weaker form of clear_bit_unlock() as used by __bit_lock_unlock(). If all
+ * the bits in the word are protected by this lock, some archs can use weaker
+ * ops to safely unlock.
+ *
+ * See for example x86's implementation.
*/
#define __clear_bit_unlock(nr, addr) \
do { \
- smp_mb(); \
- __clear_bit(nr, addr); \
+ smp_mb__before_atomic(); \
+ clear_bit(nr, addr); \
} while (0)
#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
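
The comment above describes the classic bit-lock pattern: one bit in a word acts as the lock for the other bits in the same word. A minimal sketch of that pattern, assuming a hypothetical driver where bit 0 is the lock bit (illustrative only, not part of this patch):

#define MY_LOCK_BIT	0
#define MY_STATE_BIT	1

static unsigned long my_flags;

static void my_update_state(void)
{
	/* test_and_set_bit_lock() provides acquire semantics */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_flags))
		cpu_relax();

	/* Non-atomic store is safe: the lock bit guards the whole word */
	__set_bit(MY_STATE_BIT, &my_flags);

	/* Unlock with release semantics; the weaker form is enough here */
	__clear_bit_unlock(MY_LOCK_BIT, &my_flags);
}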
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a13c52ccd8ac..2f7775e229b0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -618,7 +618,7 @@ do { \
#define do_trace_printk(fmt, args...) \
do { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
@@ -662,7 +662,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
*/
#define trace_puts(str) ({ \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(str) ? str : NULL; \
\
@@ -684,7 +684,7 @@ extern void trace_dump_stack(int skip);
#define ftrace_vprintk(fmt, vargs) \
do { \
if (__builtin_constant_p(fmt)) { \
- static const char *trace_printk_fmt \
+ static const char *trace_printk_fmt __used \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(fmt) ? fmt : NULL; \
\
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index 246a3529ecf6..ac02c54520e9 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -596,7 +596,7 @@ static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
}
extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
-extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
+extern void mISDN_clock_update(struct mISDNclock *, int, ktime_t *);
extern unsigned short mISDN_clock_get(void);
extern const char *mISDNDevName4ch(struct mISDNchannel *);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 009c85adae4c..cb0d5d09c2e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -81,8 +81,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
* function. Real network devices commonly used with qdiscs should only return
* the driver transmit return codes though - when qdiscs are used, the actual
* transmission happens asynchronously, so the value is not propagated to
- * higher layers. Virtual network devices transmit synchronously, in this case
- * the driver transmit return codes are consumed by dev_queue_xmit(), all
+ * higher layers. Virtual network devices transmit synchronously; in this case
+ * the driver transmit return codes are consumed by dev_queue_xmit(), and all
* others are propagated to higher layers.
*/
@@ -129,7 +129,7 @@ static inline bool dev_xmit_complete(int rc)
}
/*
- * Compute the worst case header length according to the protocols
+ * Compute the worst-case header length according to the protocols
* used.
*/
@@ -246,7 +246,7 @@ struct hh_cache {
unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
-/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
+/* Reserve HH_DATA_MOD byte-aligned hard_header_len, but at least that much.
* Alternative is:
* dev->hard_header_len ? (dev->hard_header_len +
* (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
@@ -272,7 +272,7 @@ struct header_ops {
};
/* These flag bits are private to the generic network queueing
- * layer, they may not be explicitly referenced by any other
+ * layer; they may not be explicitly referenced by any other
* code.
*/
@@ -286,7 +286,7 @@ enum netdev_state_t {
/*
- * This structure holds at boot time configured netdevice settings. They
+ * This structure holds boot-time configured netdevice settings. They
* are then used in the device probing.
*/
struct netdev_boot_setup {
@@ -304,7 +304,7 @@ struct napi_struct {
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
- * to the per-cpu poll_list, and whoever clears that bit
+ * to the per-CPU poll_list, and whoever clears that bit
* can remove from the list right before clearing the bit.
*/
struct list_head poll_list;
@@ -350,7 +350,7 @@ typedef enum gro_result gro_result_t;
* @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
* case skb->dev was changed by rx_handler.
* @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
- * @RX_HANDLER_PASS: Do nothing, passe the skb as if no rx_handler was called.
+ * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
*
* rx_handlers are functions called from inside __netif_receive_skb(), to do
* special processing of the skb, prior to delivery to protocol handlers.
@@ -365,19 +365,19 @@ typedef enum gro_result gro_result_t;
* Upon return, rx_handler is expected to tell __netif_receive_skb() what to
* do with the skb.
*
- * If the rx_handler consumed to skb in some way, it should return
+ * If the rx_handler consumed the skb in some way, it should return
* RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
- * the skb to be delivered in some other ways.
+ * the skb to be delivered in some other way.
*
* If the rx_handler changed skb->dev, to divert the skb to another
* net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
* new device will be called if it exists.
*
- * If the rx_handler consider the skb should be ignored, it should return
+ * If the rx_handler decides the skb should be ignored, it should return
* RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
* are registered on exact device (ptype->dev == skb->dev).
*
- * If the rx_handler didn't changed skb->dev, but want the skb to be normally
+ * If the rx_handler didn't change skb->dev, but wants the skb to be normally
* delivered, it should return RX_HANDLER_PASS.
*
* A device without a registered rx_handler will behave as if rx_handler
@@ -402,11 +402,11 @@ static inline bool napi_disable_pending(struct napi_struct *n)
}
/**
- * napi_schedule_prep - check if napi can be scheduled
- * @n: napi context
+ * napi_schedule_prep - check if NAPI can be scheduled
+ * @n: NAPI context
*
* Test if NAPI routine is already running, and if not mark
- * it as running. This is used as a condition variable
+ * it as running. This is used as a condition variable to
* insure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
@@ -418,7 +418,7 @@ static inline bool napi_schedule_prep(struct napi_struct *n)
/**
* napi_schedule - schedule NAPI poll
- * @n: napi context
+ * @n: NAPI context
*
* Schedule NAPI poll routine to be called if it is not already
* running.
@@ -431,7 +431,7 @@ static inline void napi_schedule(struct napi_struct *n)
/**
* napi_schedule_irqoff - schedule NAPI poll
- * @n: napi context
+ * @n: NAPI context
*
* Variant of napi_schedule(), assuming hard irqs are masked.
*/
@@ -455,7 +455,7 @@ void __napi_complete(struct napi_struct *n);
void napi_complete_done(struct napi_struct *n, int work_done);
/**
* napi_complete - NAPI processing complete
- * @n: napi context
+ * @n: NAPI context
*
* Mark NAPI processing as complete.
* Consider using napi_complete_done() instead.
@@ -467,32 +467,32 @@ static inline void napi_complete(struct napi_struct *n)
/**
* napi_hash_add - add a NAPI to global hashtable
- * @napi: napi context
+ * @napi: NAPI context
*
- * generate a new napi_id and store a @napi under it in napi_hash
- * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
+ * Generate a new napi_id and store a @napi under it in napi_hash.
+ * Used for busy polling (CONFIG_NET_RX_BUSY_POLL).
* Note: This is normally automatically done from netif_napi_add(),
- * so might disappear in a future linux version.
+ * so might disappear in a future Linux version.
*/
void napi_hash_add(struct napi_struct *napi);
/**
* napi_hash_del - remove a NAPI from global table
- * @napi: napi context
+ * @napi: NAPI context
*
- * Warning: caller must observe rcu grace period
+ * Warning: caller must observe RCU grace period
* before freeing memory containing @napi, if
* this function returns true.
* Note: core networking stack automatically calls it
- * from netif_napi_del()
+ * from netif_napi_del().
* Drivers might want to call this helper to combine all
- * the needed rcu grace periods into a single one.
+ * the needed RCU grace periods into a single one.
*/
bool napi_hash_del(struct napi_struct *napi);
/**
* napi_disable - prevent NAPI from scheduling
- * @n: napi context
+ * @n: NAPI context
*
* Stop NAPI from being scheduled on this context.
* Waits till any outstanding processing completes.
@@ -501,7 +501,7 @@ void napi_disable(struct napi_struct *n);
/**
* napi_enable - enable NAPI scheduling
- * @n: napi context
+ * @n: NAPI context
*
* Resume NAPI from being scheduled on this context.
* Must be paired with napi_disable.
@@ -516,7 +516,7 @@ static inline void napi_enable(struct napi_struct *n)
/**
* napi_synchronize - wait until NAPI is not running
- * @n: napi context
+ * @n: NAPI context
*
* Wait until NAPI is done being scheduled on this context.
* Waits till any outstanding processing completes but
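
Taken together, these helpers are normally used as follows: the interrupt handler schedules NAPI and the poll routine completes it when the budget is not exhausted. A hedged sketch with a hypothetical my_priv structure and my_* helpers (not part of this patch):

static irqreturn_t my_irq(int irq, void *data)
{
	struct my_priv *priv = data;

	if (napi_schedule_prep(&priv->napi)) {
		my_disable_rx_irq(priv);	/* assumed helper */
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_process_rx(priv, budget);	/* assumed helper */

	if (done < budget) {
		napi_complete_done(napi, done);
		my_enable_rx_irq(priv);		/* assumed helper */
	}
	return done;
}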
@@ -559,7 +559,7 @@ enum netdev_queue_state_t {
struct netdev_queue {
/*
- * read mostly part
+ * read-mostly part
*/
struct net_device *dev;
struct Qdisc __rcu *qdisc;
@@ -571,7 +571,7 @@ struct netdev_queue {
int numa_node;
#endif
/*
- * write mostly part
+ * write-mostly part
*/
spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
int xmit_lock_owner;
@@ -648,11 +648,11 @@ struct rps_dev_flow_table {
/*
* The rps_sock_flow_table contains mappings of flows to the last CPU
* on which they were processed by the application (set in recvmsg).
- * Each entry is a 32bit value. Upper part is the high order bits
- * of flow hash, lower part is cpu number.
+ * Each entry is a 32-bit value. The upper part is the high-order bits
+ * of the flow hash, the lower part is the CPU number.
* rps_cpu_mask is used to partition the space, depending on number of
- * possible cpus : rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
- * For example, if 64 cpus are possible, rps_cpu_mask = 0x3f,
+ * possible CPUs: rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1
+ * For example, if 64 CPUs are possible, rps_cpu_mask = 0x3f,
* meaning we use 32-6=26 bits for the hash.
*/
struct rps_sock_flow_table {
@@ -674,7 +674,7 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
unsigned int index = hash & table->mask;
u32 val = hash & ~rps_cpu_mask;
- /* We only give a hint, preemption can change cpu under us */
+ /* We only give a hint, preemption can change CPU under us */
val |= raw_smp_processor_id();
if (table->ents[index] != val)
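
A worked example of the encoding described above, assuming 64 possible CPUs (so rps_cpu_mask = 0x3f and 26 bits remain for the hash); values are illustrative:

u32 hash = 0xdeadbeef;
u32 rps_cpu_mask = 0x3f;		/* roundup_pow_of_two(64) - 1 */
u32 val = (hash & ~rps_cpu_mask) | 5;	/* recorded on CPU 5 */
/* val == 0xdeadbec5: upper 26 bits of the hash, low 6 bits = CPU */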
@@ -807,21 +807,21 @@ struct tc_to_netdev {
* optional and can be filled with a null pointer.
*
* int (*ndo_init)(struct net_device *dev);
- * This function is called once when network device is registered.
- * The network device can use this to any late stage initializaton
- * or semantic validattion. It can fail with an error code which will
- * be propogated back to register_netdev
+ * This function is called once when a network device is registered.
+ * The network device can use this for any late stage initialization
+ * or semantic validation. It can fail with an error code which will
+ * be propagated back to register_netdev.
*
* void (*ndo_uninit)(struct net_device *dev);
* This function is called when device is unregistered or when registration
* fails. It is not called if init fails.
*
* int (*ndo_open)(struct net_device *dev);
- * This function is called when network device transistions to the up
+ * This function is called when a network device transitions to the up
* state.
*
* int (*ndo_stop)(struct net_device *dev);
- * This function is called when network device transistions to the down
+ * This function is called when a network device transitions to the down
* state.
*
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
@@ -832,7 +832,7 @@ struct tc_to_netdev {
* corner cases, but the stack really does a non-trivial amount
* of useless work if you return NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
- * Required can not be NULL.
+ * Required; cannot be NULL.
*
* netdev_features_t (*ndo_fix_features)(struct net_device *dev,
* netdev_features_t features);
@@ -842,34 +842,34 @@ struct tc_to_netdev {
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
* void *accel_priv, select_queue_fallback_t fallback);
- * Called to decide which queue to when device supports multiple
+ * Called to decide which queue to use when device supports multiple
* transmit queues.
*
* void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
* This function is called to allow device receiver to make
- * changes to configuration when multicast or promiscious is enabled.
+ * changes to configuration when multicast or promiscuous is enabled.
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
* This function is called device changes address list filtering.
* If driver handles unicast address filtering, it should set
- * IFF_UNICAST_FLT to its priv_flags.
+ * IFF_UNICAST_FLT in its priv_flags.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* needs to be changed. If this interface is not defined, the
- * mac address can not be changed.
+ * MAC address can not be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
- * Called when a user request an ioctl which can't be handled by
- * the generic interface code. If not defined ioctl's return
+ * Called when a user requests an ioctl which can't be handled by
+ * the generic interface code. If not defined, ioctls return
* not supported error code.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
- * is retained for legacy reason, new devices should use the bus
+ * is retained for legacy reasons; new devices should use the bus
* interface (PCI) for low level management.
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
@@ -878,7 +878,7 @@ struct tc_to_netdev {
* will return an error.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
- * Callback uses when the transmitter has not made any progress
+ * Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
* struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
@@ -896,11 +896,11 @@ struct tc_to_netdev {
* neither operation.
*
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
- * If device support VLAN filtering this function is called when a
+ * If device supports VLAN filtering this function is called when a
* VLAN id is registered.
*
* int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
- * If device support VLAN filtering this function is called when a
+ * If device supports VLAN filtering this function is called when a
* VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
@@ -920,7 +920,7 @@ struct tc_to_netdev {
*
* Enable or disable the VF ability to query its RSS Redirection Table and
* Hash Key. This is needed since on some devices VF share this information
- * with PF and querying it may adduce a theoretical security risk.
+ * with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
* int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
@@ -1030,20 +1030,20 @@ struct tc_to_netdev {
*
* void (*ndo_add_vxlan_port)(struct net_device *dev,
* sa_family_t sa_family, __be16 port);
- * Called by vxlan to notiy a driver about the UDP port and socket
- * address family that vxlan is listnening to. It is called only when
+ * Called by vxlan to notify a driver about the UDP port and socket
+ * address family that vxlan is listening to. It is called only when
* a new port starts listening. The operation is protected by the
* vxlan_net->sock_lock.
*
* void (*ndo_add_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by geneve to notify a driver about the UDP port and socket
* address family that geneve is listnening to. It is called only when
* a new port starts listening. The operation is protected by the
* geneve_net->sock_lock.
*
* void (*ndo_del_geneve_port)(struct net_device *dev,
- * sa_family_t sa_family, __be16 port);
+ * sa_family_t sa_family, __be16 port);
* Called by geneve to notify the driver about a UDP port and socket
* address family that geneve is not listening to anymore. The operation
* is protected by the geneve_net->sock_lock.
@@ -1072,9 +1072,9 @@ struct tc_to_netdev {
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
- * struct net_device *dev
- * netdev_features_t features);
+ * netdev_features_t (*ndo_features_check)(struct sk_buff *skb,
+ * struct net_device *dev
+ * netdev_features_t features);
* Called by core transmit path to determine if device is capable of
* performing offload operations on a given packet. This is to give
* the device an opportunity to implement any restrictions that cannot
@@ -1088,7 +1088,7 @@ struct tc_to_netdev {
* int (*ndo_get_iflink)(const struct net_device *dev);
* Called to get the iflink value of this device.
* void (*ndo_change_proto_down)(struct net_device *dev,
- * bool proto_down);
+ * bool proto_down);
* This function is used to pass protocol port error state information
* to the switch driver. The switch driver can react to the proto_down
* by doing a phys down on the associated switch port.
@@ -1100,7 +1100,7 @@ struct tc_to_netdev {
* This function is used to specify the headroom that the skb must
* consider when allocation skb during packet reception. Setting
* appropriate rx headroom value allows avoiding skb head copy on
- * forward. Setting a negative value reset the rx headroom to the
+ * forward. Setting a negative value resets the rx headroom to the
* default value.
*
*/
@@ -1299,7 +1299,7 @@ struct net_device_ops {
*
* These are the &struct net_device, they are only set internally
* by drivers and used in the kernel. These flags are invisible to
- * userspace, this means that the order of these flags can change
+ * userspace; this means that the order of these flags can change
* during any kernel release.
*
* You should have a pretty good reason to be extending these flags.
@@ -1323,6 +1323,10 @@ struct net_device_ops {
* @IFF_LIVE_ADDR_CHANGE: device supports hardware address
* change when it's running
* @IFF_MACVLAN: Macvlan device
+ * @IFF_XMIT_DST_RELEASE_PERM: IFF_XMIT_DST_RELEASE not taking into account
+ * underlying stacked devices
+ * @IFF_IPVLAN_MASTER: IPvlan master device
+ * @IFF_IPVLAN_SLAVE: IPvlan slave device
* @IFF_L3MDEV_MASTER: device is an L3 master device
* @IFF_NO_QUEUE: device can run without qdisc attached
* @IFF_OPENVSWITCH: device is a Open vSwitch master
@@ -1413,10 +1417,12 @@ enum netdev_priv_flags {
*
* @state: Generic network queuing layer state, see netdev_state_t
* @dev_list: The global list of network devices
- * @napi_list: List entry, that is used for polling napi devices
- * @unreg_list: List entry, that is used, when we are unregistering the
- * device, see the function unregister_netdev
- * @close_list: List entry, that is used, when we are closing the device
+ * @napi_list: List entry used for polling NAPI devices
+ * @unreg_list: List entry used when we are unregistering the
+ * device; see the function unregister_netdev
+ * @close_list: List entry used when we are closing the device
+ * @ptype_all: Device-specific packet handlers for all protocols
+ * @ptype_specific: Device-specific, protocol-specific packet handlers
*
* @adj_list: Directly linked devices, like slaves for bonding
* @all_adj_list: All linked devices, *including* neighbours
@@ -1434,7 +1440,7 @@ enum netdev_priv_flags {
* @mpls_features: Mask of features inheritable by MPLS
*
* @ifindex: interface index
- * @group: The group, that the device belongs to
+ * @group: The group the device belongs to
*
* @stats: Statistics struct, which was left as a legacy, use
* rtnl_link_stats64 instead
@@ -1488,7 +1494,7 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
- * @uc_promisc: Counter, that indicates, that promiscuous mode
+ * @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
* does not implement ndo_set_rx_mode()
@@ -1496,9 +1502,9 @@ enum netdev_priv_flags {
* @mc: multicast mac addresses
* @dev_addrs: list of device hw addresses
* @queues_kset: Group of all Kobjects in the Tx and RX queues
- * @promiscuity: Number of times, the NIC is told to work in
- * Promiscuous mode, if it becomes 0 the NIC will
- * exit from working in Promiscuous mode
+ * @promiscuity: Number of times the NIC is told to work in
+ * promiscuous mode; if it becomes 0 the NIC will
+ * exit promiscuous mode
* @allmulti: Counter, enables or disables allmulticast mode
*
* @vlan_info: VLAN info
@@ -1544,7 +1550,7 @@ enum netdev_priv_flags {
*
* @trans_start: Time (in jiffies) of last Tx
* @watchdog_timeo: Represents the timeout that is used by
- * the watchdog ( see dev_watchdog() )
+ * the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
*
* @pcpu_refcnt: Number of references to this device
@@ -1661,8 +1667,8 @@ struct net_device {
atomic_long_t rx_nohandler;
#ifdef CONFIG_WIRELESS_EXT
- const struct iw_handler_def * wireless_handlers;
- struct iw_public_data * wireless_data;
+ const struct iw_handler_def *wireless_handlers;
+ struct iw_public_data *wireless_data;
#endif
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
@@ -1715,7 +1721,7 @@ struct net_device {
unsigned int allmulti;
- /* Protocol specific pointers */
+ /* Protocol-specific pointers */
#if IS_ENABLED(CONFIG_VLAN_8021Q)
struct vlan_info __rcu *vlan_info;
@@ -1745,13 +1751,11 @@ struct net_device {
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
-
#ifdef CONFIG_SYSFS
struct netdev_rx_queue *_rx;
unsigned int num_rx_queues;
unsigned int real_num_rx_queues;
-
#endif
unsigned long gro_flush_timeout;
@@ -1843,7 +1847,7 @@ struct net_device {
struct garp_port __rcu *garp_port;
struct mrp_port __rcu *mrp_port;
- struct device dev;
+ struct device dev;
const struct attribute_group *sysfs_groups[4];
const struct attribute_group *sysfs_rx_queue_group;
@@ -1858,9 +1862,9 @@ struct net_device {
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
- struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
- u8 prio_tc_map[TC_BITMASK + 1];
+ u8 num_tc;
+ struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ u8 prio_tc_map[TC_BITMASK + 1];
#if IS_ENABLED(CONFIG_FCOE)
unsigned int fcoe_ddp_xid;
@@ -1868,9 +1872,9 @@ struct net_device {
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
struct netprio_map __rcu *priomap;
#endif
- struct phy_device *phydev;
- struct lock_class_key *qdisc_tx_busylock;
- bool proto_down;
+ struct phy_device *phydev;
+ struct lock_class_key *qdisc_tx_busylock;
+ bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2018,7 +2022,7 @@ static inline void *netdev_priv(const struct net_device *dev)
/* Set the sysfs device type for the network logical device to allow
* fine-grained identification of different network device types. For
- * example Ethernet, Wirelss LAN, Bluetooth, WiMAX etc.
+ * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
*/
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
@@ -2028,22 +2032,22 @@ static inline void *netdev_priv(const struct net_device *dev)
#define NAPI_POLL_WEIGHT 64
/**
- * netif_napi_add - initialize a napi context
+ * netif_napi_add - initialize a NAPI context
* @dev: network device
- * @napi: napi context
+ * @napi: NAPI context
* @poll: polling function
* @weight: default weight
*
- * netif_napi_add() must be used to initialize a napi context prior to calling
- * *any* of the other napi related functions.
+ * netif_napi_add() must be used to initialize a NAPI context prior to calling
+ * *any* of the other NAPI-related functions.
*/
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
/**
- * netif_tx_napi_add - initialize a napi context
+ * netif_tx_napi_add - initialize a NAPI context
* @dev: network device
- * @napi: napi context
+ * @napi: NAPI context
* @poll: polling function
* @weight: default weight
*
@@ -2061,22 +2065,22 @@ static inline void netif_tx_napi_add(struct net_device *dev,
}
/**
- * netif_napi_del - remove a napi context
- * @napi: napi context
+ * netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
*
- * netif_napi_del() removes a napi context from the network device napi list
+ * netif_napi_del() removes a NAPI context from the network device NAPI list
*/
void netif_napi_del(struct napi_struct *napi);
struct napi_gro_cb {
/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
- void *frag0;
+ void *frag0;
/* Length of frag0. */
unsigned int frag0_len;
/* This indicates where we are processing relative to skb->data. */
- int data_offset;
+ int data_offset;
/* This is non-zero if the packet cannot be merged with the new skb. */
u16 flush;
@@ -2099,8 +2103,8 @@ struct napi_gro_cb {
/* This is non-zero if the packet may be of the same flow. */
u8 same_flow:1;
- /* Used in udp_gro_receive */
- u8 udp_mark:1;
+ /* Used in tunnel GRO receive */
+ u8 encap_mark:1;
/* GRO checksum is valid */
u8 csum_valid:1;
@@ -2172,7 +2176,7 @@ struct udp_offload {
struct udp_offload_callbacks callbacks;
};
-/* often modified stats are per cpu, other are shared (netdev->stats) */
+/* often-modified stats are per-CPU, others are shared (netdev->stats) */
struct pcpu_sw_netstats {
u64 rx_packets;
u64 rx_bytes;
@@ -2269,7 +2273,7 @@ struct netdev_notifier_changeupper_info {
struct netdev_notifier_info info; /* must be first */
struct net_device *upper_dev; /* new upper dev */
bool master; /* is upper dev master */
- bool linking; /* is the nofication for link or unlink */
+ bool linking; /* is the notification for link or unlink */
void *upper_info; /* upper dev info */
};
@@ -2734,7 +2738,7 @@ extern int netdev_flow_limit_table_len;
#endif /* CONFIG_NET_FLOW_LIMIT */
/*
- * Incoming packets are placed on per-cpu queues
+ * Incoming packets are placed on per-CPU queues
*/
struct softnet_data {
struct list_head poll_list;
@@ -2904,7 +2908,7 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their ndo_start_xmit(),
- * to give appropriate hint to the cpu.
+ * to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
{
@@ -2918,7 +2922,7 @@ static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_que
* @dev_queue: pointer to transmit queue
*
* BQL enabled drivers might use this helper in their TX completion path,
- * to give appropriate hint to the cpu.
+ * to give appropriate hint to the CPU.
*/
static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
{
@@ -3057,7 +3061,7 @@ static inline bool netif_running(const struct net_device *dev)
}
/*
- * Routines to manage the subqueues on a device. We only need start
+ * Routines to manage the subqueues on a device. We only need start,
* stop, and a check if it's stopped. All other device management is
* done at the overall netdevice level.
* Also test the device if we're multiqueue.
@@ -3341,7 +3345,6 @@ void netif_carrier_off(struct net_device *dev);
* in a "pending" state, waiting for some external event. For "on-
* demand" interfaces, this new state identifies the situation where the
* interface is waiting for events to place it in the up state.
- *
*/
static inline void netif_dormant_on(struct net_device *dev)
{
@@ -3676,7 +3679,7 @@ void dev_uc_init(struct net_device *dev);
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
- **/
+ */
static inline int __dev_uc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
@@ -3692,7 +3695,7 @@ static inline int __dev_uc_sync(struct net_device *dev,
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_uc_sync().
- **/
+ */
static inline void __dev_uc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
@@ -3720,7 +3723,7 @@ void dev_mc_init(struct net_device *dev);
*
* Add newly added addresses to the interface, and release
* addresses that have been deleted.
- **/
+ */
static inline int __dev_mc_sync(struct net_device *dev,
int (*sync)(struct net_device *,
const unsigned char *),
@@ -3736,7 +3739,7 @@ static inline int __dev_mc_sync(struct net_device *dev,
* @unsync: function to call if address should be removed
*
* Remove all addresses that were added to the device by dev_mc_sync().
- **/
+ */
static inline void __dev_mc_unsync(struct net_device *dev,
int (*unsync)(struct net_device *,
const unsigned char *))
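
As an illustration of the sync helpers documented above, a hypothetical driver's ndo_set_rx_mode might push unicast filter changes to hardware like this (my_hw_* are assumed, not part of this patch):

static int my_uc_add(struct net_device *dev, const unsigned char *addr)
{
	return my_hw_add_filter(netdev_priv(dev), addr);	/* assumed */
}

static int my_uc_del(struct net_device *dev, const unsigned char *addr)
{
	return my_hw_del_filter(netdev_priv(dev), addr);	/* assumed */
}

static void my_set_rx_mode(struct net_device *dev)
{
	/* Adds newly added addresses, unsyncs deleted ones */
	__dev_uc_sync(dev, my_uc_add, my_uc_del);
}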
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 7ec5b86735f3..4630eeae18e0 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -65,7 +65,6 @@ static inline bool trigger_allbutself_cpu_backtrace(void)
#endif
#ifdef CONFIG_LOCKUP_DETECTOR
-int hw_nmi_is_cpu_stuck(struct pt_regs *);
u64 hw_nmi_get_sample_period(int watchdog_thresh);
extern int nmi_watchdog_enabled;
extern int soft_watchdog_enabled;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 78fda2a69ab8..f291275ffd71 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -121,6 +121,7 @@ struct hw_perf_event {
struct { /* intel_cqm */
int cqm_state;
u32 cqm_rmid;
+ int is_group_event;
struct list_head cqm_events_entry;
struct list_head cqm_groups_entry;
struct list_head cqm_group_entry;
@@ -128,6 +129,10 @@ struct hw_perf_event {
struct { /* itrace */
int itrace_started;
};
+ struct { /* amd_power */
+ u64 pwr_acc;
+ u64 ptsc;
+ };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
struct { /* breakpoint */
/*
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 1ecf13e148b8..6a241a277249 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -21,10 +21,17 @@
#include <linux/utsname.h>
/*
+ * Maximum size of AUTH_NONE authentication information, in XDR words.
+ */
+#define NUL_CALLSLACK (4)
+#define NUL_REPLYSLACK (2)
+
+/*
* Size of the nodename buffer. RFC1831 specifies a hard limit of 255 bytes,
* but Linux hostnames are actually limited to __NEW_UTS_LEN bytes.
*/
#define UNX_MAXNODENAME __NEW_UTS_LEN
+#define UNX_CALLSLACK (21 + XDR_QUADLEN(UNX_MAXNODENAME))
struct rpcsec_gss_info;
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 5322fea6fe4c..3081339968c3 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -75,8 +75,10 @@ struct svc_rdma_op_ctxt {
struct svc_rdma_fastreg_mr *frmr;
int hdr_count;
struct xdr_buf arg;
+ struct ib_cqe cqe;
+ struct ib_cqe reg_cqe;
+ struct ib_cqe inv_cqe;
struct list_head dto_q;
- enum ib_wr_opcode wr_op;
enum ib_wc_status wc_status;
u32 byte_len;
u32 position;
@@ -174,8 +176,6 @@ struct svcxprt_rdma {
struct work_struct sc_work;
};
/* sc_flags */
-#define RDMAXPRT_RQ_PENDING 1
-#define RDMAXPRT_SQ_PENDING 2
#define RDMAXPRT_CONN_PENDING 3
#define RPCRDMA_LISTEN_BACKLOG 10
@@ -199,7 +199,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
struct xdr_buf *rcvbuf);
/* svc_rdma_marshal.c */
-extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
+extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg *, struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
struct rpcrdma_msg *,
enum rpcrdma_errcode, __be32 *);
@@ -224,16 +224,22 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
/* svc_rdma_sendto.c */
extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
- struct svc_rdma_req_map *);
+ struct svc_rdma_req_map *, bool);
extern int svc_rdma_sendto(struct svc_rqst *);
extern struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *);
+extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
+ int);
/* svc_rdma_transport.c */
+extern void svc_rdma_wc_send(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_write(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *);
+extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *);
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
-extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
- enum rpcrdma_errcode);
extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
+extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e13a1ace50e9..a55d0523f75d 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -156,6 +156,7 @@ struct thermal_attr {
* @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis
* @devdata: private pointer for device private data
* @trips: number of trip points the thermal zone supports
+ * @trips_disabled: bitmap for disabled trips
* @passive_delay: number of milliseconds to wait between polls when
* performing passive cooling.
* @polling_delay: number of milliseconds to wait between polls when
@@ -191,6 +192,7 @@ struct thermal_zone_device {
struct thermal_attr *trip_hyst_attrs;
void *devdata;
int trips;
+ unsigned long trips_disabled; /* bitmap for disabled trips */
int passive_delay;
int polling_delay;
int temperature;
@@ -362,6 +364,11 @@ thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
const struct thermal_zone_of_device_ops *ops);
void thermal_zone_of_sensor_unregister(struct device *dev,
struct thermal_zone_device *tz);
+struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
+ struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops);
+void devm_thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tz);
#else
static inline struct thermal_zone_device *
thermal_zone_of_sensor_register(struct device *dev, int id, void *data,
@@ -376,6 +383,19 @@ void thermal_zone_of_sensor_unregister(struct device *dev,
{
}
+static inline struct thermal_zone_device *devm_thermal_zone_of_sensor_register(
+ struct device *dev, int id, void *data,
+ const struct thermal_zone_of_device_ops *ops)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline
+void devm_thermal_zone_of_sensor_unregister(struct device *dev,
+ struct thermal_zone_device *tz)
+{
+}
+
#endif
#if IS_ENABLED(CONFIG_THERMAL)
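
A sketch of how a sensor driver would adopt the new devm_ variant in its probe path; the zone is unregistered automatically on driver detach (my_get_temp and struct my_sensor are hypothetical):

static const struct thermal_zone_of_device_ops my_tz_ops = {
	.get_temp = my_get_temp,	/* assumed callback */
};

static int my_probe(struct platform_device *pdev)
{
	struct my_sensor *priv;
	struct thermal_zone_device *tz;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	tz = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, priv,
						  &my_tz_ops);
	return PTR_ERR_OR_ZERO(tz);
}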
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 705df7db4482..0810f81b6db2 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -420,7 +420,8 @@ extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
extern enum event_trigger_type event_triggers_call(struct trace_event_file *file,
void *rec);
extern void event_triggers_post_call(struct trace_event_file *file,
- enum event_trigger_type tt);
+ enum event_trigger_type tt,
+ void *rec);
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
@@ -507,7 +508,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
if (tt)
- event_triggers_post_call(file, tt);
+ event_triggers_post_call(file, tt, entry);
}
/**
@@ -540,7 +541,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
irq_flags, pc, regs);
if (tt)
- event_triggers_post_call(file, tt);
+ event_triggers_post_call(file, tt, entry);
}
#ifdef CONFIG_BPF_EVENTS
diff --git a/include/net/flow.h b/include/net/flow.h
index 83969eebebf3..d47ef4bb5423 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -127,7 +127,6 @@ struct flowi6 {
#define flowi6_oif __fl_common.flowic_oif
#define flowi6_iif __fl_common.flowic_iif
#define flowi6_mark __fl_common.flowic_mark
-#define flowi6_tos __fl_common.flowic_tos
#define flowi6_scope __fl_common.flowic_scope
#define flowi6_proto __fl_common.flowic_proto
#define flowi6_flags __fl_common.flowic_flags
@@ -135,6 +134,7 @@ struct flowi6 {
#define flowi6_tun_key __fl_common.flowic_tun_key
struct in6_addr daddr;
struct in6_addr saddr;
+ /* Note: flowi6_tos is encoded in flowlabel, too. */
__be32 flowlabel;
union flowi_uli uli;
#define fl6_sport uli.ports.sport
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 064cfbe639d0..954ad6bfb56a 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
-struct in6_addr;
struct inet_bind_bucket;
struct request_sock;
struct sk_buff;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index c35dda9ec991..56050f913339 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -305,6 +305,22 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask);
+static inline int iptunnel_pull_offloads(struct sk_buff *skb)
+{
+ if (skb_is_gso(skb)) {
+ int err;
+
+ err = skb_unclone(skb, GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
+ NETIF_F_GSO_SHIFT);
+ }
+
+ skb->encapsulation = 0;
+ return 0;
+}
+
static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
{
if (pkt_len > 0) {
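
A tunnel receive path would typically call the new helper once the outer headers have been pulled, dropping the packet on failure. A hedged sketch (hypothetical my_tunnel_rcv, not part of this patch):

static int my_tunnel_rcv(struct sk_buff *skb)
{
	/* Clear inherited GSO/encapsulation state before handing the
	 * inner packet up the stack.
	 */
	if (iptunnel_pull_offloads(skb)) {
		kfree_skb(skb);
		return 0;
	}

	/* ... deliver the inner packet ... */
	return 0;
}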
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index f3c9857c645d..d0aeb97aec5d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -835,6 +835,12 @@ static inline u8 ip6_tclass(__be32 flowinfo)
{
return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}
+
+static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
+{
+ return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
+}
+
/*
* Prototypes exported by ipv6
*/
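
The new helper is effectively the inverse of ip6_tclass() for the traffic-class bits. An illustrative use (values are examples only):

__be32 flowlabel = htonl(0x12345) & IPV6_FLOWLABEL_MASK;
__be32 flowinfo = ip6_make_flowinfo(0x28, flowlabel);	/* tclass 0x28 */
/* ip6_tclass(flowinfo) recovers 0x28 */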
diff --git a/include/net/ping.h b/include/net/ping.h
index 5fd7cc244833..4cd90d6b5c25 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -79,7 +79,6 @@ int ping_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len);
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
void *user_icmph, size_t icmph_len);
-int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int ping_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
bool ping_rcv(struct sk_buff *skb);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 835aa2ed9870..65521cfdcade 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -82,6 +82,11 @@
#define SCTP_PROTOSW_FLAG INET_PROTOSW_PERMANENT
#endif
+/* Round an int up to the next multiple of 4. */
+#define WORD_ROUND(s) (((s)+3)&~3)
+/* Truncate to the previous multiple of 4. */
+#define WORD_TRUNC(s) ((s)&~3)
+
/*
* Function declarations.
*/
@@ -426,7 +431,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
- frag = min_t(int, frag, SCTP_MAX_CHUNK_LEN);
+ frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
return frag;
}
@@ -475,9 +480,6 @@ for (pos = chunk->subh.fwdtsn_hdr->skip;\
(void *)pos <= (void *)chunk->subh.fwdtsn_hdr->skip + end - sizeof(struct sctp_fwdtsn_skip);\
pos++)
-/* Round an int up to the next multiple of 4. */
-#define WORD_ROUND(s) (((s)+3)&~3)
-
/* External references. */
extern struct proto sctp_prot;
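
The moved WORD_ROUND and new WORD_TRUNC macros bracket a length between adjacent multiples of four; the sctp_frag_point() change above uses the latter so the fragmentation point is always word-aligned. For example:

/* WORD_ROUND(5) = (5 + 3) & ~3 = 8
 * WORD_TRUNC(5) =  5      & ~3 = 4
 */
int frag = WORD_TRUNC(1453);	/* == 1452, largest multiple of 4 <= 1453 */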
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e2ac0620d4be..6df1ce7a411c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1097,7 +1097,7 @@ int sctp_bind_addr_dup(struct sctp_bind_addr *dest,
const struct sctp_bind_addr *src,
gfp_t gfp);
int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
- __u8 addr_state, gfp_t gfp);
+ int new_size, __u8 addr_state, gfp_t gfp);
int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
struct sctp_sock *);
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index a763c96ecde4..73ed2e951c02 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -271,36 +271,36 @@ static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb)
static inline __be32 vxlan_vni(__be32 vni_field)
{
#if defined(__BIG_ENDIAN)
- return vni_field >> 8;
+ return (__force __be32)((__force u32)vni_field >> 8);
#else
- return (vni_field & VXLAN_VNI_MASK) << 8;
+ return (__force __be32)((__force u32)(vni_field & VXLAN_VNI_MASK) << 8);
#endif
}
static inline __be32 vxlan_vni_field(__be32 vni)
{
#if defined(__BIG_ENDIAN)
- return vni << 8;
+ return (__force __be32)((__force u32)vni << 8);
#else
- return vni >> 8;
+ return (__force __be32)((__force u32)vni >> 8);
#endif
}
static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id)
{
#if defined(__BIG_ENDIAN)
- return tun_id;
+ return (__force __be32)tun_id;
#else
- return tun_id >> 32;
+ return (__force __be32)((__force u64)tun_id >> 32);
#endif
}
static inline __be64 vxlan_vni_to_tun_id(__be32 vni)
{
#if defined(__BIG_ENDIAN)
- return (__be64)vni;
+ return (__force __be64)vni;
#else
- return (__be64)vni << 32;
+ return (__force __be64)((u64)(__force u32)vni << 32);
#endif
}
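
The __force casts above only silence sparse's endianness checking; the conversions themselves are unchanged and still round-trip. An illustrative sanity check (hypothetical, holds on either endianness):

__be32 vni = cpu_to_be32(0x123456);	/* a valid 24-bit VNI */

WARN_ON(vxlan_vni(vxlan_vni_field(vni)) != vni);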
diff --git a/include/trace/events/fib6.h b/include/trace/events/fib6.h
index 4cf6bac4686d..d60096cddb2a 100644
--- a/include/trace/events/fib6.h
+++ b/include/trace/events/fib6.h
@@ -37,7 +37,7 @@ TRACE_EVENT(fib6_table_lookup,
__entry->tb_id = tb_id;
__entry->oif = flp->flowi6_oif;
__entry->iif = flp->flowi6_iif;
- __entry->tos = flp->flowi6_tos;
+ __entry->tos = ip6_tclass(flp->flowlabel);
__entry->scope = flp->flowi6_scope;
__entry->flags = flp->flowi6_flags;
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index ca7217389067..6b2e154fd23a 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -140,42 +140,19 @@ DEFINE_EVENT(kmem_free, kfree,
TP_ARGS(call_site, ptr)
);
-DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
+DEFINE_EVENT(kmem_free, kmem_cache_free,
TP_PROTO(unsigned long call_site, const void *ptr),
- TP_ARGS(call_site, ptr),
-
- /*
- * This trace can be potentially called from an offlined cpu.
- * Since trace points use RCU and RCU should not be used from
- * offline cpus, filter such calls out.
- * While this trace can be called from a preemptable section,
- * it has no impact on the condition since tasks can migrate
- * only from online cpus to other online cpus. Thus its safe
- * to use raw_smp_processor_id.
- */
- TP_CONDITION(cpu_online(raw_smp_processor_id()))
+ TP_ARGS(call_site, ptr)
);
-TRACE_EVENT_CONDITION(mm_page_free,
+TRACE_EVENT(mm_page_free,
TP_PROTO(struct page *page, unsigned int order),
TP_ARGS(page, order),
-
- /*
- * This trace can be potentially called from an offlined cpu.
- * Since trace points use RCU and RCU should not be used from
- * offline cpus, filter such calls out.
- * While this trace can be called from a preemptable section,
- * it has no impact on the condition since tasks can migrate
- * only from online cpus to other online cpus. Thus its safe
- * to use raw_smp_processor_id.
- */
- TP_CONDITION(cpu_online(raw_smp_processor_id())),
-
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
@@ -276,23 +253,12 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
TP_ARGS(page, order, migratetype)
);
-TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
+TRACE_EVENT(mm_page_pcpu_drain,
TP_PROTO(struct page *page, unsigned int order, int migratetype),
TP_ARGS(page, order, migratetype),
- /*
- * This trace can be potentially called from an offlined cpu.
- * Since trace points use RCU and RCU should not be used from
- * offline cpus, filter such calls out.
- * While this trace can be called from a preemptable section,
- * it has no impact on the condition since tasks can migrate
- * only from online cpus to other online cpus. Thus its safe
- * to use raw_smp_processor_id.
- */
- TP_CONDITION(cpu_online(raw_smp_processor_id())),
-
TP_STRUCT__entry(
__field( unsigned long, pfn )
__field( unsigned int, order )
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
index 5738bb3e2343..2b4a8ff72d0d 100644
--- a/include/trace/events/thermal.h
+++ b/include/trace/events/thermal.h
@@ -8,6 +8,18 @@
#include <linux/thermal.h>
#include <linux/tracepoint.h>
+TRACE_DEFINE_ENUM(THERMAL_TRIP_CRITICAL);
+TRACE_DEFINE_ENUM(THERMAL_TRIP_HOT);
+TRACE_DEFINE_ENUM(THERMAL_TRIP_PASSIVE);
+TRACE_DEFINE_ENUM(THERMAL_TRIP_ACTIVE);
+
+#define show_tzt_type(type) \
+ __print_symbolic(type, \
+ { THERMAL_TRIP_CRITICAL, "CRITICAL"}, \
+ { THERMAL_TRIP_HOT, "HOT"}, \
+ { THERMAL_TRIP_PASSIVE, "PASSIVE"}, \
+ { THERMAL_TRIP_ACTIVE, "ACTIVE"})
+
TRACE_EVENT(thermal_temperature,
TP_PROTO(struct thermal_zone_device *tz),
@@ -73,9 +85,9 @@ TRACE_EVENT(thermal_zone_trip,
__entry->trip_type = trip_type;
),
- TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d",
+ TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%s",
__get_str(thermal_zone), __entry->id, __entry->trip,
- __entry->trip_type)
+ show_tzt_type(__entry->trip_type))
);
TRACE_EVENT(thermal_power_cpu_get_power,
diff --git a/include/trace/events/tlb.h b/include/trace/events/tlb.h
index bc8815f45f3b..9d14b1992108 100644
--- a/include/trace/events/tlb.h
+++ b/include/trace/events/tlb.h
@@ -34,13 +34,11 @@ TLB_FLUSH_REASON
#define EM(a,b) { a, b },
#define EMe(a,b) { a, b }
-TRACE_EVENT_CONDITION(tlb_flush,
+TRACE_EVENT(tlb_flush,
TP_PROTO(int reason, unsigned long pages),
TP_ARGS(reason, pages),
- TP_CONDITION(cpu_online(smp_processor_id())),
-
TP_STRUCT__entry(
__field( int, reason)
__field(unsigned long, pages)
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index fff846b512e6..73614ce1d204 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -134,58 +134,28 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK
-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
- return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1;
+ return wb->memcg_css->cgroup->kn->ino;
}
-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
-{
- struct cgroup *cgrp = wb->memcg_css->cgroup;
- char *path;
-
- path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1);
- WARN_ON_ONCE(path != buf);
-}
-
-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
-{
- if (wbc->wb)
- return __trace_wb_cgroup_size(wbc->wb);
- else
- return 2;
-}
-
-static inline void __trace_wbc_assign_cgroup(char *buf,
- struct writeback_control *wbc)
+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
if (wbc->wb)
- __trace_wb_assign_cgroup(buf, wbc->wb);
+ return __trace_wb_assign_cgroup(wbc->wb);
else
- strcpy(buf, "/");
+ return -1U;
}
-
#else /* CONFIG_CGROUP_WRITEBACK */
-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb)
-{
- return 2;
-}
-
-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb)
-{
- strcpy(buf, "/");
-}
-
-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc)
+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
- return 2;
+ return -1U;
}
-static inline void __trace_wbc_assign_cgroup(char *buf,
- struct writeback_control *wbc)
+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
- strcpy(buf, "/");
+ return -1U;
}
#endif /* CONFIG_CGROUP_WRITEBACK */
@@ -201,7 +171,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
__array(char, name, 32)
__field(unsigned long, ino)
__field(int, sync_mode)
- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -209,14 +179,14 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
dev_name(inode_to_bdi(inode)->dev), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s",
+ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
__entry->name,
__entry->ino,
__entry->sync_mode,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -246,7 +216,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, range_cyclic)
__field(int, for_background)
__field(int, reason)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
strncpy(__entry->name,
@@ -258,10 +228,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic = work->range_cyclic;
__entry->for_background = work->for_background;
__entry->reason = work->reason;
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s",
+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
@@ -270,7 +240,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic,
__entry->for_background,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -300,15 +270,15 @@ DECLARE_EVENT_CLASS(writeback_class,
TP_ARGS(wb),
TP_STRUCT__entry(
__array(char, name, 32)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: cgroup=%s",
+ TP_printk("bdi %s: cgroup_ino=%u",
__entry->name,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_EVENT(name) \
@@ -347,7 +317,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -361,12 +331,12 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic = wbc->range_cyclic;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
"bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx cgroup=%s",
+ "start=0x%lx end=0x%lx cgroup_ino=%u",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
@@ -377,7 +347,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
)
@@ -398,7 +368,7 @@ TRACE_EVENT(writeback_queue_io,
__field(long, age)
__field(int, moved)
__field(int, reason)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
@@ -408,15 +378,15 @@ TRACE_EVENT(writeback_queue_io,
(jiffies - *older_than_this) * 1000 / HZ : -1;
__entry->moved = moved;
__entry->reason = work->reason;
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s",
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
__entry->name,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -484,7 +454,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned long, balanced_dirty_ratelimit)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -496,13 +466,13 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__entry->task_ratelimit = KBps(task_ratelimit);
__entry->balanced_dirty_ratelimit =
KBps(wb->balanced_dirty_ratelimit);
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: "
"write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
- "balanced_dirty_ratelimit=%lu cgroup=%s",
+ "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
__entry->bdi,
__entry->write_bw, /* write bandwidth */
__entry->avg_write_bw, /* avg write bandwidth */
@@ -510,7 +480,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__entry->dirty_ratelimit, /* base ratelimit */
__entry->task_ratelimit, /* ratelimit with position control */
__entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -548,7 +518,7 @@ TRACE_EVENT(balance_dirty_pages,
__field( long, pause)
__field(unsigned long, period)
__field( long, think)
- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -571,7 +541,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->period = period * 1000 / HZ;
__entry->pause = pause * 1000 / HZ;
__entry->paused = (jiffies - start_time) * 1000 / HZ;
- __trace_wb_assign_cgroup(__get_str(cgroup), wb);
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
@@ -580,7 +550,7 @@ TRACE_EVENT(balance_dirty_pages,
"bdi_setpoint=%lu bdi_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
- "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s",
+ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
__entry->bdi,
__entry->limit,
__entry->setpoint,
@@ -595,7 +565,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->pause, /* ms */
__entry->period, /* ms */
__entry->think, /* ms */
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -609,8 +579,7 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__field(unsigned long, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
- __dynamic_array(char, cgroup,
- __trace_wb_cgroup_size(inode_to_wb(inode)))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -619,16 +588,16 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
- __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode));
+ __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
),
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s",
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
@@ -684,7 +653,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc))
+ __field(unsigned int, cgroup_ino)
),
TP_fast_assign(
@@ -696,11 +665,11 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->nr_to_write = nr_to_write;
__entry->wrote = nr_to_write - wbc->nr_to_write;
- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc);
+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
- "index=%lu to_write=%ld wrote=%lu cgroup=%s",
+ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
__entry->name,
__entry->ino,
show_inode_state(__entry->state),
@@ -709,7 +678,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
__entry->writeback_index,
__entry->nr_to_write,
__entry->wrote,
- __get_str(cgroup)
+ __entry->cgroup_ino
)
);
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 2835b07416b7..9222db8ccccc 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1648,9 +1648,9 @@ enum ethtool_reset_flags {
* %ETHTOOL_GLINKSETTINGS: on entry, number of words passed by user
* (>= 0); on return, if handshake in progress, negative if
* request size unsupported by kernel: absolute value indicates
- * kernel recommended size and cmd field is 0, as well as all the
- * other fields; otherwise (handshake completed), strictly
- * positive to indicate size used by kernel and cmd field is
+ * kernel expected size and all the other fields but cmd
+ * are 0; otherwise (handshake completed), strictly positive
+ * to indicate size used by kernel and cmd field stays
* %ETHTOOL_GLINKSETTINGS, all other fields populated by driver. For
* %ETHTOOL_SLINKSETTINGS: must be valid on entry, ie. a positive
* value returned previously by %ETHTOOL_GLINKSETTINGS, otherwise
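
From user space, the handshake described above looks roughly like this (a hedged sketch with error handling trimmed; the interface name and buffer sizing are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

static int get_link_settings(int fd, const char *name)
{
	struct {
		struct ethtool_link_settings req;
		__u32 link_mode_data[3 * 127];	/* worst-case masks */
	} ecmd;
	struct ifreq ifr;

	memset(&ecmd, 0, sizeof(ecmd));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name) - 1);
	ifr.ifr_data = (void *)&ecmd;

	/* Pass 1: nwords = 0, kernel answers with minus its expected size */
	ecmd.req.cmd = ETHTOOL_GLINKSETTINGS;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return -1;
	if (ecmd.req.link_mode_masks_nwords >= 0)
		return 0;	/* handshake already complete */

	/* Pass 2: resend with the size the kernel asked for */
	ecmd.req.link_mode_masks_nwords = -ecmd.req.link_mode_masks_nwords;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}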
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index a62a0129d614..c488066fb53a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -153,6 +153,8 @@ enum {
IFLA_LINK_NETNSID,
IFLA_PHYS_PORT_NAME,
IFLA_PROTO_DOWN,
+ IFLA_GSO_MAX_SEGS,
+ IFLA_GSO_MAX_SIZE,
__IFLA_MAX
};