Diffstat (limited to 'include/net')
62 files changed, 1613 insertions, 463 deletions
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 757a17638b1b..f2b801c4b555 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -92,7 +92,7 @@ extern void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr); static inline unsigned long addrconf_timeout_fixup(u32 timeout, - unsigned unit) + unsigned int unit) { if (timeout == 0xffffffff) return ~0UL; @@ -131,9 +131,9 @@ extern int ipv6_sock_mc_join(struct sock *sk, int ifindex, extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr); extern void ipv6_sock_mc_close(struct sock *sk); -extern int inet6_mc_check(struct sock *sk, - const struct in6_addr *mc_addr, - const struct in6_addr *src_addr); +extern bool inet6_mc_check(struct sock *sk, + const struct in6_addr *mc_addr, + const struct in6_addr *src_addr); extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr); extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr); @@ -146,10 +146,10 @@ extern void ipv6_mc_init_dev(struct inet6_dev *idev); extern void ipv6_mc_destroy_dev(struct inet6_dev *idev); extern void addrconf_dad_failure(struct inet6_ifaddr *ifp); -extern int ipv6_chk_mcast_addr(struct net_device *dev, - const struct in6_addr *group, - const struct in6_addr *src_addr); -extern int ipv6_is_mld(struct sk_buff *skb, int nexthdr); +extern bool ipv6_chk_mcast_addr(struct net_device *dev, + const struct in6_addr *group, + const struct in6_addr *src_addr); +extern bool ipv6_is_mld(struct sk_buff *skb, int nexthdr); extern void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao); @@ -163,8 +163,8 @@ extern void ipv6_sock_ac_close(struct sock *sk); extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr); extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr); -extern int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, - const struct in6_addr *addr); +extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev, + const struct in6_addr *addr); /* Device notifier */ diff --git a/include/net/af_unix.h b/include/net/af_unix.h index ca68e2cef230..2ee33da36a7a 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -22,7 +22,7 @@ extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1]; struct unix_address { atomic_t refcnt; int len; - unsigned hash; + unsigned int hash; struct sockaddr_un name[0]; }; diff --git a/include/net/ax25.h b/include/net/ax25.h index 94e09d361bb1..5d2352154cf6 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -215,7 +215,7 @@ typedef struct ax25_dev { struct ax25_dev *next; struct net_device *dev; struct net_device *forward; - struct ctl_table *systable; + struct ctl_table_header *sysheader; int values[AX25_MAX_VALUES]; #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_dama_info dama; @@ -441,11 +441,11 @@ extern void ax25_uid_free(void); /* sysctl_net_ax25.c */ #ifdef CONFIG_SYSCTL -extern void ax25_register_sysctl(void); -extern void ax25_unregister_sysctl(void); +extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev); +extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev); #else -static inline void ax25_register_sysctl(void) {}; -static inline void ax25_unregister_sysctl(void) {}; +static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; } +static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {} #endif /* CONFIG_SYSCTL */ #endif diff --git 
a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 262ebd1747d4..a65910bda381 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -191,6 +191,7 @@ struct bt_sock { struct list_head accept_q; struct sock *parent; u32 defer_setup; + bool suspended; }; struct bt_sock_list { diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6822d2595aff..db1c5df45224 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -314,6 +314,7 @@ struct hci_conn { __u8 remote_cap; __u8 remote_auth; + bool flush_key; unsigned int sent; @@ -980,7 +981,7 @@ int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); int mgmt_connectable(struct hci_dev *hdev, u8 connectable); int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status); int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, - u8 persistent); + bool persistent); int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 flags, u8 *name, u8 name_len, u8 *dev_class); diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h index 6db8ecf52aa2..439dadc8102f 100644 --- a/include/net/caif/caif_hsi.h +++ b/include/net/caif/caif_hsi.h @@ -123,12 +123,21 @@ struct cfhsi_rx_state { bool piggy_desc; }; +/* Priority mapping */ +enum { + CFHSI_PRIO_CTL = 0, + CFHSI_PRIO_VI, + CFHSI_PRIO_VO, + CFHSI_PRIO_BEBK, + CFHSI_PRIO_LAST, +}; + /* Structure implemented by CAIF HSI drivers. */ struct cfhsi { struct caif_dev_common cfdev; struct net_device *ndev; struct platform_device *pdev; - struct sk_buff_head qhead; + struct sk_buff_head qhead[CFHSI_PRIO_LAST]; struct cfhsi_drv drv; struct cfhsi_dev *dev; int tx_state; @@ -151,8 +160,14 @@ struct cfhsi { wait_queue_head_t wake_up_wait; wait_queue_head_t wake_down_wait; wait_queue_head_t flush_fifo_wait; - struct timer_list timer; + struct timer_list inactivity_timer; struct timer_list rx_slowpath_timer; + + /* TX aggregation */ + unsigned long aggregation_timeout; + int aggregation_len; + struct timer_list aggregation_timer; + unsigned long bits; }; diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h index 6bd200a4754a..83a89ba3005b 100644 --- a/include/net/caif/cfpkt.h +++ b/include/net/caif/cfpkt.h @@ -188,11 +188,18 @@ struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt); */ void *cfpkt_tonative(struct cfpkt *pkt); - /* * Returns packet information for a packet. * pkt Packet to get info from; * @return Packet information */ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt); + +/** cfpkt_set_prio - set priority for a CAIF packet. + * + * @pkt: The CAIF packet to be adjusted. + * @prio: one of TC_PRIO_ constants. 
+ */ +void cfpkt_set_prio(struct cfpkt *pkt, int prio); + #endif /* CFPKT_H_ */ diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 83d800c31e3c..adb2320bccdf 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -521,6 +521,7 @@ struct station_parameters { * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled * @STATION_INFO_STA_FLAGS: @sta_flags filled * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled + * @STATION_INFO_T_OFFSET: @t_offset filled */ enum station_info_flags { STATION_INFO_INACTIVE_TIME = 1<<0, @@ -542,7 +543,8 @@ enum station_info_flags { STATION_INFO_CONNECTED_TIME = 1<<16, STATION_INFO_ASSOC_REQ_IES = 1<<17, STATION_INFO_STA_FLAGS = 1<<18, - STATION_INFO_BEACON_LOSS_COUNT = 1<<19 + STATION_INFO_BEACON_LOSS_COUNT = 1<<19, + STATION_INFO_T_OFFSET = 1<<20, }; /** @@ -643,6 +645,7 @@ struct sta_bss_parameters { * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. * @sta_flags: station flags mask & values * @beacon_loss_count: Number of times beacon loss event has triggered. + * @t_offset: Time offset of the station relative to this host. */ struct station_info { u32 filled; @@ -671,6 +674,7 @@ struct station_info { size_t assoc_req_ies_len; u32 beacon_loss_count; + s64 t_offset; /* * Note: Add a new enum station_info_flags value for each new field and @@ -798,6 +802,8 @@ struct mesh_config { /* ttl used in path selection information elements */ u8 element_ttl; bool auto_open_plinks; + /* neighbor offset synchronization */ + u32 dot11MeshNbrOffsetMaxNeighbor; /* HWMP parameters */ u8 dot11MeshHWMPmaxPREQretries; u32 path_refresh_time; @@ -815,12 +821,14 @@ struct mesh_config { bool dot11MeshGateAnnouncementProtocol; bool dot11MeshForwarding; s32 rssi_threshold; + u16 ht_opmode; }; /** * struct mesh_setup - 802.11s mesh setup configuration * @mesh_id: the mesh ID * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes + * @sync_method: which synchronization method to use * @path_sel_proto: which path selection protocol to use * @path_metric: which metric to use * @ie: vendor information elements (optional) @@ -834,8 +842,9 @@ struct mesh_config { struct mesh_setup { const u8 *mesh_id; u8 mesh_id_len; - u8 path_sel_proto; - u8 path_metric; + u8 sync_method; + u8 path_sel_proto; + u8 path_metric; const u8 *ie; u8 ie_len; bool is_authenticated; @@ -845,7 +854,7 @@ struct mesh_setup { /** * struct ieee80211_txq_params - TX queue parameters - * @queue: TX queue identifier (NL80211_TXQ_Q_*) + * @ac: AC identifier * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled * @cwmin: Minimum contention window [a value of the form 2^n-1 in the range * 1..32767] @@ -854,7 +863,7 @@ struct mesh_setup { * @aifs: Arbitration interframe space [0..255] */ struct ieee80211_txq_params { - enum nl80211_txq_q queue; + enum nl80211_ac ac; u16 txop; u16 cwmin; u16 cwmax; @@ -1336,6 +1345,9 @@ struct cfg80211_gtk_rekey_data { * be %NULL or contain the enabled Wake-on-Wireless triggers that are * configured for the device. * @resume: wiphy device needs to be resumed + * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback + * to call device_set_wakeup_enable() to enable/disable wakeup from + * the device. * * @add_virtual_intf: create a new virtual interface with the given name, * must set the struct wireless_dev's iftype. Beware: You must create @@ -1503,10 +1515,21 @@ struct cfg80211_gtk_rekey_data { * later passes to cfg80211_probe_status(). * * @set_noack_map: Set the NoAck Map for the TIDs. 
+ * + * @get_et_sset_count: Ethtool API to get string-set count. + * See @ethtool_ops.get_sset_count + * + * @get_et_stats: Ethtool API to get a set of u64 stats. + * See @ethtool_ops.get_ethtool_stats + * + * @get_et_strings: Ethtool API to get a set of strings to describe stats + * and perhaps other supported types of ethtool data-sets. + * See @ethtool_ops.get_strings */ struct cfg80211_ops { int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); int (*resume)(struct wiphy *wiphy); + void (*set_wakeup)(struct wiphy *wiphy, bool enabled); struct net_device * (*add_virtual_intf)(struct wiphy *wiphy, char *name, @@ -1698,7 +1721,15 @@ struct cfg80211_ops { struct net_device *dev, u16 noack_map); - struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy); + struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy, + enum nl80211_channel_type *type); + + int (*get_et_sset_count)(struct wiphy *wiphy, + struct net_device *dev, int sset); + void (*get_et_stats)(struct wiphy *wiphy, struct net_device *dev, + struct ethtool_stats *stats, u64 *data); + void (*get_et_strings)(struct wiphy *wiphy, struct net_device *dev, + u32 sset, u8 *data); }; /* @@ -1732,10 +1763,6 @@ struct cfg80211_ops { * hints read the documenation for regulatory_hint_found_beacon() * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this * wiphy at all - * @WIPHY_FLAG_ENFORCE_COMBINATIONS: Set this flag to enforce interface - * combinations for this device. This flag is used for backward - * compatibility only until all drivers advertise combinations and - * they will always be enforced. * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled * by default -- this flag will be set depending on the kernel's default * on wiphy_new(), but can be changed by the driver if it has a good @@ -1780,7 +1807,7 @@ enum wiphy_flags { WIPHY_FLAG_IBSS_RSN = BIT(8), WIPHY_FLAG_MESH_AUTH = BIT(10), WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11), - WIPHY_FLAG_ENFORCE_COMBINATIONS = BIT(12), + /* use hole at 12 */ WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13), WIPHY_FLAG_AP_UAPSD = BIT(14), WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), @@ -3343,6 +3370,17 @@ int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy, enum nl80211_channel_type channel_type); /* + * cfg80211_ch_switch_notify - update wdev channel and notify userspace + * @dev: the device which switched channels + * @freq: new channel frequency (in MHz) + * @type: channel type + * + * Acquires wdev_lock, so must only be called from sleepable driver context! + */ +void cfg80211_ch_switch_notify(struct net_device *dev, int freq, + enum nl80211_channel_type type); + +/* * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units) * @rate: given rate_info to calculate bitrate from * diff --git a/include/net/codel.h b/include/net/codel.h new file mode 100644 index 000000000000..550debfc2403 --- /dev/null +++ b/include/net/codel.h @@ -0,0 +1,342 @@ +#ifndef __NET_SCHED_CODEL_H +#define __NET_SCHED_CODEL_H + +/* + * Codel - The Controlled-Delay Active Queue Management algorithm + * + * Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com> + * Copyright (C) 2011-2012 Van Jacobson <van@pollere.net> + * Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net> + * Copyright (C) 2012 Eric Dumazet <edumazet@google.com> + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the authors may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * Alternatively, provided that this notice is retained in full, this + * software may be distributed under the terms of the GNU General + * Public License ("GPL") version 2, in which case the provisions of the + * GPL apply INSTEAD OF those given above. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH + * DAMAGE. + * + */ + +#include <linux/types.h> +#include <linux/ktime.h> +#include <linux/skbuff.h> +#include <net/pkt_sched.h> +#include <net/inet_ecn.h> +#include <linux/reciprocal_div.h> + +/* Controlling Queue Delay (CoDel) algorithm + * ========================================= + * Source : Kathleen Nichols and Van Jacobson + * http://queue.acm.org/detail.cfm?id=2209336 + * + * Implemented on linux by Dave Taht and Eric Dumazet + */ + + +/* CoDel uses a 1024 nsec clock, encoded in u32 + * This gives a range of 2199 seconds, because of signed compares + */ +typedef u32 codel_time_t; +typedef s32 codel_tdiff_t; +#define CODEL_SHIFT 10 +#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT) + +static inline codel_time_t codel_get_time(void) +{ + u64 ns = ktime_to_ns(ktime_get()); + + return ns >> CODEL_SHIFT; +} + +#define codel_time_after(a, b) ((s32)(a) - (s32)(b) > 0) +#define codel_time_after_eq(a, b) ((s32)(a) - (s32)(b) >= 0) +#define codel_time_before(a, b) ((s32)(a) - (s32)(b) < 0) +#define codel_time_before_eq(a, b) ((s32)(a) - (s32)(b) <= 0) + +/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */ +struct codel_skb_cb { + codel_time_t enqueue_time; +}; + +static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb) +{ + qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb)); + return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data; +} + +static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb) +{ + return get_codel_cb(skb)->enqueue_time; +} + +static void codel_set_enqueue_time(struct sk_buff *skb) +{ + get_codel_cb(skb)->enqueue_time = codel_get_time(); +} + +static inline u32 codel_time_to_us(codel_time_t val) +{ + u64 valns = ((u64)val << CODEL_SHIFT); + + do_div(valns, NSEC_PER_USEC); + return (u32)valns; +} + +/** + * struct codel_params - contains codel parameters + * @target: target queue size (in time units) + * @interval: width of moving time window + * @ecn: is Explicit 
Congestion Notification enabled + */ +struct codel_params { + codel_time_t target; + codel_time_t interval; + bool ecn; +}; + +/** + * struct codel_vars - contains codel variables + * @count: how many drops we've done since the last time we + * entered dropping state + * @lastcount: count at entry to dropping state + * @dropping: set to true if in dropping state + * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1 + * @first_above_time: when we went (or will go) continuously above target + * for interval + * @drop_next: time to drop next packet, or when we dropped last + * @ldelay: sojourn time of last dequeued packet + */ +struct codel_vars { + u32 count; + u32 lastcount; + bool dropping; + u16 rec_inv_sqrt; + codel_time_t first_above_time; + codel_time_t drop_next; + codel_time_t ldelay; +}; + +#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */ +/* needed shift to get a Q0.32 number from rec_inv_sqrt */ +#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS) + +/** + * struct codel_stats - contains codel shared variables and stats + * @maxpacket: largest packet we've seen so far + * @drop_count: temp count of dropped packets in dequeue() + * ecn_mark: number of packets we ECN marked instead of dropping + */ +struct codel_stats { + u32 maxpacket; + u32 drop_count; + u32 ecn_mark; +}; + +static void codel_params_init(struct codel_params *params) +{ + params->interval = MS2TIME(100); + params->target = MS2TIME(5); + params->ecn = false; +} + +static void codel_vars_init(struct codel_vars *vars) +{ + memset(vars, 0, sizeof(*vars)); +} + +static void codel_stats_init(struct codel_stats *stats) +{ + stats->maxpacket = 256; +} + +/* + * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots + * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2) + * + * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32 + */ +static void codel_Newton_step(struct codel_vars *vars) +{ + u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT; + u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32; + u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2); + + val >>= 2; /* avoid overflow in following multiply */ + val = (val * invsqrt) >> (32 - 2 + 1); + + vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT; +} + +/* + * CoDel control_law is t + interval/sqrt(count) + * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid + * both sqrt() and divide operation. + */ +static codel_time_t codel_control_law(codel_time_t t, + codel_time_t interval, + u32 rec_inv_sqrt) +{ + return t + reciprocal_divide(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT); +} + + +static bool codel_should_drop(const struct sk_buff *skb, + struct Qdisc *sch, + struct codel_vars *vars, + struct codel_params *params, + struct codel_stats *stats, + codel_time_t now) +{ + bool ok_to_drop; + + if (!skb) { + vars->first_above_time = 0; + return false; + } + + vars->ldelay = now - codel_get_enqueue_time(skb); + sch->qstats.backlog -= qdisc_pkt_len(skb); + + if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket)) + stats->maxpacket = qdisc_pkt_len(skb); + + if (codel_time_before(vars->ldelay, params->target) || + sch->qstats.backlog <= stats->maxpacket) { + /* went below - stay below for at least interval */ + vars->first_above_time = 0; + return false; + } + ok_to_drop = false; + if (vars->first_above_time == 0) { + /* just went above from below. 
If we stay above + * for at least interval we'll say it's ok to drop + */ + vars->first_above_time = now + params->interval; + } else if (codel_time_after(now, vars->first_above_time)) { + ok_to_drop = true; + } + return ok_to_drop; +} + +typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars, + struct Qdisc *sch); + +static struct sk_buff *codel_dequeue(struct Qdisc *sch, + struct codel_params *params, + struct codel_vars *vars, + struct codel_stats *stats, + codel_skb_dequeue_t dequeue_func) +{ + struct sk_buff *skb = dequeue_func(vars, sch); + codel_time_t now; + bool drop; + + if (!skb) { + vars->dropping = false; + return skb; + } + now = codel_get_time(); + drop = codel_should_drop(skb, sch, vars, params, stats, now); + if (vars->dropping) { + if (!drop) { + /* sojourn time below target - leave dropping state */ + vars->dropping = false; + } else if (codel_time_after_eq(now, vars->drop_next)) { + /* It's time for the next drop. Drop the current + * packet and dequeue the next. The dequeue might + * take us out of dropping state. + * If not, schedule the next drop. + * A large backlog might result in drop rates so high + * that the next drop should happen now, + * hence the while loop. + */ + while (vars->dropping && + codel_time_after_eq(now, vars->drop_next)) { + vars->count++; /* dont care of possible wrap + * since there is no more divide + */ + codel_Newton_step(vars); + if (params->ecn && INET_ECN_set_ce(skb)) { + stats->ecn_mark++; + vars->drop_next = + codel_control_law(vars->drop_next, + params->interval, + vars->rec_inv_sqrt); + goto end; + } + qdisc_drop(skb, sch); + stats->drop_count++; + skb = dequeue_func(vars, sch); + if (!codel_should_drop(skb, sch, + vars, params, stats, now)) { + /* leave dropping state */ + vars->dropping = false; + } else { + /* and schedule the next drop */ + vars->drop_next = + codel_control_law(vars->drop_next, + params->interval, + vars->rec_inv_sqrt); + } + } + } + } else if (drop) { + if (params->ecn && INET_ECN_set_ce(skb)) { + stats->ecn_mark++; + } else { + qdisc_drop(skb, sch); + stats->drop_count++; + + skb = dequeue_func(vars, sch); + drop = codel_should_drop(skb, sch, vars, params, + stats, now); + } + vars->dropping = true; + /* if min went above target close to when we last went below it + * assume that the drop rate that controlled the queue on the + * last cycle is a good starting point to control it now. + */ + if (codel_time_before(now - vars->drop_next, + 16 * params->interval)) { + vars->count = (vars->count - vars->lastcount) | 1; + /* we dont care if rec_inv_sqrt approximation + * is not very precise : + * Next Newton steps will correct it quadratically. 
+ */ + codel_Newton_step(vars); + } else { + vars->count = 1; + vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT; + } + vars->lastcount = vars->count; + vars->drop_next = codel_control_law(now, params->interval, + vars->rec_inv_sqrt); + } +end: + return skb; +} +#endif diff --git a/include/net/compat.h b/include/net/compat.h index a974ae92d182..6e9565324989 100644 --- a/include/net/compat.h +++ b/include/net/compat.h @@ -42,12 +42,12 @@ extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *); extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *); extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int); -extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned); +extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int); extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, - unsigned, unsigned); -extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned); + unsigned int, unsigned int); +extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int); extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *, - unsigned, unsigned, + unsigned int, unsigned int, struct compat_timespec __user *); extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *); extern int put_cmsg_compat(struct msghdr*, int, int, int, void *); diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h index f55c980d8e23..fc5d5dcebb00 100644 --- a/include/net/dcbnl.h +++ b/include/net/dcbnl.h @@ -48,6 +48,8 @@ struct dcbnl_rtnl_ops { /* IEEE 802.1Qaz std */ int (*ieee_getets) (struct net_device *, struct ieee_ets *); int (*ieee_setets) (struct net_device *, struct ieee_ets *); + int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *); + int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *); int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); int (*ieee_getapp) (struct net_device *, struct dcb_app *); diff --git a/include/net/dn.h b/include/net/dn.h index 814af0b9387d..c88bf4ebd330 100644 --- a/include/net/dn.h +++ b/include/net/dn.h @@ -199,7 +199,7 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp) fld->fld_dport = scp->addrrem; } -extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu); +extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu); #define DN_MENUVER_ACC 0x01 #define DN_MENUVER_USR 0x02 diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h index 782ef7cb4930..1ee9d4bda30d 100644 --- a/include/net/dn_fib.h +++ b/include/net/dn_fib.h @@ -31,7 +31,7 @@ struct dn_fib_res { struct dn_fib_nh { struct net_device *nh_dev; - unsigned nh_flags; + unsigned int nh_flags; unsigned char nh_scope; int nh_weight; int nh_power; @@ -45,7 +45,7 @@ struct dn_fib_info { int fib_treeref; atomic_t fib_clntref; int fib_dead; - unsigned fib_flags; + unsigned int fib_flags; int fib_protocol; __le16 fib_prefsrc; __u32 fib_priority; @@ -140,7 +140,7 @@ extern void dn_fib_table_cleanup(void); */ extern void dn_fib_rules_init(void); extern void dn_fib_rules_cleanup(void); -extern unsigned dnet_addr_type(__le16 addr); +extern unsigned int dnet_addr_type(__le16 addr); extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res); extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb); diff 
--git a/include/net/dn_route.h b/include/net/dn_route.h index 81712cfa1ddf..c507e05d172f 100644 --- a/include/net/dn_route.h +++ b/include/net/dn_route.h @@ -76,8 +76,8 @@ struct dn_route { __le16 rt_src_map; __le16 rt_dst_map; - unsigned rt_flags; - unsigned rt_type; + unsigned int rt_flags; + unsigned int rt_type; }; static inline bool dn_is_input_route(struct dn_route *rt) diff --git a/include/net/dst.h b/include/net/dst.h index 59c5d18cc385..bed833d9796a 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -36,7 +36,11 @@ struct dst_entry { struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; - unsigned long expires; + union { + unsigned long expires; + /* point to where the dst_entry copied from */ + struct dst_entry *from; + }; struct dst_entry *path; struct neighbour __rcu *_neighbour; #ifdef CONFIG_XFRM @@ -55,6 +59,7 @@ struct dst_entry { #define DST_NOCACHE 0x0010 #define DST_NOCOUNT 0x0020 #define DST_NOPEER 0x0040 +#define DST_FAKE_RTABLE 0x0080 short error; short obsolete; diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index e1c2ee0eef47..3682a0a076c1 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h @@ -12,7 +12,7 @@ struct sk_buff; struct dst_ops { unsigned short family; __be16 protocol; - unsigned gc_thresh; + unsigned int gc_thresh; int (*gc)(struct dst_ops *ops); struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); diff --git a/include/net/icmp.h b/include/net/icmp.h index 75d615649071..9ac2524d1402 100644 --- a/include/net/icmp.h +++ b/include/net/icmp.h @@ -25,7 +25,7 @@ struct icmp_err { int errno; - unsigned fatal:1; + unsigned int fatal:1; }; extern const struct icmp_err icmp_err_convert[]; @@ -41,7 +41,6 @@ struct net; extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); extern int icmp_rcv(struct sk_buff *skb); -extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg); extern int icmp_init(void); extern void icmp_out_count(struct net *net, unsigned char type); diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h index 57430555487a..d104c882fc29 100644 --- a/include/net/ieee802154_netdev.h +++ b/include/net/ieee802154_netdev.h @@ -1,7 +1,7 @@ /* * An interface between IEEE802.15.4 device and rest of the kernel. * - * Copyright (C) 2007, 2008, 2009 Siemens AG + * Copyright (C) 2007-2012 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 @@ -21,11 +21,14 @@ * Maxim Gorbachyov <maxim.gorbachev@siemens.com> * Maxim Osipov <maxim.osipov@siemens.com> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> + * Alexander Smirnov <alex.bluesman.smirnov@gmail.com> */ #ifndef IEEE802154_NETDEVICE_H #define IEEE802154_NETDEVICE_H +#include <net/af_ieee802154.h> + /* * A control block of skb passed between the ARPHRD_IEEE802154 device * and other stack parts. 
@@ -110,12 +113,26 @@ struct ieee802154_mlme_ops { u8 (*get_bsn)(const struct net_device *dev); }; -static inline struct ieee802154_mlme_ops *ieee802154_mlme_ops( - const struct net_device *dev) +/* The IEEE 802.15.4 standard defines 2 type of the devices: + * - FFD - full functionality device + * - RFD - reduce functionality device + * + * So 2 sets of mlme operations are needed + */ +struct ieee802154_reduced_mlme_ops { + struct wpan_phy *(*get_phy)(const struct net_device *dev); +}; + +static inline struct ieee802154_mlme_ops * +ieee802154_mlme_ops(const struct net_device *dev) { return dev->ml_priv; } -#endif - +static inline struct ieee802154_reduced_mlme_ops * +ieee802154_reduced_mlme_ops(const struct net_device *dev) +{ + return dev->ml_priv; +} +#endif diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h index 51a7031b4aa3..93563221d29a 100644 --- a/include/net/if_inet6.h +++ b/include/net/if_inet6.h @@ -120,7 +120,7 @@ struct ifmcaddr6 { unsigned char mca_crcount; unsigned long mca_sfcount[2]; struct timer_list mca_timer; - unsigned mca_flags; + unsigned int mca_flags; int mca_users; atomic_t mca_refcnt; spinlock_t mca_lock; @@ -209,60 +209,6 @@ static inline void ipv6_eth_mc_map(const struct in6_addr *addr, char *buf) memcpy(buf + 2, &addr->s6_addr32[3], sizeof(__u32)); } -static inline void ipv6_tr_mc_map(const struct in6_addr *addr, char *buf) -{ - /* All nodes FF01::1, FF02::1, FF02::1:FFxx:xxxx */ - - if (((addr->s6_addr[0] == 0xFF) && - ((addr->s6_addr[1] == 0x01) || (addr->s6_addr[1] == 0x02)) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr32[2] == 0) && - (addr->s6_addr16[6] == 0) && - (addr->s6_addr[15] == 1)) || - ((addr->s6_addr[0] == 0xFF) && - (addr->s6_addr[1] == 0x02) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr16[4] == 0) && - (addr->s6_addr[10] == 0) && - (addr->s6_addr[11] == 1) && - (addr->s6_addr[12] == 0xff))) - { - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x01; - buf[3]=0x00; - buf[4]=0x00; - buf[5]=0x00; - /* All routers FF0x::2 */ - } else if ((addr->s6_addr[0] ==0xff) && - ((addr->s6_addr[1] & 0xF0) == 0) && - (addr->s6_addr16[1] == 0) && - (addr->s6_addr32[1] == 0) && - (addr->s6_addr32[2] == 0) && - (addr->s6_addr16[6] == 0) && - (addr->s6_addr[15] == 2)) - { - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x02; - buf[3]=0x00; - buf[4]=0x00; - buf[5]=0x00; - } else { - unsigned char i ; - - i = addr->s6_addr[15] & 7 ; - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x00; - buf[3]=0x01 << i ; - buf[4]=0x00; - buf[5]=0x00; - } -} - static inline void ipv6_arcnet_mc_map(const struct in6_addr *addr, char *buf) { buf[0] = 0x00; diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h index 3207e58ee019..1866a676c810 100644 --- a/include/net/inet6_connection_sock.h +++ b/include/net/inet6_connection_sock.h @@ -23,7 +23,7 @@ struct sock; struct sockaddr; extern int inet6_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb); + const struct inet_bind_bucket *tb, bool relax); extern struct dst_entry* inet6_csk_route_req(struct sock *sk, const struct request_sock *req); diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index dbf9aab34c82..7d83f90f203f 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -45,6 +45,7 @@ struct inet_connection_sock_af_ops { struct dst_entry *dst); struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it); u16 net_header_len; + u16 
net_frag_header_len; u16 sockaddr_len; int (*setsockopt)(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); @@ -60,7 +61,7 @@ struct inet_connection_sock_af_ops { #endif void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); int (*bind_conflict)(const struct sock *sk, - const struct inet_bind_bucket *tb); + const struct inet_bind_bucket *tb, bool relax); }; /** inet_connection_sock - INET connection oriented sock @@ -245,7 +246,7 @@ extern struct request_sock *inet_csk_search_req(const struct sock *sk, const __be32 raddr, const __be32 laddr); extern int inet_csk_bind_conflict(const struct sock *sk, - const struct inet_bind_bucket *tb); + const struct inet_bind_bucket *tb, bool relax); extern int inet_csk_get_port(struct sock *sk, unsigned short snum); extern struct dst_entry* inet_csk_route_req(struct sock *sk, diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 16ff29a7bb30..2431cf83aeca 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -46,8 +46,7 @@ struct inet_frags { void *arg); void (*destructor)(struct inet_frag_queue *); void (*skb_free)(struct sk_buff *); - int (*match)(struct inet_frag_queue *q, - void *arg); + bool (*match)(struct inet_frag_queue *q, void *arg); void (*frag_expire)(unsigned long data); }; diff --git a/include/net/ip.h b/include/net/ip.h index b53d65f24f7b..83e0619f59d0 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -141,23 +141,6 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4) extern int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); -/* - * Map a multicast IP onto multicast MAC for type Token Ring. - * This conforms to RFC1469 Option 2 Multicasting i.e. - * using a functional address to transmit / receive - * multicast packets. - */ - -static inline void ip_tr_mc_map(__be32 addr, char *buf) -{ - buf[0]=0xC0; - buf[1]=0x00; - buf[2]=0x00; - buf[3]=0x04; - buf[4]=0x00; - buf[5]=0x00; -} - struct ip_reply_arg { struct kvec iov[1]; int flags; @@ -222,9 +205,6 @@ static inline int inet_is_reserved_local_port(int port) extern int sysctl_ip_nonlocal_bind; -extern struct ctl_path net_core_path[]; -extern struct ctl_path net_ipv4_ctl_path[]; - /* From inetpeer.c */ extern int inet_peer_threshold; extern int inet_peer_minttl; diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index b26bb8101981..0ae759a6c76e 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -123,6 +123,54 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) return ((struct rt6_info *)dst)->rt6i_idev; } +static inline void rt6_clean_expires(struct rt6_info *rt) +{ + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) + dst_release(rt->dst.from); + + rt->rt6i_flags &= ~RTF_EXPIRES; + rt->dst.from = NULL; +} + +static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires) +{ + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) + dst_release(rt->dst.from); + + rt->rt6i_flags |= RTF_EXPIRES; + rt->dst.expires = expires; +} + +static inline void rt6_update_expires(struct rt6_info *rt, int timeout) +{ + if (!(rt->rt6i_flags & RTF_EXPIRES)) { + if (rt->dst.from) + dst_release(rt->dst.from); + /* dst_set_expires relies on expires == 0 + * if it has not been set previously. 
+ */ + rt->dst.expires = 0; + } + + dst_set_expires(&rt->dst, timeout); + rt->rt6i_flags |= RTF_EXPIRES; +} + +static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) +{ + struct dst_entry *new = (struct dst_entry *) from; + + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) { + if (new == rt->dst.from) + return; + dst_release(rt->dst.from); + } + + rt->rt6i_flags &= ~RTF_EXPIRES; + rt->dst.from = new; + dst_hold(new); +} + struct fib6_walker_t { struct list_head lh; struct fib6_node *root, *node; diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 2ad92ca4e6f3..37c1a1ed82c1 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -146,7 +146,7 @@ struct rt6_rtnl_dump_arg { extern int rt6_dump_route(struct rt6_info *rt, void *p_arg); extern void rt6_ifdown(struct net *net, struct net_device *dev); -extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); +extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu); extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); @@ -175,7 +175,7 @@ static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, spin_unlock(&sk->sk_dst_lock); } -static inline int ipv6_unicast_destination(struct sk_buff *skb) +static inline bool ipv6_unicast_destination(const struct sk_buff *skb) { struct rt6_info *rt = (struct rt6_info *) skb_dst(skb); diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 10422ef14e28..78df0866cc38 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -49,7 +49,7 @@ struct fib_nh { struct net_device *nh_dev; struct hlist_node nh_hash; struct fib_info *nh_parent; - unsigned nh_flags; + unsigned int nh_flags; unsigned char nh_scope; #ifdef CONFIG_IP_ROUTE_MULTIPATH int nh_weight; @@ -74,7 +74,7 @@ struct fib_info { struct net *fib_net; int fib_treeref; atomic_t fib_clntref; - unsigned fib_flags; + unsigned int fib_flags; unsigned char fib_dead; unsigned char fib_protocol; unsigned char fib_scope; diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 2bdee51ba30d..d6146b4811c2 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -10,7 +10,6 @@ #include <asm/types.h> /* for __uXX types */ -#include <linux/sysctl.h> /* for ctl_path */ #include <linux/list.h> /* for struct list_head */ #include <linux/spinlock.h> /* for struct rwlock_t */ #include <linux/atomic.h> /* for struct atomic_t */ @@ -393,7 +392,7 @@ struct ip_vs_protocol { void (*exit)(struct ip_vs_protocol *pp); - void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); + int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd); @@ -505,6 +504,7 @@ struct ip_vs_conn { * state transition triggerd * synchronization */ + unsigned long sync_endtime; /* jiffies + sent_retries */ /* Control members */ struct ip_vs_conn *control; /* Master control connection */ @@ -580,8 +580,8 @@ struct ip_vs_service_user_kern { /* virtual service options */ char *sched_name; char *pe_name; - unsigned flags; /* virtual service flags */ - unsigned timeout; /* persistent timeout in sec */ + unsigned int flags; /* virtual service flags */ + unsigned int timeout; /* persistent timeout in sec */ u32 netmask; /* persistent netmask */ }; @@ -592,7 +592,7 @@ struct ip_vs_dest_user_kern { u16 port; /* real server options */ - unsigned conn_flags; /* connection flags */ + unsigned int conn_flags; /* connection flags */ int weight; /* destination weight */ /* thresholds for active connections */ @@ -616,8 +616,8 @@ 
struct ip_vs_service { union nf_inet_addr addr; /* IP address for virtual service */ __be16 port; /* port number for the service */ __u32 fwmark; /* firewall mark of the service */ - unsigned flags; /* service status flags */ - unsigned timeout; /* persistent timeout in ticks */ + unsigned int flags; /* service status flags */ + unsigned int timeout; /* persistent timeout in ticks */ __be32 netmask; /* grouping granularity */ struct net *net; @@ -647,7 +647,7 @@ struct ip_vs_dest { u16 af; /* address family */ __be16 port; /* port number of the server */ union nf_inet_addr addr; /* IP address of the server */ - volatile unsigned flags; /* dest status flags */ + volatile unsigned int flags; /* dest status flags */ atomic_t conn_flags; /* flags to copy to conn */ atomic_t weight; /* server weight */ @@ -784,6 +784,16 @@ struct ip_vs_app { void (*timeout_change)(struct ip_vs_app *app, int flags); }; +struct ipvs_master_sync_state { + struct list_head sync_queue; + struct ip_vs_sync_buff *sync_buff; + int sync_queue_len; + unsigned int sync_queue_delay; + struct task_struct *master_thread; + struct delayed_work master_wakeup_work; + struct netns_ipvs *ipvs; +}; + /* IPVS in network namespace */ struct netns_ipvs { int gen; /* Generation */ @@ -870,10 +880,15 @@ struct netns_ipvs { #endif int sysctl_snat_reroute; int sysctl_sync_ver; + int sysctl_sync_ports; + int sysctl_sync_qlen_max; + int sysctl_sync_sock_size; int sysctl_cache_bypass; int sysctl_expire_nodest_conn; int sysctl_expire_quiescent_template; int sysctl_sync_threshold[2]; + unsigned int sysctl_sync_refresh_period; + int sysctl_sync_retries; int sysctl_nat_icmp_send; /* ip_vs_lblc */ @@ -889,13 +904,11 @@ struct netns_ipvs { spinlock_t est_lock; struct timer_list est_timer; /* Estimation timer */ /* ip_vs_sync */ - struct list_head sync_queue; spinlock_t sync_lock; - struct ip_vs_sync_buff *sync_buff; + struct ipvs_master_sync_state *ms; spinlock_t sync_buff_lock; - struct sockaddr_in sync_mcast_addr; - struct task_struct *master_thread; - struct task_struct *backup_thread; + struct task_struct **backup_threads; + int threads_mask; int send_mesg_maxlen; int recv_mesg_maxlen; volatile int sync_state; @@ -912,6 +925,14 @@ struct netns_ipvs { #define DEFAULT_SYNC_THRESHOLD 3 #define DEFAULT_SYNC_PERIOD 50 #define DEFAULT_SYNC_VER 1 +#define DEFAULT_SYNC_REFRESH_PERIOD (0U * HZ) +#define DEFAULT_SYNC_RETRIES 0 +#define IPVS_SYNC_WAKEUP_RATE 8 +#define IPVS_SYNC_QLEN_MAX (IPVS_SYNC_WAKEUP_RATE * 4) +#define IPVS_SYNC_SEND_DELAY (HZ / 50) +#define IPVS_SYNC_CHECK_PERIOD HZ +#define IPVS_SYNC_FLUSH_TIME (HZ * 2) +#define IPVS_SYNC_PORTS_MAX (1 << 6) #ifdef CONFIG_SYSCTL @@ -922,7 +943,17 @@ static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) static inline int sysctl_sync_period(struct netns_ipvs *ipvs) { - return ipvs->sysctl_sync_threshold[1]; + return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]); +} + +static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) +{ + return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period); +} + +static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_retries; } static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) @@ -930,6 +961,21 @@ static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) return ipvs->sysctl_sync_ver; } +static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) +{ + return ACCESS_ONCE(ipvs->sysctl_sync_ports); +} + +static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs) +{ + return 
ipvs->sysctl_sync_qlen_max; +} + +static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs) +{ + return ipvs->sysctl_sync_sock_size; +} + #else static inline int sysctl_sync_threshold(struct netns_ipvs *ipvs) @@ -942,18 +988,43 @@ static inline int sysctl_sync_period(struct netns_ipvs *ipvs) return DEFAULT_SYNC_PERIOD; } +static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_REFRESH_PERIOD; +} + +static inline int sysctl_sync_retries(struct netns_ipvs *ipvs) +{ + return DEFAULT_SYNC_RETRIES & 3; +} + static inline int sysctl_sync_ver(struct netns_ipvs *ipvs) { return DEFAULT_SYNC_VER; } +static inline int sysctl_sync_ports(struct netns_ipvs *ipvs) +{ + return 1; +} + +static inline int sysctl_sync_qlen_max(struct netns_ipvs *ipvs) +{ + return IPVS_SYNC_QLEN_MAX; +} + +static inline int sysctl_sync_sock_size(struct netns_ipvs *ipvs) +{ + return 0; +} + #endif /* * IPVS core functions * (from ip_vs_core.c) */ -extern const char *ip_vs_proto_name(unsigned proto); +extern const char *ip_vs_proto_name(unsigned int proto); extern void ip_vs_init_hash_table(struct list_head *table, int rows); #define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t))) @@ -1014,7 +1085,7 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, const union nf_inet_addr *daddr, - __be16 dport, unsigned flags, + __be16 dport, unsigned int flags, struct ip_vs_dest *dest, __u32 fwmark); extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); @@ -1184,10 +1255,8 @@ extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg); * IPVS control data and functions (from ip_vs_ctl.c) */ extern struct ip_vs_stats ip_vs_stats; -extern const struct ctl_path net_vs_ctl_path[]; extern int sysctl_ip_vs_sync_ver; -extern void ip_vs_sync_switch_mode(struct net *net, int mode); extern struct ip_vs_service * ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol, const union nf_inet_addr *vaddr, __be16 vport); @@ -1203,6 +1272,8 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol, extern int ip_vs_use_count_inc(void); extern void ip_vs_use_count_dec(void); +extern int ip_vs_register_nl_ioctl(void); +extern void ip_vs_unregister_nl_ioctl(void); extern int ip_vs_control_init(void); extern void ip_vs_control_cleanup(void); extern struct ip_vs_dest * @@ -1219,7 +1290,7 @@ extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); extern int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid); extern int stop_sync_thread(struct net *net, int state); -extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp); +extern void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts); /* diff --git a/include/net/ipip.h b/include/net/ipip.h index a32654d52730..a93cf6d7e94b 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -54,8 +54,10 @@ struct ip_tunnel_prl_entry { \ err = ip_local_out(skb); \ if (likely(net_xmit_eval(err) == 0)) { \ + u64_stats_update_begin(&(stats1)->syncp); \ (stats1)->tx_bytes += pkt_len; \ (stats1)->tx_packets++; \ + u64_stats_update_end(&(stats1)->syncp); \ } else { \ (stats2)->tx_errors++; \ (stats2)->tx_aborted_errors++; \ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e4170a22fc6f..aecf88436abf 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -113,7 +113,6 @@ struct frag_hdr { /* sysctls */ extern int sysctl_mld_max_msf; -extern 
struct ctl_path net_ipv6_ctl_path[]; #define _DEVINC(net, statname, modifier, idev, field) \ ({ \ @@ -264,7 +263,7 @@ extern struct ipv6_txoptions * ipv6_renew_options(struct sock *sk, struct ipv6_t struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space, struct ipv6_txoptions *opt); -extern int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb); +extern bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb); int ip6_frag_nqueues(struct net *net); int ip6_frag_mem(struct net *net); @@ -333,8 +332,8 @@ static inline void ipv6_addr_set(struct in6_addr *addr, addr->s6_addr32[3] = w4; } -static inline int ipv6_addr_equal(const struct in6_addr *a1, - const struct in6_addr *a2) +static inline bool ipv6_addr_equal(const struct in6_addr *a1, + const struct in6_addr *a2) { return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | @@ -342,27 +341,27 @@ static inline int ipv6_addr_equal(const struct in6_addr *a1, (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; } -static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, - unsigned int prefixlen) +static inline bool __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, + unsigned int prefixlen) { - unsigned pdw, pbi; + unsigned int pdw, pbi; /* check complete u32 in prefix */ pdw = prefixlen >> 5; if (pdw && memcmp(a1, a2, pdw << 2)) - return 0; + return false; /* check incomplete u32 in prefix */ pbi = prefixlen & 0x1f; if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi)))) - return 0; + return false; - return 1; + return true; } -static inline int ipv6_prefix_equal(const struct in6_addr *a1, - const struct in6_addr *a2, - unsigned int prefixlen) +static inline bool ipv6_prefix_equal(const struct in6_addr *a1, + const struct in6_addr *a2, + unsigned int prefixlen) { return __ipv6_prefix_equal(a1->s6_addr32, a2->s6_addr32, prefixlen); @@ -388,21 +387,21 @@ struct ip6_create_arg { }; void ip6_frag_init(struct inet_frag_queue *q, void *a); -int ip6_frag_match(struct inet_frag_queue *q, void *a); +bool ip6_frag_match(struct inet_frag_queue *q, void *a); -static inline int ipv6_addr_any(const struct in6_addr *a) +static inline bool ipv6_addr_any(const struct in6_addr *a) { return (a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | a->s6_addr32[3]) == 0; } -static inline int ipv6_addr_loopback(const struct in6_addr *a) +static inline bool ipv6_addr_loopback(const struct in6_addr *a) { return (a->s6_addr32[0] | a->s6_addr32[1] | a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0; } -static inline int ipv6_addr_v4mapped(const struct in6_addr *a) +static inline bool ipv6_addr_v4mapped(const struct in6_addr *a) { return (a->s6_addr32[0] | a->s6_addr32[1] | (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0; @@ -412,7 +411,7 @@ static inline int ipv6_addr_v4mapped(const struct in6_addr *a) * Check for a RFC 4843 ORCHID address * (Overlay Routable Cryptographic Hash Identifiers) */ -static inline int ipv6_addr_orchid(const struct in6_addr *a) +static inline bool ipv6_addr_orchid(const struct in6_addr *a) { return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010); } @@ -560,7 +559,7 @@ extern void ipv6_push_frag_opts(struct sk_buff *skb, extern int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp, __be16 *frag_offp); -extern int ipv6_ext_hdr(u8 nexthdr); +extern bool ipv6_ext_hdr(u8 nexthdr); extern int ipv6_find_tlv(struct sk_buff *skb, int offset, int type); @@ -661,8 +660,6 @@ extern struct ctl_table *ipv6_icmp_sysctl_init(struct net 
*net); extern struct ctl_table *ipv6_route_sysctl_init(struct net *net); extern int ipv6_sysctl_register(void); extern void ipv6_sysctl_unregister(void); -extern int ipv6_static_sysctl_register(void); -extern void ipv6_static_sysctl_unregister(void); #endif #endif /* _NET_IPV6_H */ diff --git a/include/net/lapb.h b/include/net/lapb.h index fd2bf572ee1d..df892a94f2c6 100644 --- a/include/net/lapb.h +++ b/include/net/lapb.h @@ -149,4 +149,10 @@ extern int lapb_t1timer_running(struct lapb_cb *lapb); */ #define LAPB_DEBUG 0 +#define lapb_dbg(level, fmt, ...) \ +do { \ + if (level < LAPB_DEBUG) \ + pr_debug(fmt, ##__VA_ARGS__); \ +} while (0) + #endif diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h index 23a409381fa9..6ca3113df39e 100644 --- a/include/net/llc_c_ev.h +++ b/include/net/llc_c_ev.h @@ -264,6 +264,6 @@ extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk, static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb) { return atomic_read(&sk->sk_rmem_alloc) + skb->truesize < - (unsigned)sk->sk_rcvbuf; + (unsigned int)sk->sk_rcvbuf; } #endif /* LLC_C_EV_H */ diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h index f57e7d46a453..5a93d13ac95c 100644 --- a/include/net/llc_pdu.h +++ b/include/net/llc_pdu.h @@ -13,7 +13,6 @@ */ #include <linux/if_ether.h> -#include <linux/if_tr.h> /* Lengths of frame formats */ #define LLC_PDU_LEN_I 4 /* header and 2 control bytes */ @@ -253,10 +252,6 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa) { if (skb->protocol == htons(ETH_P_802_2)) memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN); - else if (skb->protocol == htons(ETH_P_TR_802_2)) { - memcpy(sa, tr_hdr(skb)->saddr, ETH_ALEN); - *sa &= 0x7F; - } } /** @@ -270,8 +265,6 @@ static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da) { if (skb->protocol == htons(ETH_P_802_2)) memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN); - else if (skb->protocol == htons(ETH_P_TR_802_2)) - memcpy(da, tr_hdr(skb)->daddr, ETH_ALEN); } /** diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 9210bdc7bd8d..4d6e6c6818d0 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -95,9 +95,11 @@ struct device; * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues. */ enum ieee80211_max_queues { - IEEE80211_MAX_QUEUES = 4, + IEEE80211_MAX_QUEUES = 16, }; +#define IEEE80211_INVAL_HW_QUEUE 0xff + /** * enum ieee80211_ac_numbers - AC numbers as used in mac80211 * @IEEE80211_AC_VO: voice @@ -244,7 +246,7 @@ enum ieee80211_rssi_event { * @channel_type: Channel type for this BSS -- the hardware might be * configured for HT40+ while this BSS only uses no-HT, for * example. - * @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info). + * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation. * This field is only valid when the channel type is one of the HT types. 
* @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value * implies disabled @@ -522,7 +524,7 @@ struct ieee80211_tx_rate { * * @flags: transmit info flags, defined above * @band: the band to transmit on (use for checking for races) - * @antenna_sel_tx: antenna to use, 0 for automatic diversity + * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC * @ack_frame_id: internal frame ID for TX status, used internally * @control: union for control data * @status: union for status data @@ -538,7 +540,7 @@ struct ieee80211_tx_info { u32 flags; u8 band; - u8 antenna_sel_tx; + u8 hw_queue; u16 ack_frame_id; @@ -564,7 +566,8 @@ struct ieee80211_tx_info { u8 ampdu_ack_len; int ack_signal; u8 ampdu_len; - /* 15 bytes free */ + u8 antenna; + /* 14 bytes free */ } status; struct { struct ieee80211_tx_rate driver_rates[ @@ -888,6 +891,8 @@ enum ieee80211_vif_flags { * these need to be set (or cleared) when the interface is added * or, if supported by the driver, the interface type is changed * at runtime, mac80211 will never touch this field + * @hw_queue: hardware queue for each AC + * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only * @drv_priv: data area for driver use, will always be aligned to * sizeof(void *). */ @@ -896,7 +901,12 @@ struct ieee80211_vif { struct ieee80211_bss_conf bss_conf; u8 addr[ETH_ALEN]; bool p2p; + + u8 cab_queue; + u8 hw_queue[IEEE80211_NUM_ACS]; + u32 driver_flags; + /* must be last */ u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); }; @@ -1174,6 +1184,15 @@ enum sta_notify_cmd { * @IEEE80211_HW_SCAN_WHILE_IDLE: The device can do hw scan while * being idle (i.e. mac80211 doesn't have to go idle-off during the * the scan). + * + * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of + * a virtual monitor interface when monitor interfaces are the only + * active interfaces. + * + * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface + * queue mapping in order to use different queues (not just one per AC) + * for different virtual interfaces. See the doc section on HW queue + * control for more details. */ enum ieee80211_hw_flags { IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, @@ -1190,13 +1209,13 @@ enum ieee80211_hw_flags { IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11, IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, IEEE80211_HW_MFP_CAPABLE = 1<<13, - /* reuse bit 14 */ + IEEE80211_HW_WANT_MONITOR_VIF = 1<<14, IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15, IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16, IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, IEEE80211_HW_CONNECTION_MONITOR = 1<<19, - /* reuse bit 20 */ + IEEE80211_HW_QUEUE_CONTROL = 1<<20, IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21, IEEE80211_HW_AP_LINK_PS = 1<<22, IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23, @@ -1266,6 +1285,9 @@ enum ieee80211_hw_flags { * @max_tx_aggregation_subframes: maximum number of subframes in an * aggregate an HT driver will transmit, used by the peer as a * hint to size its reorder buffer. 
+ * + * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX + * (if %IEEE80211_HW_QUEUE_CONTROL is set) */ struct ieee80211_hw { struct ieee80211_conf conf; @@ -1286,6 +1308,7 @@ struct ieee80211_hw { u8 max_rate_tries; u8 max_rx_aggregation_subframes; u8 max_tx_aggregation_subframes; + u8 offchannel_tx_hw_queue; }; /** @@ -1694,6 +1717,61 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb); */ /** + * DOC: HW queue control + * + * Before HW queue control was introduced, mac80211 only had a single static + * assignment of per-interface AC software queues to hardware queues. This + * was problematic for a few reasons: + * 1) off-channel transmissions might get stuck behind other frames + * 2) multiple virtual interfaces couldn't be handled correctly + * 3) after-DTIM frames could get stuck behind other frames + * + * To solve this, hardware typically uses multiple different queues for all + * the different usages, and this needs to be propagated into mac80211 so it + * won't have the same problem with the software queues. + * + * Therefore, mac80211 now offers the %IEEE80211_HW_QUEUE_CONTROL capability + * flag that tells it that the driver implements its own queue control. To do + * so, the driver will set up the various queues in each &struct ieee80211_vif + * and the offchannel queue in &struct ieee80211_hw. In response, mac80211 will + * use those queue IDs in the hw_queue field of &struct ieee80211_tx_info and + * if necessary will queue the frame on the right software queue that mirrors + * the hardware queue. + * Additionally, the driver has to then use these HW queue IDs for the queue + * management functions (ieee80211_stop_queue() et al.) + * + * The driver is free to set up the queue mappings as needed, multiple virtual + * interfaces may map to the same hardware queues if needed. The setup has to + * happen during add_interface or change_interface callbacks. For example, a + * driver supporting station+station and station+AP modes might decide to have + * 10 hardware queues to handle different scenarios: + * + * 4 AC HW queues for 1st vif: 0, 1, 2, 3 + * 4 AC HW queues for 2nd vif: 4, 5, 6, 7 + * after-DTIM queue for AP: 8 + * off-channel queue: 9 + * + * It would then set up the hardware like this: + * hw.offchannel_tx_hw_queue = 9 + * + * and the first virtual interface that is added as follows: + * vif.hw_queue[IEEE80211_AC_VO] = 0 + * vif.hw_queue[IEEE80211_AC_VI] = 1 + * vif.hw_queue[IEEE80211_AC_BE] = 2 + * vif.hw_queue[IEEE80211_AC_BK] = 3 + * vif.cab_queue = 8 // if AP mode, otherwise %IEEE80211_INVAL_HW_QUEUE + * and the second virtual interface with 4-7. + * + * If queue 6 gets full, for example, mac80211 would only stop the second + * virtual interface's BE queue since virtual interface queues are per AC. + * + * Note that the vif.cab_queue value should be set to %IEEE80211_INVAL_HW_QUEUE + * whenever the queue is not used (i.e. the interface is not in AP mode) if the + * queue could potentially be shared since mac80211 will look at cab_queue when + * a queue is stopped/woken even if the interface is not in AP mode. + */ + +/** * enum ieee80211_filter_flags - hardware filter flags * * These flags determine what the filter in hardware should be @@ -1780,6 +1858,18 @@ enum ieee80211_frame_release_type { }; /** + * enum ieee80211_rate_control_changed - flags to indicate what changed + * + * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit + * to this station changed. 
+ * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed. + */ +enum ieee80211_rate_control_changed { + IEEE80211_RC_BW_CHANGED = BIT(0), + IEEE80211_RC_SMPS_CHANGED = BIT(1), +}; + +/** * struct ieee80211_ops - callbacks from mac80211 to the driver * * This structure contains various callbacks that the driver may @@ -1980,6 +2070,14 @@ enum ieee80211_frame_release_type { * up the list of states. * The callback can sleep. * + * @sta_rc_update: Notifies the driver of changes to the bitrates that can be + * used to transmit to the station. The changes are advertised with bits + * from &enum ieee80211_rate_control_changed and the values are reflected + * in the station data. This callback should only be used when the driver + * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since + * otherwise the rate control algorithm is notified directly. + * Must be atomic. + * * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), * bursting) for a hardware TX queue. * Returns a negative error code on failure. @@ -2125,6 +2223,14 @@ enum ieee80211_frame_release_type { * The @tids parameter is a bitmap and tells the driver which TIDs the * frames will be on; it will at most have two bits set. * This callback must be atomic. + * + * @get_et_sset_count: Ethtool API to get string-set count. + * + * @get_et_stats: Ethtool API to get a set of u64 stats. + * + * @get_et_strings: Ethtool API to get a set of strings to describe stats + * and perhaps other supported types of ethtool data-sets. + * */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); @@ -2135,6 +2241,7 @@ struct ieee80211_ops { #ifdef CONFIG_PM int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int (*resume)(struct ieee80211_hw *hw); + void (*set_wakeup)(struct ieee80211_hw *hw, bool enabled); #endif int (*add_interface)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); @@ -2196,8 +2303,12 @@ struct ieee80211_ops { struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, enum ieee80211_sta_state new_state); + void (*sta_rc_update)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + u32 changed); int (*conf_tx)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, u16 queue, + struct ieee80211_vif *vif, u16 ac, const struct ieee80211_tx_queue_params *params); u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -2250,6 +2361,15 @@ struct ieee80211_ops { u16 tids, int num_frames, enum ieee80211_frame_release_type reason, bool more_data); + + int (*get_et_sset_count)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int sset); + void (*get_et_stats)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ethtool_stats *stats, u64 *data); + void (*get_et_strings)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u32 sset, u8 *data); }; /** @@ -2844,6 +2964,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, */ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + enum ieee80211_band band, size_t frame_len, struct ieee80211_rate *rate); @@ -3512,19 +3633,6 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn); /* Rate control API */ /** - * enum rate_control_changed - flags to indicate which parameter changed - * - * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have - * changed, rate control algorithm can update 
its internal state if needed. - * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate - * control algorithm needs to adjust accordingly. - */ -enum rate_control_changed { - IEEE80211_RC_HT_CHANGED = BIT(0), - IEEE80211_RC_SMPS_CHANGED = BIT(1), -}; - -/** * struct ieee80211_tx_rate_control - rate control information for/from RC algo * * @hw: The hardware the algorithm is invoked for. @@ -3569,9 +3677,8 @@ struct rate_control_ops { void (*rate_init)(void *priv, struct ieee80211_supported_band *sband, struct ieee80211_sta *sta, void *priv_sta); void (*rate_update)(void *priv, struct ieee80211_supported_band *sband, - struct ieee80211_sta *sta, - void *priv_sta, u32 changed, - enum nl80211_channel_type oper_chan_type); + struct ieee80211_sta *sta, void *priv_sta, + u32 changed); void (*free_sta)(void *priv, struct ieee80211_sta *sta, void *priv_sta); @@ -3706,8 +3813,20 @@ void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif, void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif); -int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb); +int ieee80211_add_srates_ie(struct ieee80211_vif *vif, + struct sk_buff *skb, bool need_basic); int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, - struct sk_buff *skb); + struct sk_buff *skb, bool need_basic); + +/** + * ieee80211_ave_rssi - report the average rssi for the specified interface + * + * @vif: the specified virtual interface + * + * This function return the average rssi value for the requested interface. + * It assumes that the given vif is valid. + */ +int ieee80211_ave_rssi(struct ieee80211_vif *vif); + #endif /* MAC80211_H */ diff --git a/include/net/mac802154.h b/include/net/mac802154.h new file mode 100644 index 000000000000..c9f8ab5cc687 --- /dev/null +++ b/include/net/mac802154.h @@ -0,0 +1,136 @@ +/* + * IEEE802.15.4-2003 specification + * + * Copyright (C) 2007-2012 Siemens AG + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef NET_MAC802154_H +#define NET_MAC802154_H + +#include <net/af_ieee802154.h> + +/* The following flags are used to indicate changed address settings from + * the stack to the hardware. + */ + +/* indicates that the Short Address changed */ +#define IEEE802515_AFILT_SADDR_CHANGED 0x00000001 +/* indicates that the IEEE Address changed */ +#define IEEE802515_AFILT_IEEEADDR_CHANGED 0x00000002 +/* indicates that the PAN ID changed */ +#define IEEE802515_AFILT_PANID_CHANGED 0x00000004 +/* indicates that PAN Coordinator status changed */ +#define IEEE802515_AFILT_PANC_CHANGED 0x00000008 + +struct ieee802154_hw_addr_filt { + __le16 pan_id; /* Each independent PAN selects a unique + * identifier. This PAN id allows communication + * between devices within a network using short + * addresses and enables transmissions between + * devices across independent networks. 
+ */ + __le16 short_addr; + u8 ieee_addr[IEEE802154_ADDR_LEN]; + u8 pan_coord; +}; + +struct ieee802154_dev { + /* filled by the driver */ + int extra_tx_headroom; + u32 flags; + struct device *parent; + + /* filled by mac802154 core */ + struct ieee802154_hw_addr_filt hw_filt; + void *priv; + struct wpan_phy *phy; +}; + +/* Checksum is in hardware and is omitted from a packet + * + * The following flags are used to indicate hardware capabilities to + * the stack. Generally, flags here should have their meaning + * done in a way that the simplest hardware doesn't need setting + * any particular flags. There are some exceptions to this rule, + * however, so you are advised to review these flags carefully. + */ + +/* Indicates that receiver omits FCS and xmitter will add FCS on its own. */ +#define IEEE802154_HW_OMIT_CKSUM 0x00000001 +/* Indicates that receiver will autorespond with ACK frames. */ +#define IEEE802154_HW_AACK 0x00000002 + +/* struct ieee802154_ops - callbacks from mac802154 to the driver + * + * This structure contains various callbacks that the driver may + * handle or, in some cases, must handle, for example to transmit + * a frame. + * + * start: Handler that 802.15.4 module calls for device initialization. + * This function is called before the first interface is attached. + * + * stop: Handler that 802.15.4 module calls for device cleanup. + * This function is called after the last interface is removed. + * + * xmit: Handler that 802.15.4 module calls for each transmitted frame. + * skb contains the buffer starting from the IEEE 802.15.4 header. + * The low-level driver should send the frame based on available + * configuration. + * This function should return zero or negative errno. Called with + * pib_lock held. + * + * ed: Handler that 802.15.4 module calls for Energy Detection. + * This function should place the value for detected energy + * (usually device-dependent) in the level pointer and return + * either zero or negative errno. Called with pib_lock held. + * + * set_channel: + * Set radio for listening on specific channel. + * Set the device for listening on specified channel. + * Returns either zero, or negative errno. Called with pib_lock held. + * + * set_hw_addr_filt: + * Set radio for listening on specific address. + * Set the device for listening on specified address. + * Returns either zero, or negative errno. 
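A minimal sketch, not part of this patch, of a radio driver filling in the callbacks documented above and registering with mac802154; the mydrv names and the empty handler bodies are placeholders only.

struct mydrv_priv { void *regs; };	/* hypothetical driver-private state */

static int mydrv_start(struct ieee802154_dev *dev) { return 0; }
static void mydrv_stop(struct ieee802154_dev *dev) { }

static int mydrv_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
{
	/* skb->data starts at the IEEE 802.15.4 header; hand it to the radio */
	return 0;
}

static int mydrv_ed(struct ieee802154_dev *dev, u8 *level)
{
	*level = 0;	/* report the measured energy here */
	return 0;
}

static int mydrv_set_channel(struct ieee802154_dev *dev, int page, int channel)
{
	return 0;	/* retune the transceiver */
}

static struct ieee802154_ops mydrv_ops = {
	.owner		= THIS_MODULE,
	.start		= mydrv_start,
	.stop		= mydrv_stop,
	.xmit		= mydrv_xmit,
	.ed		= mydrv_ed,
	.set_channel	= mydrv_set_channel,
};

static int mydrv_probe(void)
{
	struct ieee802154_dev *dev;

	dev = ieee802154_alloc_device(sizeof(struct mydrv_priv), &mydrv_ops);
	if (!dev)
		return -ENOMEM;
	dev->flags = IEEE802154_HW_OMIT_CKSUM;	/* FCS is handled in hardware */
	return ieee802154_register_device(dev);
}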
+ */ +struct ieee802154_ops { + struct module *owner; + int (*start)(struct ieee802154_dev *dev); + void (*stop)(struct ieee802154_dev *dev); + int (*xmit)(struct ieee802154_dev *dev, + struct sk_buff *skb); + int (*ed)(struct ieee802154_dev *dev, u8 *level); + int (*set_channel)(struct ieee802154_dev *dev, + int page, + int channel); + int (*set_hw_addr_filt)(struct ieee802154_dev *dev, + struct ieee802154_hw_addr_filt *filt, + unsigned long changed); + int (*ieee_addr)(struct ieee802154_dev *dev, + u8 addr[IEEE802154_ADDR_LEN]); +}; + +/* Basic interface to register ieee802154 device */ +struct ieee802154_dev * +ieee802154_alloc_device(size_t priv_data_lex, struct ieee802154_ops *ops); +void ieee802154_free_device(struct ieee802154_dev *dev); +int ieee802154_register_device(struct ieee802154_dev *dev); +void ieee802154_unregister_device(struct ieee802154_dev *dev); + +void ieee802154_rx_irqsafe(struct ieee802154_dev *dev, struct sk_buff *skb, + u8 lqi); + +#endif /* NET_MAC802154_H */ diff --git a/include/net/ndisc.h b/include/net/ndisc.h index 6f9c25a76cd1..c02b6ad3f6c5 100644 --- a/include/net/ndisc.h +++ b/include/net/ndisc.h @@ -34,6 +34,7 @@ enum { __ND_OPT_ARRAY_MAX, ND_OPT_ROUTE_INFO = 24, /* RFC4191 */ ND_OPT_RDNSS = 25, /* RFC5006 */ + ND_OPT_DNSSL = 31, /* RFC6106 */ __ND_OPT_MAX }; diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 34c996f46181..6cdfeedb650b 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -195,7 +195,6 @@ static inline void *neighbour_priv(const struct neighbour *n) #define NEIGH_UPDATE_F_ADMIN 0x80000000 extern void neigh_table_init(struct neigh_table *tbl); -extern void neigh_table_init_no_netlink(struct neigh_table *tbl); extern int neigh_table_clear(struct neigh_table *tbl); extern struct neighbour * neigh_lookup(struct neigh_table *tbl, const void *pkey, @@ -323,7 +322,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) #ifdef CONFIG_BRIDGE_NETFILTER static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) { - unsigned seq, hh_alen; + unsigned int seq, hh_alen; do { seq = read_seqbegin(&hh->hh_lock); @@ -336,7 +335,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) { - unsigned seq; + unsigned int seq; int hh_len; do { diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index ee547c149810..ac9195e6a062 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -279,14 +279,25 @@ extern void unregister_pernet_subsys(struct pernet_operations *); extern int register_pernet_device(struct pernet_operations *); extern void unregister_pernet_device(struct pernet_operations *); -struct ctl_path; struct ctl_table; struct ctl_table_header; -extern struct ctl_table_header *register_net_sysctl_table(struct net *net, - const struct ctl_path *path, struct ctl_table *table); -extern struct ctl_table_header *register_net_sysctl_rotable( - const struct ctl_path *path, struct ctl_table *table); +#ifdef CONFIG_SYSCTL +extern int net_sysctl_init(void); +extern struct ctl_table_header *register_net_sysctl(struct net *net, + const char *path, struct ctl_table *table); extern void unregister_net_sysctl_table(struct ctl_table_header *header); +#else +static inline int net_sysctl_init(void) { return 0; } +static inline struct ctl_table_header *register_net_sysctl(struct net *net, + const char *path, struct ctl_table *table) +{ + return NULL; +} 
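register_net_sysctl() above takes a plain path string where register_net_sysctl_table() took a struct ctl_path array. A minimal sketch, not from this patch, of per-namespace registration with the new call; the table contents, the "net/mysubsys" path and my_param are assumptions.

static int my_param;			/* hypothetical tunable */

static struct ctl_table my_table[] = {
	{
		.procname	= "my_param",
		.data		= &my_param,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __net_init my_net_init(struct net *net)
{
	struct ctl_table_header *hdr;

	/* previously: register_net_sysctl_table(net, my_ctl_path, my_table) */
	hdr = register_net_sysctl(net, "net/mysubsys", my_table);
	if (!hdr)
		return -ENOMEM;

	/* save hdr per-netns and hand it to unregister_net_sysctl_table()
	 * in the pernet exit hook */
	return 0;
}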
+static inline void unregister_net_sysctl_table(struct ctl_table_header *header) +{ +} +#endif + #endif /* __NET_NET_NAMESPACE_H */ diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index ab86036bbf0c..cce7f6a798bf 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h @@ -321,14 +321,8 @@ extern unsigned int nf_conntrack_max; extern unsigned int nf_conntrack_hash_rnd; void init_nf_conntrack_hash_rnd(void); -#define NF_CT_STAT_INC(net, count) \ - __this_cpu_inc((net)->ct.stat->count) -#define NF_CT_STAT_INC_ATOMIC(net, count) \ -do { \ - local_bh_disable(); \ - __this_cpu_inc((net)->ct.stat->count); \ - local_bh_enable(); \ -} while (0) +#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) +#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) #define MODULE_ALIAS_NFCT_HELPER(helper) \ MODULE_ALIAS("nfct-helper-" helper) diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h index 5767dc242dee..1d1889409b9e 100644 --- a/include/net/netfilter/nf_conntrack_helper.h +++ b/include/net/netfilter/nf_conntrack_helper.h @@ -60,8 +60,8 @@ static inline struct nf_conn_help *nfct_help(const struct nf_conn *ct) return nf_ct_ext_find(ct, NF_CT_EXT_HELPER); } -extern int nf_conntrack_helper_init(void); -extern void nf_conntrack_helper_fini(void); +extern int nf_conntrack_helper_init(struct net *net); +extern void nf_conntrack_helper_fini(struct net *net); extern int nf_conntrack_broadcast_help(struct sk_buff *skb, unsigned int protoff, diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h index e8010f445ae1..9699c028b74b 100644 --- a/include/net/netfilter/nf_conntrack_l3proto.h +++ b/include/net/netfilter/nf_conntrack_l3proto.h @@ -65,7 +65,7 @@ struct nf_conntrack_l3proto { #ifdef CONFIG_SYSCTL struct ctl_table_header *ctl_table_header; - struct ctl_path *ctl_table_path; + const char *ctl_table_path; struct ctl_table *ctl_table; #endif /* CONFIG_SYSCTL */ diff --git a/include/net/netlink.h b/include/net/netlink.h index f394fe5d7641..785f37a3b44e 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -102,20 +102,6 @@ * nla_put_flag(skb, type) add flag attribute to skb * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb * - * Exceptions Based Attribute Construction: - * NLA_PUT(skb, type, len, data) add attribute to skb - * NLA_PUT_U8(skb, type, value) add u8 attribute to skb - * NLA_PUT_U16(skb, type, value) add u16 attribute to skb - * NLA_PUT_U32(skb, type, value) add u32 attribute to skb - * NLA_PUT_U64(skb, type, value) add u64 attribute to skb - * NLA_PUT_STRING(skb, type, str) add string attribute to skb - * NLA_PUT_FLAG(skb, type) add flag attribute to skb - * NLA_PUT_MSECS(skb, type, jiffies) add msecs attribute to skb - * - * The meaning of these functions is equal to their lower case - * variants but they jump to the label nla_put_failure in case - * of a failure. 
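With the exception-based macros described above going away (their definitions are deleted further down in this file), dump code is expected to test the return value of the typed nla_put_*() helpers itself. A small conversion sketch; the MYATTR_* attributes and struct my_obj are made up for illustration.

enum { MYATTR_UNSPEC, MYATTR_ID, MYATTR_PORT, MYATTR_NAME };

struct my_obj {
	u32	id;
	__be16	port;
	char	name[16];
};

static int my_fill_info(struct sk_buff *skb, const struct my_obj *obj)
{
	/* old style:  NLA_PUT_U32(skb, MYATTR_ID, obj->id);
	 * new style:  check each helper's return value explicitly
	 */
	if (nla_put_u32(skb, MYATTR_ID, obj->id) ||
	    nla_put_net16(skb, MYATTR_PORT, obj->port) ||	/* sets NLA_F_NET_BYTEORDER */
	    nla_put_string(skb, MYATTR_NAME, obj->name))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}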
- * * Nested Attributes Construction: * nla_nest_start(skb, type) start a nested attribute * nla_nest_end(skb, nla) finalize a nested attribute @@ -772,6 +758,39 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) } /** + * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) +{ + return nla_put(skb, attrtype, sizeof(__be16), &value); +} + +/** + * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) +{ + return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) +{ + return nla_put(skb, attrtype, sizeof(__le16), &value); +} + +/** * nla_put_u32 - Add a u32 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -783,7 +802,40 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) } /** - * nla_put_64 - Add a u64 netlink attribute to a socket buffer + * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) +{ + return nla_put(skb, attrtype, sizeof(__be32), &value); +} + +/** + * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) +{ + return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) +{ + return nla_put(skb, attrtype, sizeof(__le32), &value); +} + +/** + * nla_put_u64 - Add a u64 netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @value: numeric value @@ -794,6 +846,39 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value) } /** + * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value) +{ + return nla_put(skb, attrtype, sizeof(__be64), &value); +} + +/** + * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value) +{ + return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value); +} + +/** + * nla_put_le64 - Add a __le64 netlink attribute to a 
socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @value: numeric value + */ +static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value) +{ + return nla_put(skb, attrtype, sizeof(__le64), &value); +} + +/** * nla_put_string - Add a string netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type @@ -828,60 +913,6 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, return nla_put(skb, attrtype, sizeof(u64), &tmp); } -#define NLA_PUT(skb, attrtype, attrlen, data) \ - do { \ - if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \ - goto nla_put_failure; \ - } while(0) - -#define NLA_PUT_TYPE(skb, type, attrtype, value) \ - do { \ - type __tmp = value; \ - NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \ - } while(0) - -#define NLA_PUT_U8(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u8, attrtype, value) - -#define NLA_PUT_U16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u16, attrtype, value) - -#define NLA_PUT_LE16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __le16, attrtype, value) - -#define NLA_PUT_BE16(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be16, attrtype, value) - -#define NLA_PUT_NET16(skb, attrtype, value) \ - NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value) - -#define NLA_PUT_U32(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u32, attrtype, value) - -#define NLA_PUT_BE32(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be32, attrtype, value) - -#define NLA_PUT_NET32(skb, attrtype, value) \ - NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value) - -#define NLA_PUT_U64(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, u64, attrtype, value) - -#define NLA_PUT_BE64(skb, attrtype, value) \ - NLA_PUT_TYPE(skb, __be64, attrtype, value) - -#define NLA_PUT_NET64(skb, attrtype, value) \ - NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value) - -#define NLA_PUT_STRING(skb, attrtype, value) \ - NLA_PUT(skb, attrtype, strlen(value) + 1, value) - -#define NLA_PUT_FLAG(skb, attrtype) \ - NLA_PUT(skb, attrtype, 0, NULL) - -#define NLA_PUT_MSECS(skb, attrtype, jiffies) \ - NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies)) - /** * nla_get_u32 - return payload of u32 attribute * @nla: u32 netlink attribute diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h index 7a911eca0f18..a053a19870cf 100644 --- a/include/net/netns/conntrack.h +++ b/include/net/netns/conntrack.h @@ -26,11 +26,14 @@ struct netns_ct { int sysctl_tstamp; int sysctl_checksum; unsigned int sysctl_log_invalid; /* Log invalid packets */ + int sysctl_auto_assign_helper; + bool auto_assign_helper_warned; #ifdef CONFIG_SYSCTL struct ctl_table_header *sysctl_header; struct ctl_table_header *acct_sysctl_header; struct ctl_table_header *tstamp_sysctl_header; struct ctl_table_header *event_sysctl_header; + struct ctl_table_header *helper_sysctl_header; #endif char *slabname; }; diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h index 548d78f2cc47..c06ac58ca107 100644 --- a/include/net/netns/hash.h +++ b/include/net/netns/hash.h @@ -5,7 +5,7 @@ struct net; -static inline unsigned net_hash_mix(struct net *net) +static inline unsigned int net_hash_mix(struct net *net) { #ifdef CONFIG_NET_NS /* diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h index 81abfcb2eb4e..b42be53587ba 100644 --- a/include/net/netns/ipv6.h +++ b/include/net/netns/ipv6.h @@ -12,7 +12,9 @@ struct ctl_table_header; struct netns_sysctl_ipv6 { #ifdef CONFIG_SYSCTL - struct 
ctl_table_header *table; + struct ctl_table_header *hdr; + struct ctl_table_header *route_hdr; + struct ctl_table_header *icmp_hdr; struct ctl_table_header *frags_hdr; #endif int bindv6only; diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h new file mode 100644 index 000000000000..aca65a5a9d0d --- /dev/null +++ b/include/net/nfc/hci.h @@ -0,0 +1,198 @@ +/* + * Copyright (C) 2011 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef __NET_HCI_H +#define __NET_HCI_H + +#include <linux/skbuff.h> + +#include <net/nfc/nfc.h> + +struct nfc_hci_dev; + +struct nfc_hci_ops { + int (*open) (struct nfc_hci_dev *hdev); + void (*close) (struct nfc_hci_dev *hdev); + int (*hci_ready) (struct nfc_hci_dev *hdev); + int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb); + int (*start_poll) (struct nfc_hci_dev *hdev, u32 protocols); + int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target); + int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate, + struct nfc_target *target); + int (*data_exchange) (struct nfc_hci_dev *hdev, + struct nfc_target *target, + struct sk_buff *skb, struct sk_buff **res_skb); +}; + +#define NFC_HCI_MAX_CUSTOM_GATES 15 +struct nfc_hci_init_data { + u8 gate_count; + u8 gates[NFC_HCI_MAX_CUSTOM_GATES]; + char session_id[9]; +}; + +typedef int (*xmit) (struct sk_buff *skb, void *cb_data); + +#define NFC_HCI_MAX_GATES 256 + +struct nfc_hci_dev { + struct nfc_dev *ndev; + + u32 max_data_link_payload; + + struct mutex msg_tx_mutex; + + struct list_head msg_tx_queue; + + struct workqueue_struct *msg_tx_wq; + struct work_struct msg_tx_work; + + struct timer_list cmd_timer; + struct hci_msg *cmd_pending_msg; + + struct sk_buff_head rx_hcp_frags; + + struct workqueue_struct *msg_rx_wq; + struct work_struct msg_rx_work; + + struct sk_buff_head msg_rx_queue; + + struct nfc_hci_ops *ops; + + struct nfc_hci_init_data init_data; + + void *clientdata; + + u8 gate2pipe[NFC_HCI_MAX_GATES]; + + bool poll_started; + struct nfc_target *targets; + int target_count; + + u8 sw_romlib; + u8 sw_patch; + u8 sw_flashlib_major; + u8 sw_flashlib_minor; + + u8 hw_derivative; + u8 hw_version; + u8 hw_mpw; + u8 hw_software; + u8 hw_bsid; +}; + +/* hci device allocation */ +struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops, + struct nfc_hci_init_data *init_data, + u32 protocols, + int tx_headroom, + int tx_tailroom, + int max_link_payload); +void nfc_hci_free_device(struct nfc_hci_dev *hdev); + +int nfc_hci_register_device(struct nfc_hci_dev *hdev); +void nfc_hci_unregister_device(struct nfc_hci_dev *hdev); + +void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata); +void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev); + +/* Host IDs */ +#define NFC_HCI_HOST_CONTROLLER_ID 0x00 +#define 
NFC_HCI_TERMINAL_HOST_ID 0x01 +#define NFC_HCI_UICC_HOST_ID 0x02 + +/* Host Controller Gates and registry indexes */ +#define NFC_HCI_ADMIN_GATE 0x00 +#define NFC_HCI_ADMIN_SESSION_IDENTITY 0x01 +#define NFC_HCI_ADMIN_MAX_PIPE 0x02 +#define NFC_HCI_ADMIN_WHITELIST 0x03 +#define NFC_HCI_ADMIN_HOST_LIST 0x04 + +#define NFC_HCI_LOOPBACK_GATE 0x04 + +#define NFC_HCI_ID_MGMT_GATE 0x05 +#define NFC_HCI_ID_MGMT_VERSION_SW 0x01 +#define NFC_HCI_ID_MGMT_VERSION_HW 0x03 +#define NFC_HCI_ID_MGMT_VENDOR_NAME 0x04 +#define NFC_HCI_ID_MGMT_MODEL_ID 0x05 +#define NFC_HCI_ID_MGMT_HCI_VERSION 0x02 +#define NFC_HCI_ID_MGMT_GATES_LIST 0x06 + +#define NFC_HCI_LINK_MGMT_GATE 0x06 +#define NFC_HCI_LINK_MGMT_REC_ERROR 0x01 + +#define NFC_HCI_RF_READER_B_GATE 0x11 +#define NFC_HCI_RF_READER_B_PUPI 0x03 +#define NFC_HCI_RF_READER_B_APPLICATION_DATA 0x04 +#define NFC_HCI_RF_READER_B_AFI 0x02 +#define NFC_HCI_RF_READER_B_HIGHER_LAYER_RESPONSE 0x01 +#define NFC_HCI_RF_READER_B_HIGHER_LAYER_DATA 0x05 + +#define NFC_HCI_RF_READER_A_GATE 0x13 +#define NFC_HCI_RF_READER_A_UID 0x02 +#define NFC_HCI_RF_READER_A_ATQA 0x04 +#define NFC_HCI_RF_READER_A_APPLICATION_DATA 0x05 +#define NFC_HCI_RF_READER_A_SAK 0x03 +#define NFC_HCI_RF_READER_A_FWI_SFGT 0x06 +#define NFC_HCI_RF_READER_A_DATARATE_MAX 0x01 + +#define NFC_HCI_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5) +#define NFC_HCI_TYPE_A_SEL_PROT_MIFARE 0 +#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443 1 +#define NFC_HCI_TYPE_A_SEL_PROT_DEP 2 +#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP 3 + +/* Generic events */ +#define NFC_HCI_EVT_HCI_END_OF_OPERATION 0x01 +#define NFC_HCI_EVT_POST_DATA 0x02 +#define NFC_HCI_EVT_HOT_PLUG 0x03 + +/* Reader RF gates events */ +#define NFC_HCI_EVT_READER_REQUESTED 0x10 +#define NFC_HCI_EVT_END_OPERATION 0x11 + +/* Reader Application gate events */ +#define NFC_HCI_EVT_TARGET_DISCOVERED 0x10 + +/* receiving messages from lower layer */ +void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result, + struct sk_buff *skb); +void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, + struct sk_buff *skb); +void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, + struct sk_buff *skb); +void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb); + +/* connecting to gates and sending hci instructions */ +int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate); +int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate); +int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev); +int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + struct sk_buff **skb); +int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx, + const u8 *param, size_t param_len); +int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd, + const u8 *param, size_t param_len, struct sk_buff **skb); +int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response, + const u8 *param, size_t param_len); +int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event, + const u8 *param, size_t param_len); + +#endif /* __NET_HCI_H */ diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index bac070bf3514..9a2505a5b8de 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -62,10 +62,12 @@ struct nfc_ops { int (*data_exchange)(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context); + int (*check_presence)(struct nfc_dev *dev, u32 target_idx); }; #define NFC_TARGET_IDX_ANY -1 #define NFC_MAX_GT_LEN 48 +#define NFC_TARGET_IDX_NONE 
0xffffffff struct nfc_target { u32 idx; @@ -78,6 +80,8 @@ struct nfc_target { u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; u8 sensf_res_len; u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; + u8 hci_reader_gate; + u8 logical_idx; }; struct nfc_genl_data { @@ -86,7 +90,8 @@ struct nfc_genl_data { }; struct nfc_dev { - unsigned idx; + unsigned int idx; + u32 target_next_idx; struct nfc_target *targets; int n_targets; int targets_generation; @@ -94,7 +99,7 @@ struct nfc_dev { struct device dev; bool dev_up; bool polling; - bool remote_activated; + u32 activated_target_idx; bool dep_link_up; u32 dep_rf_mode; struct nfc_genl_data genl_data; @@ -103,6 +108,10 @@ struct nfc_dev { int tx_headroom; int tx_tailroom; + struct timer_list check_pres_timer; + struct workqueue_struct *check_pres_wq; + struct work_struct check_pres_work; + struct nfc_ops *ops; }; #define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) @@ -181,6 +190,7 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev, int nfc_targets_found(struct nfc_dev *dev, struct nfc_target *targets, int ntargets); +int nfc_target_lost(struct nfc_dev *dev, u32 target_idx); int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, u8 comm_mode, u8 rf_mode); diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h new file mode 100644 index 000000000000..1071987d0408 --- /dev/null +++ b/include/net/nfc/shdlc.h @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2012 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __NFC_SHDLC_H +#define __NFC_SHDLC_H + +struct nfc_shdlc; + +struct nfc_shdlc_ops { + int (*open) (struct nfc_shdlc *shdlc); + void (*close) (struct nfc_shdlc *shdlc); + int (*hci_ready) (struct nfc_shdlc *shdlc); + int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb); + int (*start_poll) (struct nfc_shdlc *shdlc, u32 protocols); + int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate, + struct nfc_target *target); + int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate, + struct nfc_target *target); + int (*data_exchange) (struct nfc_shdlc *shdlc, + struct nfc_target *target, + struct sk_buff *skb, struct sk_buff **res_skb); +}; + +enum shdlc_state { + SHDLC_DISCONNECTED = 0, + SHDLC_CONNECTING = 1, + SHDLC_NEGOCIATING = 2, + SHDLC_CONNECTED = 3 +}; + +struct nfc_shdlc { + struct mutex state_mutex; + enum shdlc_state state; + int hard_fault; + + struct nfc_hci_dev *hdev; + + wait_queue_head_t *connect_wq; + int connect_tries; + int connect_result; + struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */ + + u8 w; /* window size */ + bool srej_support; + + struct timer_list t1_timer; /* send ack timeout */ + bool t1_active; + + struct timer_list t2_timer; /* guard/retransmit timeout */ + bool t2_active; + + int ns; /* next seq num for send */ + int nr; /* next expected seq num for receive */ + int dnr; /* oldest sent unacked seq num */ + + struct sk_buff_head rcv_q; + + struct sk_buff_head send_q; + bool rnr; /* other side is not ready to receive */ + + struct sk_buff_head ack_pending_q; + + struct workqueue_struct *sm_wq; + struct work_struct sm_work; + + struct nfc_shdlc_ops *ops; + + int client_headroom; + int client_tailroom; + + void *clientdata; +}; + +void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb); + +struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops, + struct nfc_hci_init_data *init_data, + u32 protocols, + int tx_headroom, int tx_tailroom, + int max_link_payload, const char *devname); + +void nfc_shdlc_free(struct nfc_shdlc *shdlc); + +void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata); +void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc); +struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc); + +#endif /* __NFC_SHDLC_H */ diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index fffdc603f4c8..66f5ac370f92 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h @@ -107,7 +107,7 @@ extern int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp, /* Calculate maximal size of packet seen by hard_start_xmit routine of this device. */ -static inline unsigned psched_mtu(const struct net_device *dev) +static inline unsigned int psched_mtu(const struct net_device *dev) { return dev->mtu + dev->hard_header_len; } diff --git a/include/net/rawv6.h b/include/net/rawv6.h index cf7577234457..e7ea660e4db6 100644 --- a/include/net/rawv6.h +++ b/include/net/rawv6.h @@ -5,7 +5,7 @@ void raw6_icmp_error(struct sk_buff *, int nexthdr, u8 type, u8 code, int inner_offset, __be32); -int raw6_local_deliver(struct sk_buff *, int); +bool raw6_local_deliver(struct sk_buff *, int); extern int rawv6_rcv(struct sock *sk, struct sk_buff *skb); diff --git a/include/net/red.h b/include/net/red.h index 77d4c3745cb5..ef46058d35bf 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -245,7 +245,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms * * dummy packets as a burst after idle time, i.e. 
* - * p->qavg *= (1-W)^m + * v->qavg *= (1-W)^m * * This is an apparently overcomplicated solution (f.e. we have to * precompute a table to make this calculation in reasonable time) @@ -279,7 +279,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p unsigned int backlog) { /* - * NOTE: p->qavg is fixed point number with point at Wlog. + * NOTE: v->qavg is fixed point number with point at Wlog. * The formula below is equvalent to floating point * version: * @@ -390,7 +390,7 @@ static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v) if (red_is_idling(v)) qavg = red_calc_qavg_from_idle_time(p, v); - /* p->qavg is fixed point number with point at Wlog */ + /* v->qavg is fixed point number with point at Wlog */ qavg >>= p->Wlog; if (qavg > p->target_max && p->max_P <= MAX_P_MAX) diff --git a/include/net/route.h b/include/net/route.h index b1c0d5b564c2..ed2b78e2375d 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -50,7 +50,7 @@ struct rtable { __be32 rt_key_src; int rt_genid; - unsigned rt_flags; + unsigned int rt_flags; __u16 rt_type; __u8 rt_key_tos; @@ -185,8 +185,8 @@ extern unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph unsigned short new_mtu, struct net_device *dev); extern void ip_rt_send_redirect(struct sk_buff *skb); -extern unsigned inet_addr_type(struct net *net, __be32 addr); -extern unsigned inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); +extern unsigned int inet_addr_type(struct net *net, __be32 addr); +extern unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); extern void ip_rt_multicast_event(struct in_device *); extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); extern void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 370293901971..bbcfd0993432 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -41,9 +41,11 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) * @get_size: Function to calculate required room for dumping device * specific netlink attributes * @fill_info: Function to dump device specific netlink attributes - * @get_xstats_size: Function to calculate required room for dumping devic + * @get_xstats_size: Function to calculate required room for dumping device * specific statistics * @fill_xstats: Function to dump device specific statistics + * @get_tx_queues: Function to determine number of transmit queues to create when + * creating a new device. */ struct rtnl_link_ops { struct list_head list; @@ -75,9 +77,8 @@ struct rtnl_link_ops { size_t (*get_xstats_size)(const struct net_device *dev); int (*fill_xstats)(struct sk_buff *skb, const struct net_device *dev); - int (*get_tx_queues)(struct net *net, struct nlattr *tb[], - unsigned int *tx_queues, - unsigned int *real_tx_queues); + int (*get_tx_queues)(struct net *net, + struct nlattr *tb[]); }; extern int __rtnl_link_register(struct rtnl_link_ops *ops); @@ -94,7 +95,7 @@ extern void rtnl_link_unregister(struct rtnl_link_ops *ops); * @fill_link_af: Function to fill IFLA_AF_SPEC with address family * specific netlink attributes. * @get_link_af_size: Function to calculate size of address family specific - * netlink attributes exlusive the container attribute. + * netlink attributes. * @validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr * for invalid configuration settings. 
* @set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 6ee44b24864a..a2ef81466b00 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -704,4 +704,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); } +/* The cookie is always 0 since this is how it's used in the + * pmtu code. + */ +static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) +{ + if (t->dst && !dst_check(t->dst, 0)) { + dst_release(t->dst); + t->dst = NULL; + } + + return t->dst; +} + #endif /* __net_sctp_h__ */ diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 88949a994538..e4652fe58958 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -1145,10 +1145,10 @@ struct sctp_outq { /* Data pending that has never been transmitted. */ struct list_head out_chunk_list; - unsigned out_qlen; /* Total length of queued data chunks. */ + unsigned int out_qlen; /* Total length of queued data chunks. */ /* Error of send failed, may used in SCTP_SEND_FAILED event. */ - unsigned error; + unsigned int error; /* These are control chunks we want to send. */ struct list_head control_chunk_list; @@ -2000,8 +2000,8 @@ void sctp_assoc_update(struct sctp_association *old, __u32 sctp_association_get_next_tsn(struct sctp_association *); void sctp_assoc_sync_pmtu(struct sctp_association *); -void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned); -void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned); +void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int); +void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int); void sctp_assoc_set_primary(struct sctp_association *, struct sctp_transport *); void sctp_assoc_del_nonprimary_peers(struct sctp_association *, diff --git a/include/net/sock.h b/include/net/sock.h index a6ba1f8871fd..d89f0582b6b6 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -70,16 +70,16 @@ struct cgroup; struct cgroup_subsys; #ifdef CONFIG_NET -int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss); -void mem_cgroup_sockets_destroy(struct cgroup *cgrp); +int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss); +void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg); #else static inline -int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss) +int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { return 0; } static inline -void mem_cgroup_sockets_destroy(struct cgroup *cgrp) +void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg) { } #endif @@ -97,7 +97,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp) #else /* Validate arguments and do nothing */ static inline __printf(2, 3) -void SOCK_DEBUG(struct sock *sk, const char *msg, ...) +void SOCK_DEBUG(const struct sock *sk, const char *msg, ...) 
{ } #endif @@ -246,6 +246,7 @@ struct cg_proto; * @sk_user_data: RPC layer private data * @sk_sndmsg_page: cached page for sendmsg * @sk_sndmsg_off: cached offset for sendmsg + * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @sk_security: used by security modules * @sk_mark: generic packet mark @@ -371,11 +372,22 @@ struct sock { void (*sk_data_ready)(struct sock *sk, int bytes); void (*sk_write_space)(struct sock *sk); void (*sk_error_report)(struct sock *sk); - int (*sk_backlog_rcv)(struct sock *sk, - struct sk_buff *skb); + int (*sk_backlog_rcv)(struct sock *sk, + struct sk_buff *skb); void (*sk_destruct)(struct sock *sk); }; +/* + * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK + * or not whether his port will be reused by someone else. SK_FORCE_REUSE + * on a socket means that the socket will reuse everybody else's port + * without looking at the other's sk_reuse value. + */ + +#define SK_NO_REUSE 0 +#define SK_CAN_REUSE 1 +#define SK_FORCE_REUSE 2 + static inline int sk_peek_offset(struct sock *sk, int flags) { if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) @@ -442,40 +454,40 @@ static inline struct sock *sk_nulls_next(const struct sock *sk) NULL; } -static inline int sk_unhashed(const struct sock *sk) +static inline bool sk_unhashed(const struct sock *sk) { return hlist_unhashed(&sk->sk_node); } -static inline int sk_hashed(const struct sock *sk) +static inline bool sk_hashed(const struct sock *sk) { return !sk_unhashed(sk); } -static __inline__ void sk_node_init(struct hlist_node *node) +static inline void sk_node_init(struct hlist_node *node) { node->pprev = NULL; } -static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node) +static inline void sk_nulls_node_init(struct hlist_nulls_node *node) { node->pprev = NULL; } -static __inline__ void __sk_del_node(struct sock *sk) +static inline void __sk_del_node(struct sock *sk) { __hlist_del(&sk->sk_node); } /* NB: equivalent to hlist_del_init_rcu */ -static __inline__ int __sk_del_node_init(struct sock *sk) +static inline bool __sk_del_node_init(struct sock *sk) { if (sk_hashed(sk)) { __sk_del_node(sk); sk_node_init(&sk->sk_node); - return 1; + return true; } - return 0; + return false; } /* Grab socket reference count. 
This operation is valid only @@ -497,9 +509,9 @@ static inline void __sock_put(struct sock *sk) atomic_dec(&sk->sk_refcnt); } -static __inline__ int sk_del_node_init(struct sock *sk) +static inline bool sk_del_node_init(struct sock *sk) { - int rc = __sk_del_node_init(sk); + bool rc = __sk_del_node_init(sk); if (rc) { /* paranoid for a while -acme */ @@ -510,18 +522,18 @@ static __inline__ int sk_del_node_init(struct sock *sk) } #define sk_del_node_init_rcu(sk) sk_del_node_init(sk) -static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk) +static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk) { if (sk_hashed(sk)) { hlist_nulls_del_init_rcu(&sk->sk_nulls_node); - return 1; + return true; } - return 0; + return false; } -static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) +static inline bool sk_nulls_del_node_init_rcu(struct sock *sk) { - int rc = __sk_nulls_del_node_init_rcu(sk); + bool rc = __sk_nulls_del_node_init_rcu(sk); if (rc) { /* paranoid for a while -acme */ @@ -531,40 +543,40 @@ static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk) return rc; } -static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list) +static inline void __sk_add_node(struct sock *sk, struct hlist_head *list) { hlist_add_head(&sk->sk_node, list); } -static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list) +static inline void sk_add_node(struct sock *sk, struct hlist_head *list) { sock_hold(sk); __sk_add_node(sk, list); } -static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) +static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list) { sock_hold(sk); hlist_add_head_rcu(&sk->sk_node, list); } -static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) +static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list); } -static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) +static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list) { sock_hold(sk); __sk_nulls_add_node_rcu(sk, list); } -static __inline__ void __sk_del_bind_node(struct sock *sk) +static inline void __sk_del_bind_node(struct sock *sk) { __hlist_del(&sk->sk_bind_node); } -static __inline__ void sk_add_bind_node(struct sock *sk, +static inline void sk_add_bind_node(struct sock *sk, struct hlist_head *list) { hlist_add_head(&sk->sk_bind_node, list); @@ -638,7 +650,7 @@ static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag) __clear_bit(flag, &sk->sk_flags); } -static inline int sock_flag(struct sock *sk, enum sock_flags flag) +static inline bool sock_flag(const struct sock *sk, enum sock_flags flag) { return test_bit(flag, &sk->sk_flags); } @@ -653,7 +665,7 @@ static inline void sk_acceptq_added(struct sock *sk) sk->sk_ack_backlog++; } -static inline int sk_acceptq_is_full(struct sock *sk) +static inline bool sk_acceptq_is_full(const struct sock *sk) { return sk->sk_ack_backlog > sk->sk_max_ack_backlog; } @@ -661,19 +673,19 @@ static inline int sk_acceptq_is_full(struct sock *sk) /* * Compute minimal free write space needed to queue new packets. 
*/ -static inline int sk_stream_min_wspace(struct sock *sk) +static inline int sk_stream_min_wspace(const struct sock *sk) { return sk->sk_wmem_queued >> 1; } -static inline int sk_stream_wspace(struct sock *sk) +static inline int sk_stream_wspace(const struct sock *sk) { return sk->sk_sndbuf - sk->sk_wmem_queued; } extern void sk_stream_write_space(struct sock *sk); -static inline int sk_stream_memory_free(struct sock *sk) +static inline bool sk_stream_memory_free(const struct sock *sk) { return sk->sk_wmem_queued < sk->sk_sndbuf; } @@ -698,17 +710,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb) * Do not take into account this skb truesize, * to allow even a single big packet to come. */ -static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb) +static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb, + unsigned int limit) { unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); - return qsize > sk->sk_rcvbuf; + return qsize > limit; } /* The per-socket spinlock must be held here. */ -static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb) +static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb, + unsigned int limit) { - if (sk_rcvqueues_full(sk, skb)) + if (sk_rcvqueues_full(sk, skb, limit)) return -ENOBUFS; __sk_add_backlog(sk, skb); @@ -795,26 +809,26 @@ struct module; * transport -> network interface is defined by struct inet_proto */ struct proto { - void (*close)(struct sock *sk, + void (*close)(struct sock *sk, long timeout); int (*connect)(struct sock *sk, - struct sockaddr *uaddr, + struct sockaddr *uaddr, int addr_len); int (*disconnect)(struct sock *sk, int flags); - struct sock * (*accept) (struct sock *sk, int flags, int *err); + struct sock * (*accept)(struct sock *sk, int flags, int *err); int (*ioctl)(struct sock *sk, int cmd, unsigned long arg); int (*init)(struct sock *sk); void (*destroy)(struct sock *sk); void (*shutdown)(struct sock *sk, int how); - int (*setsockopt)(struct sock *sk, int level, + int (*setsockopt)(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); - int (*getsockopt)(struct sock *sk, int level, - int optname, char __user *optval, - int __user *option); + int (*getsockopt)(struct sock *sk, int level, + int optname, char __user *optval, + int __user *option); #ifdef CONFIG_COMPAT int (*compat_setsockopt)(struct sock *sk, int level, @@ -831,14 +845,14 @@ struct proto { struct msghdr *msg, size_t len); int (*recvmsg)(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, - size_t len, int noblock, int flags, - int *addr_len); + size_t len, int noblock, int flags, + int *addr_len); int (*sendpage)(struct sock *sk, struct page *page, int offset, size_t size, int flags); - int (*bind)(struct sock *sk, + int (*bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len); - int (*backlog_rcv) (struct sock *sk, + int (*backlog_rcv) (struct sock *sk, struct sk_buff *skb); /* Keeping track of sk's, looking them up, and port selection methods. */ @@ -900,9 +914,9 @@ struct proto { * This function has to setup any files the protocol want to * appear in the kmem cgroup filesystem. 
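The sk_rcvqueues_full()/sk_add_backlog() change above adds an explicit limit argument, so each caller now chooses its own bound instead of always being capped at sk->sk_rcvbuf. A minimal sketch, not from this patch, of a receive path using the new signatures; my_proto_* and the particular limit are assumptions.

static int my_proto_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* drop early if receive plus backlog queues exceed the chosen bound */
	if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
		goto drop;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		my_proto_do_rcv(sk, skb);	/* hypothetical fast path */
	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
		bh_unlock_sock(sk);
		goto drop;
	}
	bh_unlock_sock(sk);
	return 0;

drop:
	kfree_skb(skb);
	return -ENOBUFS;
}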
*/ - int (*init_cgroup)(struct cgroup *cgrp, + int (*init_cgroup)(struct mem_cgroup *memcg, struct cgroup_subsys *ss); - void (*destroy_cgroup)(struct cgroup *cgrp); + void (*destroy_cgroup)(struct mem_cgroup *memcg); struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); #endif }; @@ -1128,9 +1142,9 @@ sk_sockets_allocated_read_positive(struct sock *sk) struct proto *prot = sk->sk_prot; if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated); + return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated); - return percpu_counter_sum_positive(prot->sockets_allocated); + return percpu_counter_read_positive(prot->sockets_allocated); } static inline int @@ -1159,7 +1173,7 @@ proto_memory_pressure(struct proto *prot) extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc); extern int sock_prot_inuse_get(struct net *net, struct proto *proto); #else -static void inline sock_prot_inuse_add(struct net *net, struct proto *prot, +static inline void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc) { } @@ -1246,24 +1260,24 @@ static inline int sk_mem_pages(int amt) return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT; } -static inline int sk_has_account(struct sock *sk) +static inline bool sk_has_account(struct sock *sk) { /* return true if protocol supports memory accounting */ return !!sk->sk_prot->memory_allocated; } -static inline int sk_wmem_schedule(struct sock *sk, int size) +static inline bool sk_wmem_schedule(struct sock *sk, int size) { if (!sk_has_account(sk)) - return 1; + return true; return size <= sk->sk_forward_alloc || __sk_mem_schedule(sk, size, SK_MEM_SEND); } -static inline int sk_rmem_schedule(struct sock *sk, int size) +static inline bool sk_rmem_schedule(struct sock *sk, int size) { if (!sk_has_account(sk)) - return 1; + return true; return size <= sk->sk_forward_alloc || __sk_mem_schedule(sk, size, SK_MEM_RECV); } @@ -1328,7 +1342,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) * Mark both the sk_lock and the sk_lock.slock as a * per-address-family lock class. */ -#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ +#define sock_lock_init_class_and_name(sk, sname, skey, name, key) \ do { \ sk->sk_lock.owned = 0; \ init_waitqueue_head(&sk->sk_lock.wq); \ @@ -1336,7 +1350,7 @@ do { \ debug_check_no_locks_freed((void *)&(sk)->sk_lock, \ sizeof((sk)->sk_lock)); \ lockdep_set_class_and_name(&(sk)->sk_lock.slock, \ - (skey), (sname)); \ + (skey), (sname)); \ lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ } while (0) @@ -1396,13 +1410,13 @@ extern int sock_setsockopt(struct socket *sock, int level, unsigned int optlen); extern int sock_getsockopt(struct socket *sock, int level, - int op, char __user *optval, + int op, char __user *optval, int __user *optlen); -extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, +extern struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, int noblock, int *errcode); -extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, +extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, unsigned long data_len, int noblock, @@ -1424,7 +1438,7 @@ static inline void sock_update_classid(struct sock *sk) * Functions to fill in entries in struct proto_ops when a protocol * does not implement a particular function. 
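The sock_no_*() helpers declared below let a protocol point unsupported operations at ready-made stubs instead of open-coding them. A short illustrative ops table; the protocol family and the my_* handlers are assumptions, not part of this patch.

static const struct proto_ops my_dgram_ops = {
	.family		= PF_MYPROTO,		/* hypothetical address family */
	.owner		= THIS_MODULE,
	.bind		= my_bind,		/* implemented by the protocol */
	.sendmsg	= my_sendmsg,
	.recvmsg	= my_recvmsg,
	/* everything the protocol does not support points at a stub: */
	.connect	= sock_no_connect,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};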
*/ -extern int sock_no_bind(struct socket *, +extern int sock_no_bind(struct socket *, struct sockaddr *, int); extern int sock_no_connect(struct socket *, struct sockaddr *, int, int); @@ -1453,7 +1467,7 @@ extern int sock_no_mmap(struct file *file, struct vm_area_struct *vma); extern ssize_t sock_no_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, + int offset, size_t size, int flags); /* @@ -1476,7 +1490,7 @@ extern void sk_common_release(struct sock *sk); /* * Default socket callbacks and setup code */ - + /* Initialise core socket variables */ extern void sock_init_data(struct socket *sock, struct sock *sk); @@ -1676,7 +1690,7 @@ extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie); -static inline int sk_can_gso(const struct sock *sk) +static inline bool sk_can_gso(const struct sock *sk) { return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type); } @@ -1793,7 +1807,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk) * * Returns true if socket has write or read allocations */ -static inline int sk_has_allocations(const struct sock *sk) +static inline bool sk_has_allocations(const struct sock *sk) { return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk); } @@ -1832,9 +1846,7 @@ static inline int sk_has_allocations(const struct sock *sk) */ static inline bool wq_has_sleeper(struct socket_wq *wq) { - - /* - * We need to be sure we are in sync with the + /* We need to be sure we are in sync with the * add_wait_queue modifications to the wait queue. * * This memory barrier is paired in the sock_poll_wait. @@ -1856,22 +1868,21 @@ static inline void sock_poll_wait(struct file *filp, { if (!poll_does_not_wait(p) && wait_address) { poll_wait(filp, wait_address, p); - /* - * We need to be sure we are in sync with the + /* We need to be sure we are in sync with the * socket flags modification. * * This memory barrier is paired in the wq_has_sleeper. - */ + */ smp_mb(); } } /* - * Queue a received datagram if it will fit. Stream and sequenced + * Queue a received datagram if it will fit. Stream and sequenced * protocols can't normally use this as they need to fit buffers in * and play with them. * - * Inlined as it's very short and called for pretty much every + * Inlined as it's very short and called for pretty much every * packet ever received. 
*/ @@ -1897,10 +1908,10 @@ static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk) sk_mem_charge(sk, skb->truesize); } -extern void sk_reset_timer(struct sock *sk, struct timer_list* timer, +extern void sk_reset_timer(struct sock *sk, struct timer_list *timer, unsigned long expires); -extern void sk_stop_timer(struct sock *sk, struct timer_list* timer); +extern void sk_stop_timer(struct sock *sk, struct timer_list *timer); extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); @@ -1909,7 +1920,7 @@ extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb); /* * Recover an error report and clear atomically */ - + static inline int sock_error(struct sock *sk) { int err; @@ -1925,7 +1936,7 @@ static inline unsigned long sock_wspace(struct sock *sk) if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc); - if (amt < 0) + if (amt < 0) amt = 0; } return amt; @@ -1969,7 +1980,7 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk) /* * Default write policy as shown to user space via poll/select/SIGIO */ -static inline int sock_writeable(const struct sock *sk) +static inline bool sock_writeable(const struct sock *sk) { return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1); } @@ -1979,12 +1990,12 @@ static inline gfp_t gfp_any(void) return in_softirq() ? GFP_ATOMIC : GFP_KERNEL; } -static inline long sock_rcvtimeo(const struct sock *sk, int noblock) +static inline long sock_rcvtimeo(const struct sock *sk, bool noblock) { return noblock ? 0 : sk->sk_rcvtimeo; } -static inline long sock_sndtimeo(const struct sock *sk, int noblock) +static inline long sock_sndtimeo(const struct sock *sk, bool noblock) { return noblock ? 0 : sk->sk_sndtimeo; } @@ -2007,7 +2018,7 @@ extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk, struct sk_buff *skb); -static __inline__ void +static inline void sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) { ktime_t kt = skb->tstamp; @@ -2048,7 +2059,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, (1UL << SOCK_RCVTSTAMP) | \ (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ - (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ + (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) if (sk->sk_flags & FLAGS_TS_OR_DROPS) @@ -2077,7 +2088,7 @@ extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags); * locked so that the sk_buff queue operation is ok. 
*/ #ifdef CONFIG_NET_DMA -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) { __skb_unlink(skb, &sk->sk_receive_queue); if (!copied_early) @@ -2086,7 +2097,7 @@ static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_e __skb_queue_tail(&sk->sk_async_wait_queue, skb); } #else -static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early) +static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, bool copied_early) { __skb_unlink(skb, &sk->sk_receive_queue); __kfree_skb(skb); @@ -2133,8 +2144,8 @@ extern void sock_enable_timestamp(struct sock *sk, int flag); extern int sock_get_timestamp(struct sock *, struct timeval __user *); extern int sock_get_timestampns(struct sock *, struct timespec __user *); -/* - * Enable debug/info messages +/* + * Enable debug/info messages */ extern int net_msg_warn; #define NETDEBUG(fmt, args...) \ diff --git a/include/net/tcp.h b/include/net/tcp.h index f75a04d752cb..e79aa48d9fc1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -123,7 +123,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo); #endif #define TCP_RTO_MAX ((unsigned)(120*HZ)) #define TCP_RTO_MIN ((unsigned)(HZ/5)) -#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC2988bis initial RTO value */ +#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */ #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now * used as a fallback RTO for the * initial data transmission if no @@ -252,6 +252,7 @@ extern int sysctl_tcp_max_ssthresh; extern int sysctl_tcp_cookie_size; extern int sysctl_tcp_thin_linear_timeouts; extern int sysctl_tcp_thin_dupack; +extern int sysctl_tcp_early_retrans; extern atomic_long_t tcp_memory_allocated; extern struct percpu_counter tcp_sockets_allocated; @@ -262,14 +263,14 @@ extern int tcp_memory_pressure; * and worry about wraparound (automatic with unsigned arithmetic). */ -static inline int before(__u32 seq1, __u32 seq2) +static inline bool before(__u32 seq1, __u32 seq2) { return (__s32)(seq1-seq2) < 0; } #define after(seq2, seq1) before(seq1, seq2) /* is s2<=s1<=s3 ? */ -static inline int between(__u32 seq1, __u32 seq2, __u32 seq3) +static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3) { return seq3 - seq2 >= seq1 - seq2; } @@ -304,7 +305,7 @@ static inline void tcp_synq_overflow(struct sock *sk) } /* syncookies: no recent synqueue overflow on this listening socket? 
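The before()/after() sequence comparisons above (now returning bool) stay correct across 32-bit wraparound because of the signed subtraction. A worked example, not in the patch:

/* Worked example of the wraparound-safe comparison:
 *
 *   before(0xfffffff0, 0x00000010)
 *     = (__s32)(0xfffffff0 - 0x00000010) < 0
 *     = (__s32)0xffffffe0 < 0            -> -32 < 0, i.e. true
 *
 * so a sequence number just below the 2^32 wrap is still "before" one just
 * past the wrap, which a plain unsigned '<' would get backwards.
 */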
*/ -static inline int tcp_synq_no_recent_overflow(const struct sock *sk) +static inline bool tcp_synq_no_recent_overflow(const struct sock *sk) { unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp; return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK); @@ -366,13 +367,6 @@ static inline void tcp_dec_quickack_mode(struct sock *sk, #define TCP_ECN_DEMAND_CWR 4 #define TCP_ECN_SEEN 8 -static __inline__ void -TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th) -{ - if (sysctl_tcp_ecn && th->ece && th->cwr) - inet_rsk(req)->ecn_ok = 1; -} - enum tcp_tw_status { TCP_TW_SUCCESS = 0, TCP_TW_RST = 1, @@ -389,12 +383,13 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb, struct request_sock **prev); extern int tcp_child_process(struct sock *parent, struct sock *child, struct sk_buff *skb); -extern int tcp_use_frto(struct sock *sk); +extern bool tcp_use_frto(struct sock *sk); extern void tcp_enter_frto(struct sock *sk); extern void tcp_enter_loss(struct sock *sk, int how); extern void tcp_clear_retrans(struct tcp_sock *tp); extern void tcp_update_metrics(struct sock *sk); extern void tcp_close(struct sock *sk, long timeout); +extern void tcp_init_sock(struct sock *sk); extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait); extern int tcp_getsockopt(struct sock *sk, int level, int optname, @@ -435,6 +430,9 @@ extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, struct request_values *rvp); extern int tcp_disconnect(struct sock *sk, int flags); +void tcp_connect_init(struct sock *sk); +void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); +int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); /* From syncookies.c */ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; @@ -472,7 +470,7 @@ static inline __u32 cookie_v6_init_sequence(struct sock *sk, extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss, int nonagle); -extern int tcp_may_send_now(struct sock *sk); +extern bool tcp_may_send_now(struct sock *sk); extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); extern void tcp_retransmit_timer(struct sock *sk); extern void tcp_xmit_retransmit_queue(struct sock *); @@ -486,15 +484,17 @@ extern int tcp_write_wakeup(struct sock *); extern void tcp_send_fin(struct sock *sk); extern void tcp_send_active_reset(struct sock *sk, gfp_t priority); extern int tcp_send_synack(struct sock *); -extern int tcp_syn_flood_action(struct sock *sk, - const struct sk_buff *skb, - const char *proto); +extern bool tcp_syn_flood_action(struct sock *sk, + const struct sk_buff *skb, + const char *proto); extern void tcp_push_one(struct sock *, unsigned int mss_now); extern void tcp_send_ack(struct sock *sk); extern void tcp_send_delayed_ack(struct sock *sk); /* tcp_input.c */ extern void tcp_cwnd_application_limited(struct sock *sk); +extern void tcp_resume_early_retransmit(struct sock *sk); +extern void tcp_rearm_rto(struct sock *sk); /* tcp_timer.c */ extern void tcp_init_xmit_timers(struct sock *); @@ -540,8 +540,8 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, extern void tcp_initialize_rcv_mss(struct sock *sk); -extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu); -extern int tcp_mss_to_mtu(const struct sock *sk, int mss); +extern int tcp_mtu_to_mss(struct sock *sk, int pmtu); +extern int tcp_mss_to_mtu(struct sock *sk, int mss); extern void tcp_mtup_init(struct sock *sk); extern void 
tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); @@ -609,6 +609,8 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp) */ extern u32 __tcp_select_window(struct sock *sk); +void tcp_send_window_probe(struct sock *sk); + /* TCP timestamps are only 32-bits, this causes a slight * complication on 64-bit systems since we store a snapshot * of jiffies in the buffer control blocks below. We decided @@ -645,21 +647,38 @@ struct tcp_skb_cb { __u32 end_seq; /* SEQ + FIN + SYN + datalen */ __u32 when; /* used to compute rtt's */ __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ + __u8 sacked; /* State flags for SACK/FACK. */ #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */ #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ #define TCPCB_LOST 0x04 /* SKB is lost */ #define TCPCB_TAGBITS 0x07 /* All tag bits */ - __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ - /* 1 byte hole */ #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) + __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ + /* 1 byte hole */ __u32 ack_seq; /* Sequence number ACK'd */ }; #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) +/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set + * + * If we receive a SYN packet with these bits set, it means a network is + * playing bad games with TOS bits. In order to avoid possible false congestion + * notifications, we disable TCP ECN negociation. + */ +static inline void +TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb) +{ + const struct tcphdr *th = tcp_hdr(skb); + + if (sysctl_tcp_ecn && th->ece && th->cwr && + INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield)) + inet_rsk(req)->ecn_ok = 1; +} + /* Due to TSO, an SKB can be composed of multiple actual * packets. To keep these tracked properly, we use this. */ @@ -775,12 +794,12 @@ static inline int tcp_is_sack(const struct tcp_sock *tp) return tp->rx_opt.sack_ok; } -static inline int tcp_is_reno(const struct tcp_sock *tp) +static inline bool tcp_is_reno(const struct tcp_sock *tp) { return !tcp_is_sack(tp); } -static inline int tcp_is_fack(const struct tcp_sock *tp) +static inline bool tcp_is_fack(const struct tcp_sock *tp) { return tp->rx_opt.sack_ok & TCP_FACK_ENABLED; } @@ -790,6 +809,21 @@ static inline void tcp_enable_fack(struct tcp_sock *tp) tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; } +/* TCP early-retransmit (ER) is similar to but more conservative than + * the thin-dupack feature. Enable ER only if thin-dupack is disabled. 
+ */ +static inline void tcp_enable_early_retrans(struct tcp_sock *tp) +{ + tp->do_early_retrans = sysctl_tcp_early_retrans && + !sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3; + tp->early_retrans_delayed = 0; +} + +static inline void tcp_disable_early_retrans(struct tcp_sock *tp) +{ + tp->do_early_retrans = 0; +} + static inline unsigned int tcp_left_out(const struct tcp_sock *tp) { return tp->sacked_out + tp->lost_out; @@ -867,7 +901,7 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp) { return tp->snd_una + tp->snd_wnd; } -extern int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); +extern bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight); static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss, const struct sk_buff *skb) @@ -910,7 +944,7 @@ static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) return __skb_checksum_complete(skb); } -static inline int tcp_checksum_complete(struct sk_buff *skb) +static inline bool tcp_checksum_complete(struct sk_buff *skb) { return !skb_csum_unnecessary(skb) && __tcp_checksum_complete(skb); @@ -940,12 +974,12 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) * * NOTE: is this not too big to inline? */ -static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb) +static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (sysctl_tcp_low_latency || !tp->ucopy.task) - return 0; + return false; __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; @@ -969,7 +1003,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb) (3 * tcp_rto_min(sk)) / 4, TCP_RTO_MAX); } - return 1; + return true; } @@ -1074,28 +1108,28 @@ static inline int tcp_fin_time(const struct sock *sk) return fin_timeout; } -static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, - int paws_win) +static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, + int paws_win) { if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win) - return 1; + return true; if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)) - return 1; + return true; /* * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0, * then following tcp messages have valid values. Ignore 0 value, * or else 'negative' tsval might forbid us to accept their packets. */ if (!rx_opt->ts_recent) - return 1; - return 0; + return true; + return false; } -static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt, - int rst) +static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, + int rst) { if (tcp_paws_check(rx_opt, 0)) - return 0; + return false; /* RST segments are not recommended to carry timestamp, and, if they do, it is recommended to ignore PAWS because @@ -1110,8 +1144,8 @@ static inline int tcp_paws_reject(const struct tcp_options_received *rx_opt, However, we can relax time bounds for RST segments to MSL. 
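tcp_paws_check() above accepts a timestamp when it is no older than ts_recent by more than paws_win. A worked example with illustrative values, assuming the TCP_PAWS_WINDOW value of 1 that tcp_paws_discard() passes as paws_win:

/* Worked example for tcp_paws_check() (illustrative values only):
 *
 *   ts_recent = 100000, incoming rcv_tsval = 99999:
 *     (s32)(100000 - 99999) = 1 <= 1      -> true, timestamp acceptable
 *
 *   ts_recent = 100000, incoming rcv_tsval = 99990:
 *     (s32)(100000 - 99990) = 10 > 1      -> falls through; only accepted if
 *     ts_recent is older than 24 days or was never set (ts_recent == 0)
 */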
*/ if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL) - return 0; - return 1; + return false; + return true; } static inline void tcp_mib_init(struct net *net) @@ -1226,7 +1260,7 @@ extern void tcp_put_md5sig_pool(void); extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *); extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, - unsigned header_len); + unsigned int header_len); extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key); @@ -1349,7 +1383,7 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) __skb_unlink(skb, &sk->sk_write_queue); } -static inline int tcp_write_queue_empty(struct sock *sk) +static inline bool tcp_write_queue_empty(struct sock *sk) { return skb_queue_empty(&sk->sk_write_queue); } @@ -1406,7 +1440,7 @@ static inline void tcp_highest_sack_combine(struct sock *sk, /* Determines whether this is a thin stream (which may suffer from * increased latency). Used to trigger latency-reducing mechanisms. */ -static inline unsigned int tcp_stream_is_thin(struct tcp_sock *tp) +static inline bool tcp_stream_is_thin(struct tcp_sock *tp) { return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp); } diff --git a/include/net/tcp_memcontrol.h b/include/net/tcp_memcontrol.h index 48410ff25c9e..7df18bc43a97 100644 --- a/include/net/tcp_memcontrol.h +++ b/include/net/tcp_memcontrol.h @@ -12,8 +12,8 @@ struct tcp_memcontrol { }; struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg); -int tcp_init_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss); -void tcp_destroy_cgroup(struct cgroup *cgrp); +int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss); +void tcp_destroy_cgroup(struct mem_cgroup *memcg); unsigned long long tcp_max_memory(const struct mem_cgroup *memcg); void tcp_prot_mem(struct mem_cgroup *memcg, long val, int idx); #endif /* _TCP_MEMCG_H */ diff --git a/include/net/udp.h b/include/net/udp.h index 5d606d9da9e5..065f379c6503 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -81,7 +81,7 @@ struct udp_table { extern struct udp_table udp_table; extern void udp_table_init(struct udp_table *, const char *); static inline struct udp_hslot *udp_hashslot(struct udp_table *table, - struct net *net, unsigned num) + struct net *net, unsigned int num) { return &table->hash[udp_hashfn(net, num, table->mask)]; } @@ -267,4 +267,8 @@ extern void udp_init(void); extern int udp4_ufo_send_check(struct sk_buff *skb); extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, netdev_features_t features); +extern void udp_encap_enable(void); +#if IS_ENABLED(CONFIG_IPV6) +extern void udpv6_encap_enable(void); +#endif #endif /* _UDP_H */ diff --git a/include/net/wimax.h b/include/net/wimax.h index 322ff4fbdb4a..bbb74f990cab 100644 --- a/include/net/wimax.h +++ b/include/net/wimax.h @@ -423,8 +423,8 @@ struct wimax_dev { int (*op_reset)(struct wimax_dev *wimax_dev); struct rfkill *rfkill; - unsigned rf_hw; - unsigned rf_sw; + unsigned int rf_hw; + unsigned int rf_sw; char name[32]; struct dentry *debugfs_dentry; diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h index ff27f1b078d1..b52bda8d13b1 100644 --- a/include/net/wpan-phy.h +++ b/include/net/wpan-phy.h @@ -25,6 +25,14 @@ #include <linux/mutex.h> #include <linux/bug.h> +/* According to the IEEE 802.15.4 stadard the upper most significant bits of + * the 32-bit channel bitmaps shall be used as an integer value to specify 32 + * possible channel pages. 
The lower 27 bits of the channel bit map shall be + * used as a bit mask to specify channel numbers within a channel page. + */ +#define WPAN_NUM_CHANNELS 27 +#define WPAN_NUM_PAGES 32 + struct wpan_phy { struct mutex pib_lock; @@ -43,7 +51,7 @@ struct wpan_phy { int idx; struct net_device *(*add_iface)(struct wpan_phy *phy, - const char *name); + const char *name, int type); void (*del_iface)(struct wpan_phy *phy, struct net_device *dev); char priv[0] __attribute__((__aligned__(NETDEV_ALIGN))); diff --git a/include/net/x25.h b/include/net/x25.h index a06119a05129..b4a8a8923128 100644 --- a/include/net/x25.h +++ b/include/net/x25.h @@ -305,7 +305,7 @@ static inline void x25_unregister_sysctl(void) {}; #endif /* CONFIG_SYSCTL */ struct x25_skb_cb { - unsigned flags; + unsigned int flags; }; #define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb)) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 96239e78e621..e0a55df5bde8 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -886,15 +886,15 @@ __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli) return port; } -extern int xfrm_selector_match(const struct xfrm_selector *sel, - const struct flowi *fl, - unsigned short family); +extern bool xfrm_selector_match(const struct xfrm_selector *sel, + const struct flowi *fl, + unsigned short family); #ifdef CONFIG_SECURITY_NETWORK_XFRM /* If neither has a context --> match * Otherwise, both must have a context and the sids, doi, alg must match */ -static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) +static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) { return ((!s1 && !s2) || (s1 && s2 && @@ -903,9 +903,9 @@ static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ct (s1->ctx_alg == s2->ctx_alg))); } #else -static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) +static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) { - return 1; + return true; } #endif @@ -1682,8 +1682,9 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m) { - if (m->m | m->v) - NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m); + if ((m->m | m->v) && + nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m)) + goto nla_put_failure; return 0; nla_put_failure: |
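The xfrm_mark_put() hunk above reflects the move away from the old NLA_PUT() macro, which expanded to a hidden goto nla_put_failure, toward explicit, error-checked nla_put() calls. A minimal sketch of the resulting pattern in a hypothetical netlink fill routine; the function and its parameters are illustrative, not from this patch:

#include <net/netlink.h>
#include <net/xfrm.h>

/* Hypothetical fill helper: each attribute put is checked and the error is
 * propagated directly instead of jumping through a macro-hidden goto.
 */
static int example_fill_mark(struct sk_buff *skb, u32 mark_mask, u32 mark_val)
{
	struct xfrm_mark m = { .m = mark_mask, .v = mark_val };

	if ((m.m | m.v) &&
	    nla_put(skb, XFRMA_MARK, sizeof(m), &m))
		return -EMSGSIZE;	/* message buffer too small */

	return 0;
}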