Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/uaccess.h | 20
-rw-r--r--  include/dt-bindings/net/mscc-phy-vsc8531.h | 21
-rw-r--r--  include/linux/bpf.h | 11
-rw-r--r--  include/linux/bpf_verifier.h | 102
-rw-r--r--  include/linux/can/dev.h | 3
-rw-r--r--  include/linux/cec-funcs.h | 78
-rw-r--r--  include/linux/cec.h | 5
-rw-r--r--  include/linux/compiler.h | 7
-rw-r--r--  include/linux/cpuhotplug.h | 2
-rw-r--r--  include/linux/dma-mapping.h | 2
-rw-r--r--  include/linux/efi.h | 27
-rw-r--r--  include/linux/fsnotify_backend.h | 6
-rw-r--r--  include/linux/if_link.h | 1
-rw-r--r--  include/linux/ipv6.h | 1
-rw-r--r--  include/linux/irq.h | 10
-rw-r--r--  include/linux/ktime.h | 5
-rw-r--r--  include/linux/mlx4/cmd.h | 3
-rw-r--r--  include/linux/mlx4/device.h | 3
-rw-r--r--  include/linux/mlx4/qp.h | 2
-rw-r--r--  include/linux/mroute.h | 2
-rw-r--r--  include/linux/mroute6.h | 2
-rw-r--r--  include/linux/netdevice.h | 22
-rw-r--r--  include/linux/netfilter.h | 63
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h | 4
-rw-r--r--  include/linux/netfilter/nf_conntrack_proto_gre.h | 64
-rw-r--r--  include/linux/netfilter_ingress.h | 18
-rw-r--r--  include/linux/pagemap.h | 39
-rw-r--r--  include/linux/phy.h | 3
-rw-r--r--  include/linux/property.h | 2
-rw-r--r--  include/linux/ptp_clock_kernel.h | 5
-rw-r--r--  include/linux/qed/common_hsi.h | 1
-rw-r--r--  include/linux/qed/qed_if.h | 9
-rw-r--r--  include/linux/qed/qed_ll2_if.h | 139
-rw-r--r--  include/linux/qed/qed_roce_if.h | 604
-rw-r--r--  include/linux/qed/qede_roce.h | 88
-rw-r--r--  include/linux/qed/rdma_common.h | 1
-rw-r--r--  include/linux/rhashtable.h | 491
-rw-r--r--  include/linux/skbuff.h | 15
-rw-r--r--  include/linux/swap.h | 2
-rw-r--r--  include/linux/tcp.h | 14
-rw-r--r--  include/linux/uio.h | 2
-rw-r--r--  include/linux/win_minmax.h | 37
-rw-r--r--  include/media/cec.h | 2
-rw-r--r--  include/net/addrconf.h | 3
-rw-r--r--  include/net/bluetooth/bluetooth.h | 4
-rw-r--r--  include/net/bluetooth/hci.h | 7
-rw-r--r--  include/net/bluetooth/hci_core.h | 11
-rw-r--r--  include/net/bluetooth/hci_mon.h | 4
-rw-r--r--  include/net/bluetooth/mgmt.h | 24
-rw-r--r--  include/net/dsa.h | 2
-rw-r--r--  include/net/if_inet6.h | 1
-rw-r--r--  include/net/inet_connection_sock.h | 4
-rw-r--r--  include/net/ip.h | 23
-rw-r--r--  include/net/ip6_route.h | 3
-rw-r--r--  include/net/ip_fib.h | 49
-rw-r--r--  include/net/mpls.h | 15
-rw-r--r--  include/net/ncsi.h | 5
-rw-r--r--  include/net/netfilter/br_netfilter.h | 6
-rw-r--r--  include/net/netfilter/nf_conntrack_l3proto.h | 4
-rw-r--r--  include/net/netfilter/nf_conntrack_synproxy.h | 14
-rw-r--r--  include/net/netfilter/nf_log.h | 11
-rw-r--r--  include/net/netfilter/nf_queue.h | 69
-rw-r--r--  include/net/netfilter/nf_tables.h | 19
-rw-r--r--  include/net/netfilter/nf_tables_bridge.h | 7
-rw-r--r--  include/net/netfilter/nf_tables_core.h | 3
-rw-r--r--  include/net/netfilter/nf_tables_ipv4.h | 43
-rw-r--r--  include/net/netfilter/nf_tables_ipv6.h | 53
-rw-r--r--  include/net/netns/netfilter.h | 2
-rw-r--r--  include/net/pkt_cls.h | 18
-rw-r--r--  include/net/sch_generic.h | 72
-rw-r--r--  include/net/sctp/sctp.h | 10
-rw-r--r--  include/net/sctp/sm.h | 94
-rw-r--r--  include/net/sctp/structs.h | 13
-rw-r--r--  include/net/sock.h | 10
-rw-r--r--  include/net/switchdev.h | 40
-rw-r--r--  include/net/tc_act/tc_ife.h | 2
-rw-r--r--  include/net/tc_act/tc_vlan.h | 25
-rw-r--r--  include/net/tcp.h | 53
-rw-r--r--  include/net/xfrm.h | 4
-rw-r--r--  include/rxrpc/packet.h | 1
-rw-r--r--  include/scsi/scsi_host.h | 5
-rw-r--r--  include/trace/events/rxrpc.h | 492
-rw-r--r--  include/uapi/linux/bpf.h | 28
-rw-r--r--  include/uapi/linux/if_link.h | 29
-rw-r--r--  include/uapi/linux/if_tunnel.h | 1
-rw-r--r--  include/uapi/linux/inet_diag.h | 13
-rw-r--r--  include/uapi/linux/ipv6.h | 1
-rw-r--r--  include/uapi/linux/netfilter/nf_log.h | 12
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 45
-rw-r--r--  include/uapi/linux/netfilter/nfnetlink_conntrack.h | 8
-rw-r--r--  include/uapi/linux/netfilter/xt_hashlimit.h | 23
-rw-r--r--  include/uapi/linux/pkt_cls.h | 1
-rw-r--r--  include/uapi/linux/pkt_sched.h | 4
-rw-r--r--  include/uapi/linux/tc_act/tc_ife.h | 3
-rw-r--r--  include/uapi/linux/tc_act/tc_vlan.h | 1
-rw-r--r--  include/uapi/linux/tcp.h | 3
-rw-r--r--  include/uapi/linux/xfrm.h | 2
97 files changed, 2823 insertions, 514 deletions
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 5dea1fb6979c..6df9b0749671 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -231,14 +231,18 @@ extern int __put_user_bad(void) __attribute__((noreturn));
might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*ptr)) ? \
__get_user((x), (__typeof__(*(ptr)) *)__p) : \
- -EFAULT; \
+ ((x) = (__typeof__(*(ptr)))0,-EFAULT); \
})
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size = __copy_from_user(x, ptr, size);
- return size ? -EFAULT : size;
+ size_t n = __copy_from_user(x, ptr, size);
+ if (unlikely(n)) {
+ memset(x + (size - n), 0, n);
+ return -EFAULT;
+ }
+ return 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -258,11 +262,13 @@ extern int __get_user_bad(void) __attribute__((noreturn));
static inline long copy_from_user(void *to,
const void __user * from, unsigned long n)
{
+ unsigned long res = n;
might_fault();
- if (access_ok(VERIFY_READ, from, n))
- return __copy_from_user(to, from, n);
- else
- return n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = __copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
static inline long copy_to_user(void __user *to,
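A minimal sketch of the class of leak the zeroing above mitigates, assuming a hypothetical caller (none of these names are in the patch):

static long example_get_cfg(void __user *ubuf, u64 __user *ureply)
{
	u64 cfg;	/* uninitialized kernel stack */
	long err = copy_from_user(&cfg, ubuf, sizeof(cfg));

	/* Buggy pattern: 'cfg' is used even when err != 0. With the
	 * memset above, the unread bytes are zeroes rather than stale
	 * stack contents, so nothing sensitive reaches user space. */
	if (put_user(cfg, ureply))
		return -EFAULT;
	return err ? -EFAULT : 0;
}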
diff --git a/include/dt-bindings/net/mscc-phy-vsc8531.h b/include/dt-bindings/net/mscc-phy-vsc8531.h
new file mode 100644
index 000000000000..2383dd20ff43
--- /dev/null
+++ b/include/dt-bindings/net/mscc-phy-vsc8531.h
@@ -0,0 +1,21 @@
+/*
+ * Device Tree constants for Microsemi VSC8531 PHY
+ *
+ * Author: Nagaraju Lakkaraju
+ *
+ * License: Dual MIT/GPL
+ * Copyright (c) 2016 Microsemi Corporation
+ */
+
+#ifndef _DT_BINDINGS_MSCC_VSC8531_H
+#define _DT_BINDINGS_MSCC_VSC8531_H
+
+/* MAC interface Edge rate control VDDMAC in milli Volts */
+#define MSCC_VDDMAC_3300 3300
+#define MSCC_VDDMAC_2500 2500
+#define MSCC_VDDMAC_1800 1800
+#define MSCC_VDDMAC_1500 1500
+#define MSCC_VDDMAC_MAX 4
+#define MSCC_SLOWDOWN_MAX 8
+
+#endif
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9a904f63f8c1..c201017b5730 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -96,6 +96,7 @@ enum bpf_return_type {
struct bpf_func_proto {
u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool gpl_only;
+ bool pkt_access;
enum bpf_return_type ret_type;
enum bpf_arg_type arg1_type;
enum bpf_arg_type arg2_type;
@@ -138,6 +139,13 @@ enum bpf_reg_type {
*/
PTR_TO_PACKET,
PTR_TO_PACKET_END, /* skb->data + headlen */
+
+ /* PTR_TO_MAP_VALUE_ADJ is used for doing pointer math inside of a map
+ * elem value. We only allow this if we can statically verify that
+ * access from this register are going to fall within the size of the
+ * map element.
+ */
+ PTR_TO_MAP_VALUE_ADJ,
};
struct bpf_prog;
@@ -151,7 +159,8 @@ struct bpf_verifier_ops {
*/
bool (*is_valid_access)(int off, int size, enum bpf_access_type type,
enum bpf_reg_type *reg_type);
-
+ int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
+ const struct bpf_prog *prog);
u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
int src_reg, int ctx_off,
struct bpf_insn *insn, struct bpf_prog *prog);
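A hedged restricted-C fragment of the access pattern PTR_TO_MAP_VALUE_ADJ is meant to admit (the map name and 64-byte value size are assumptions for illustration):

	char *value = bpf_map_lookup_elem(&example_map, &key); /* value_size == 64 */

	if (!value)
		return 0;
	offset = pkt_len & 0x3f;	/* verifier range: 0..63 */
	return *(value + offset);	/* register becomes PTR_TO_MAP_VALUE_ADJ;
					 * access provably inside the element */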
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
new file mode 100644
index 000000000000..7035b997aaa5
--- /dev/null
+++ b/include/linux/bpf_verifier.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#ifndef _LINUX_BPF_VERIFIER_H
+#define _LINUX_BPF_VERIFIER_H 1
+
+#include <linux/bpf.h> /* for enum bpf_reg_type */
+#include <linux/filter.h> /* for MAX_BPF_STACK */
+
+ /* Just some arbitrary values so we can safely do math without overflowing and
+ * are obviously wrong for any sort of memory access.
+ */
+#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+
+struct bpf_reg_state {
+ enum bpf_reg_type type;
+ /*
+ * Used to determine if any memory access using this register will
+ * result in a bad access.
+ */
+ u64 min_value, max_value;
+ union {
+ /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
+ s64 imm;
+
+ /* valid when type == PTR_TO_PACKET* */
+ struct {
+ u32 id;
+ u16 off;
+ u16 range;
+ };
+
+ /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
+ * PTR_TO_MAP_VALUE_OR_NULL
+ */
+ struct bpf_map *map_ptr;
+ };
+};
+
+enum bpf_stack_slot_type {
+ STACK_INVALID, /* nothing was stored in this stack slot */
+ STACK_SPILL, /* register spilled into stack */
+ STACK_MISC /* BPF program wrote some data into this slot */
+};
+
+#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
+
+/* state of the program:
+ * type of all registers and stack info
+ */
+struct bpf_verifier_state {
+ struct bpf_reg_state regs[MAX_BPF_REG];
+ u8 stack_slot_type[MAX_BPF_STACK];
+ struct bpf_reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE];
+};
+
+/* linked list of verifier states used to prune search */
+struct bpf_verifier_state_list {
+ struct bpf_verifier_state state;
+ struct bpf_verifier_state_list *next;
+};
+
+struct bpf_insn_aux_data {
+ enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+};
+
+#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+
+struct bpf_verifier_env;
+struct bpf_ext_analyzer_ops {
+ int (*insn_hook)(struct bpf_verifier_env *env,
+ int insn_idx, int prev_insn_idx);
+};
+
+/* single container for all structs
+ * one verifier_env per bpf_check() call
+ */
+struct bpf_verifier_env {
+ struct bpf_prog *prog; /* eBPF program being verified */
+ struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
+ int stack_size; /* number of states to be processed */
+ struct bpf_verifier_state cur_state; /* current verifier state */
+ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
+ const struct bpf_ext_analyzer_ops *analyzer_ops; /* external analyzer ops */
+ void *analyzer_priv; /* pointer to external analyzer's private data */
+ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
+ u32 used_map_cnt; /* number of used maps */
+ u32 id_gen; /* used to generate unique reg IDs */
+ bool allow_ptr_leaks;
+ bool seen_direct_write;
+ bool varlen_map_value_access;
+ struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
+};
+
+int bpf_analyzer(struct bpf_prog *prog, const struct bpf_ext_analyzer_ops *ops,
+ void *priv);
+
+#endif /* _LINUX_BPF_VERIFIER_H */
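A hedged sketch of how an external analyzer might plug into the verifier via bpf_analyzer() (a hypothetical consumer, e.g. an offload backend vetting each instruction it can translate):

static int example_insn_hook(struct bpf_verifier_env *env,
			     int insn_idx, int prev_insn_idx)
{
	const struct bpf_insn *insn = &env->prog->insnsi[insn_idx];

	/* imaginary backend: refuse helper calls it cannot offload */
	if (BPF_CLASS(insn->code) == BPF_JMP &&
	    BPF_OP(insn->code) == BPF_CALL)
		return -EOPNOTSUPP;
	return 0;
}

static const struct bpf_ext_analyzer_ops example_analyzer_ops = {
	.insn_hook = example_insn_hook,
};

/* err = bpf_analyzer(prog, &example_analyzer_ops, priv); */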
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5261751f6bd4..5f5270941ba0 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -32,6 +32,7 @@ enum can_mode {
* CAN common private data
*/
struct can_priv {
+ struct net_device *dev;
struct can_device_stats can_stats;
struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
- struct timer_list restart_timer;
+ struct delayed_work restart_work;
int (*do_set_bittiming)(struct net_device *dev);
int (*do_set_data_bittiming)(struct net_device *dev);
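A sketch of the driver-side pattern the timer-to-work conversion enables (assumed example, not from the patch): a delayed_work handler may sleep, unlike the old timer callback, which matters for SPI-attached controllers.

static void example_restart_work(struct work_struct *work)
{
	struct can_priv *priv = container_of(work, struct can_priv,
					     restart_work.work);

	/* may sleep here, e.g. blocking bus I/O to the controller */
	netif_carrier_on(priv->dev);	/* uses the new priv->dev field */
}

/* schedule_delayed_work(&priv->restart_work,
 *			 msecs_to_jiffies(priv->restart_ms)); */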
diff --git a/include/linux/cec-funcs.h b/include/linux/cec-funcs.h
index 82c3d3b7269d..138bbf721e70 100644
--- a/include/linux/cec-funcs.h
+++ b/include/linux/cec-funcs.h
@@ -162,10 +162,11 @@ static inline void cec_msg_standby(struct cec_msg *msg)
/* One Touch Record Feature */
-static inline void cec_msg_record_off(struct cec_msg *msg)
+static inline void cec_msg_record_off(struct cec_msg *msg, bool reply)
{
msg->len = 2;
msg->msg[1] = CEC_MSG_RECORD_OFF;
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}
struct cec_op_arib_data {
@@ -227,7 +228,7 @@ static inline void cec_set_digital_service_id(__u8 *msg,
if (digital->service_id_method == CEC_OP_SERVICE_ID_METHOD_BY_CHANNEL) {
*msg++ = (digital->channel.channel_number_fmt << 2) |
(digital->channel.major >> 8);
- *msg++ = digital->channel.major && 0xff;
+ *msg++ = digital->channel.major & 0xff;
*msg++ = digital->channel.minor >> 8;
*msg++ = digital->channel.minor & 0xff;
*msg++ = 0;
@@ -323,6 +324,7 @@ static inline void cec_msg_record_on_phys_addr(struct cec_msg *msg,
}
static inline void cec_msg_record_on(struct cec_msg *msg,
+ bool reply,
const struct cec_op_record_src *rec_src)
{
switch (rec_src->type) {
@@ -346,6 +348,7 @@ static inline void cec_msg_record_on(struct cec_msg *msg,
rec_src->ext_phys_addr.phys_addr);
break;
}
+ msg->reply = reply ? CEC_MSG_RECORD_STATUS : 0;
}
static inline void cec_ops_record_on(const struct cec_msg *msg,
@@ -1141,6 +1144,75 @@ static inline void cec_msg_give_device_vendor_id(struct cec_msg *msg,
msg->reply = reply ? CEC_MSG_DEVICE_VENDOR_ID : 0;
}
+static inline void cec_msg_vendor_command(struct cec_msg *msg,
+ __u8 size, const __u8 *vendor_cmd)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND;
+ memcpy(msg->msg + 2, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *vendor_cmd = msg->msg + 2;
+}
+
+static inline void cec_msg_vendor_command_with_id(struct cec_msg *msg,
+ __u32 vendor_id, __u8 size,
+ const __u8 *vendor_cmd)
+{
+ if (size > 11)
+ size = 11;
+ msg->len = 5 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_COMMAND_WITH_ID;
+ msg->msg[2] = vendor_id >> 16;
+ msg->msg[3] = (vendor_id >> 8) & 0xff;
+ msg->msg[4] = vendor_id & 0xff;
+ memcpy(msg->msg + 5, vendor_cmd, size);
+}
+
+static inline void cec_ops_vendor_command_with_id(const struct cec_msg *msg,
+ __u32 *vendor_id, __u8 *size,
+ const __u8 **vendor_cmd)
+{
+ *size = msg->len - 5;
+
+ if (*size > 11)
+ *size = 11;
+ *vendor_id = (msg->msg[2] << 16) | (msg->msg[3] << 8) | msg->msg[4];
+ *vendor_cmd = msg->msg + 5;
+}
+
+static inline void cec_msg_vendor_remote_button_down(struct cec_msg *msg,
+ __u8 size,
+ const __u8 *rc_code)
+{
+ if (size > 14)
+ size = 14;
+ msg->len = 2 + size;
+ msg->msg[1] = CEC_MSG_VENDOR_REMOTE_BUTTON_DOWN;
+ memcpy(msg->msg + 2, rc_code, size);
+}
+
+static inline void cec_ops_vendor_remote_button_down(const struct cec_msg *msg,
+ __u8 *size,
+ const __u8 **rc_code)
+{
+ *size = msg->len - 2;
+
+ if (*size > 14)
+ *size = 14;
+ *rc_code = msg->msg + 2;
+}
+
static inline void cec_msg_vendor_remote_button_up(struct cec_msg *msg)
{
msg->len = 2;
@@ -1277,7 +1349,7 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
msg->len += 4;
msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
(ui_cmd->channel_identifier.major >> 8);
- msg->msg[4] = ui_cmd->channel_identifier.major && 0xff;
+ msg->msg[4] = ui_cmd->channel_identifier.major & 0xff;
msg->msg[5] = ui_cmd->channel_identifier.minor >> 8;
msg->msg[6] = ui_cmd->channel_identifier.minor & 0xff;
break;
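A worked example of the operator fix above: with channel major 0x1234, the old expression `major && 0xff` is a logical AND and stores 1, while the corrected `major & 0xff` stores the intended low byte, 0x34.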
diff --git a/include/linux/cec.h b/include/linux/cec.h
index b3e22893a002..851968e803fa 100644
--- a/include/linux/cec.h
+++ b/include/linux/cec.h
@@ -364,7 +364,7 @@ struct cec_caps {
* @num_log_addrs: how many logical addresses should be claimed. Set by the
* caller.
* @vendor_id: the vendor ID of the device. Set by the caller.
- * @flags: set to 0.
+ * @flags: flags (CEC_LOG_ADDRS_FL_*). Set by the caller.
* @osd_name: the OSD name of the device. Set by the caller.
* @primary_device_type: the primary device type for each logical address.
* Set by the caller.
@@ -389,6 +389,9 @@ struct cec_log_addrs {
__u8 features[CEC_MAX_LOG_ADDRS][12];
};
+/* Allow a fallback to unregistered */
+#define CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK (1 << 0)
+
/* Events */
/* Event that occurs when the adapter state changes */
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 436aa4e42221..668569844d37 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -527,13 +527,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
* object's lifetime is managed by something other than RCU. That
* "something other" might be reference counting or simple immortality.
*
- * The seemingly unused size_t variable is to validate @p is indeed a pointer
- * type by making sure it can be dereferenced.
+ * The seemingly unused variable ___typecheck_p validates that @p is
+ * indeed a pointer type: it is declared as a pointer to typeof(*p).
+ * Taking a pointer to typeof(*p) keeps the check valid even when @p
+ * is a void pointer, since typeof(*p) is then plain void.
*/
#define lockless_dereference(p) \
({ \
typeof(p) _________p1 = READ_ONCE(p); \
- size_t __maybe_unused __size_of_ptr = sizeof(*(p)); \
+ typeof(*(p)) *___typecheck_p __maybe_unused; \
smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
(_________p1); \
})
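The check in isolation, as a minimal sketch assuming GCC's typeof extension:

	int v;
	int *p = &v;
	typeof(*(p)) *___ok;		/* declares an int * - compiles */
	void *q;
	typeof(*(q)) *___also_ok;	/* void * - still well-formed */
	/* typeof(*(v)) *___bad; - build error: v is not a pointer */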
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 242bf530edfc..34bd80512a0c 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -1,6 +1,8 @@
#ifndef __CPUHOTPLUG_H
#define __CPUHOTPLUG_H
+#include <linux/types.h>
+
enum cpuhp_state {
CPUHP_OFFLINE,
CPUHP_CREATE_THREADS,
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 66533e18276c..dc69df04abc1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
#define dma_mmap_writecombine dma_mmap_wc
#endif
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 7f5a58225385..0148a3046b48 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -118,6 +118,15 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
+struct efi_boot_memmap {
+ efi_memory_desc_t **map;
+ unsigned long *map_size;
+ unsigned long *desc_size;
+ u32 *desc_ver;
+ unsigned long *key_ptr;
+ unsigned long *buff_size;
+};
+
/*
* EFI capsule flags
*/
@@ -946,7 +955,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm,
/* Iterate through an efi_memory_map */
#define for_each_efi_memory_desc_in_map(m, md) \
for ((md) = (m)->map; \
- ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
+ (md) && ((void *)(md) + (m)->desc_size) <= (m)->map_end; \
(md) = (void *)(md) + (m)->desc_size)
/**
@@ -1371,11 +1380,7 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
efi_loaded_image_t *image, int *cmd_line_len);
efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- efi_memory_desc_t **map,
- unsigned long *map_size,
- unsigned long *desc_size,
- u32 *desc_ver,
- unsigned long *key_ptr);
+ struct efi_boot_memmap *map);
efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
unsigned long size, unsigned long align,
@@ -1457,4 +1462,14 @@ extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
arch_efi_call_virt_teardown(); \
})
+typedef efi_status_t (*efi_exit_boot_map_processing)(
+ efi_system_table_t *sys_table_arg,
+ struct efi_boot_memmap *map,
+ void *priv);
+
+efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
+ void *handle,
+ struct efi_boot_memmap *map,
+ void *priv,
+ efi_exit_boot_map_processing priv_func);
#endif /* _LINUX_EFI_H */
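A hedged sketch of the callback flow the new efi_exit_boot_services() enables (all example_* names are assumptions, not stub code from this series):

struct example_boot_priv {
	unsigned long final_map_size;	/* hypothetical */
};

static efi_status_t example_memmap_cb(efi_system_table_t *sys_table_arg,
				      struct efi_boot_memmap *map,
				      void *priv)
{
	struct example_boot_priv *p = priv;

	/* runs against the final memory map, so the map key handed to
	 * ExitBootServices() cannot be stale */
	p->final_map_size = *map->map_size;
	return EFI_SUCCESS;
}

/* status = efi_exit_boot_services(sys_table, handle, &map, &p,
 *				   example_memmap_cb); */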
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 58205f33af02..7268ed076be8 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -148,6 +148,7 @@ struct fsnotify_group {
#define FS_PRIO_1 1 /* fanotify content based access control */
#define FS_PRIO_2 2 /* fanotify pre-content access */
unsigned int priority;
+ bool shutdown; /* group is being shut down, don't queue more events */
/* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
struct mutex mark_mutex; /* protect marks_list */
@@ -179,7 +180,6 @@ struct fsnotify_group {
spinlock_t access_lock;
struct list_head access_list;
wait_queue_head_t access_waitq;
- atomic_t bypass_perm;
#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
int f_flags;
unsigned int max_marks;
@@ -292,6 +292,8 @@ extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *op
extern void fsnotify_get_group(struct fsnotify_group *group);
/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
+/* group destruction begins, stop queuing new events */
+extern void fsnotify_group_stop_queueing(struct fsnotify_group *group);
/* destroy group */
extern void fsnotify_destroy_group(struct fsnotify_group *group);
/* fasync handler function */
@@ -304,8 +306,6 @@ extern int fsnotify_add_event(struct fsnotify_group *group,
struct fsnotify_event *event,
int (*merge)(struct list_head *,
struct fsnotify_event *));
-/* Remove passed event from groups notification queue */
-extern void fsnotify_remove_event(struct fsnotify_group *group, struct fsnotify_event *event);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
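A short sketch of the teardown ordering inferred from the declarations above (the call sites live in fs/notify/):

	fsnotify_group_stop_queueing(group);	/* sets group->shutdown */
	/* queued permission events get flushed/answered here; new
	 * events are refused instead of racing with destruction */
	fsnotify_destroy_group(group);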
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index f923d15b432c..0b17c585b5cd 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -25,5 +25,6 @@ struct ifla_vf_info {
__u32 max_tx_rate;
__u32 rss_query_en;
__u32 trusted;
+ __be16 vlan_proto;
};
#endif /* _LINUX_IF_LINK_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index c6dbcd84a2c7..7e9a789be5e0 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -18,6 +18,7 @@ struct ipv6_devconf {
__s32 dad_transmits;
__s32 rtr_solicits;
__s32 rtr_solicit_interval;
+ __s32 rtr_solicit_max_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
__s32 mldv1_unsolicited_report_interval;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b52424eaa0ed..0ac26c892fe2 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -945,6 +945,16 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
#endif
+/*
+ * The irqsave variants are for usage in non interrupt code. Do not use
+ * them in irq_chip callbacks. Use irq_gc_lock() instead.
+ */
+#define irq_gc_lock_irqsave(gc, flags) \
+ raw_spin_lock_irqsave(&(gc)->lock, flags)
+
+#define irq_gc_unlock_irqrestore(gc, flags) \
+ raw_spin_unlock_irqrestore(&(gc)->lock, flags)
+
static inline void irq_reg_writel(struct irq_chip_generic *gc,
u32 val, int reg_offset)
{
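A short usage sketch for the new irqsave variants, per the comment above (gc is an assumed struct irq_chip_generic pointer in non-interrupt code such as a syscore suspend path):

	unsigned long flags;

	irq_gc_lock_irqsave(gc, flags);
	/* ... access generic-chip state with interrupts disabled ... */
	irq_gc_unlock_irqrestore(gc, flags);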
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 2b6a204bd8d4..aa118bad1407 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -231,6 +231,11 @@ static inline ktime_t ktime_sub_us(const ktime_t kt, const u64 usec)
return ktime_sub_ns(kt, usec * NSEC_PER_USEC);
}
+static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
+{
+ return ktime_sub_ns(kt, msec * NSEC_PER_MSEC);
+}
+
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 116b284bc4ce..1f3568694a57 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -309,7 +309,8 @@ int mlx4_get_vf_stats(struct mlx4_dev *dev, int port, int vf_idx,
struct ifla_vf_stats *vf_stats);
u32 mlx4_comm_get_version(void);
int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac);
-int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos);
+int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan,
+ u8 qos, __be16 proto);
int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
int max_tx_rate);
int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 42da3552f7cb..59b50d3eedb4 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -221,6 +221,7 @@ enum {
MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER = 1ULL << 34,
MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT = 1ULL << 35,
+ MLX4_DEV_CAP_FLAG2_SVLAN_BY_QP = 1ULL << 36,
};
enum {
@@ -1371,6 +1372,8 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port,
int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable);
int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
+int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
+ bool *vlan_offload_disabled);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index deaa2217214d..b4ee8f62ce8d 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -160,6 +160,7 @@ struct mlx4_qp_path {
enum { /* fl */
MLX4_FL_CV = 1 << 6,
+ MLX4_FL_SV = 1 << 5,
MLX4_FL_ETH_HIDE_CQE_VLAN = 1 << 2,
MLX4_FL_ETH_SRC_CHECK_MC_LB = 1 << 1,
MLX4_FL_ETH_SRC_CHECK_UC_LB = 1 << 0,
@@ -267,6 +268,7 @@ enum {
MLX4_UPD_QP_PATH_MASK_FVL_RX = 16 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_UC_LB = 18 + 32,
MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB = 19 + 32,
+ MLX4_UPD_QP_PATH_MASK_SV = 22 + 32,
};
enum { /* param3 */
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index d351fd3e1049..e5fb81376e92 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -120,5 +120,5 @@ struct mfc_cache {
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
- struct rtmsg *rtm, int nowait);
+ struct rtmsg *rtm, int nowait, u32 portid);
#endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 3987b64040c5..19a1c0c2993b 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
- struct rtmsg *rtm, int nowait);
+ struct rtmsg *rtm, int nowait, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2095b6ab3661..136ae6bbe81e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -789,6 +789,7 @@ enum {
TC_SETUP_CLSU32,
TC_SETUP_CLSFLOWER,
TC_SETUP_MATCHALL,
+ TC_SETUP_CLSBPF,
};
struct tc_cls_u32_offload;
@@ -800,6 +801,7 @@ struct tc_to_netdev {
struct tc_cls_u32_offload *cls_u32;
struct tc_cls_flower_offload *cls_flower;
struct tc_cls_matchall_offload *cls_mall;
+ struct tc_cls_bpf_offload *cls_bpf;
};
};
@@ -924,6 +926,14 @@ struct netdev_xdp {
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
+ * bool (*ndo_has_offload_stats)(int attr_id)
+ * Return true if this device supports offload stats of this attr_id.
+ *
+ * int (*ndo_get_offload_stats)(int attr_id, const struct net_device *dev,
+ * void *attr_data)
+ * Get statistics for offload operations by attr_id. Write it into the
+ * attr_data pointer.
+ *
* int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device supports VLAN filtering this function is called when a
* VLAN id is registered.
@@ -936,7 +946,8 @@ struct netdev_xdp {
*
* SR-IOV management functions.
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
- * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
+ * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan,
+ * u8 qos, __be16 proto);
* int (*ndo_set_vf_rate)(struct net_device *dev, int vf, int min_tx_rate,
* int max_tx_rate);
* int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
@@ -1155,6 +1166,10 @@ struct net_device_ops {
struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
+ bool (*ndo_has_offload_stats)(int attr_id);
+ int (*ndo_get_offload_stats)(int attr_id,
+ const struct net_device *dev,
+ void *attr_data);
struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
@@ -1173,7 +1188,8 @@ struct net_device_ops {
int (*ndo_set_vf_mac)(struct net_device *dev,
int queue, u8 *mac);
int (*ndo_set_vf_vlan)(struct net_device *dev,
- int queue, u16 vlan, u8 qos);
+ int queue, u16 vlan,
+ u8 qos, __be16 proto);
int (*ndo_set_vf_rate)(struct net_device *dev,
int vf, int min_tx_rate,
int max_tx_rate);
@@ -1783,7 +1799,7 @@ struct net_device {
#endif
struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
- struct list_head nf_hooks_ingress;
+ struct nf_hook_entry __rcu *nf_hooks_ingress;
#endif
unsigned char broadcast[MAX_ADDR_LEN];
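A hedged driver-side sketch of the new offload-stats pair; the attr id is assumed to be the IFLA_OFFLOAD_XSTATS_CPU_HIT value this series adds to uapi if_link.h, and everything else is hypothetical:

static bool example_has_offload_stats(int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int example_get_offload_stats(int attr_id,
				     const struct net_device *dev,
				     void *attr_data)
{
	struct rtnl_link_stats64 *stats = attr_data;

	if (attr_id != IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return -EINVAL;
	example_hw_read_counters(dev, stats);	/* hypothetical helper */
	return 0;
}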
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 9230f9aee896..abc7fdcb9eb1 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -55,12 +55,34 @@ struct nf_hook_state {
struct net_device *out;
struct sock *sk;
struct net *net;
- struct list_head *hook_list;
+ struct nf_hook_entry __rcu *hook_entries;
int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};
+typedef unsigned int nf_hookfn(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state);
+struct nf_hook_ops {
+ struct list_head list;
+
+ /* User fills in from here down. */
+ nf_hookfn *hook;
+ struct net_device *dev;
+ void *priv;
+ u_int8_t pf;
+ unsigned int hooknum;
+ /* Hooks are ordered in ascending priority. */
+ int priority;
+};
+
+struct nf_hook_entry {
+ struct nf_hook_entry __rcu *next;
+ struct nf_hook_ops ops;
+ const struct nf_hook_ops *orig_ops;
+};
+
static inline void nf_hook_state_init(struct nf_hook_state *p,
- struct list_head *hook_list,
+ struct nf_hook_entry *hook_entry,
unsigned int hook,
int thresh, u_int8_t pf,
struct net_device *indev,
@@ -76,26 +98,11 @@ static inline void nf_hook_state_init(struct nf_hook_state *p,
p->out = outdev;
p->sk = sk;
p->net = net;
- p->hook_list = hook_list;
+ RCU_INIT_POINTER(p->hook_entries, hook_entry);
p->okfn = okfn;
}
-typedef unsigned int nf_hookfn(void *priv,
- struct sk_buff *skb,
- const struct nf_hook_state *state);
-
-struct nf_hook_ops {
- struct list_head list;
- /* User fills in from here down. */
- nf_hookfn *hook;
- struct net_device *dev;
- void *priv;
- u_int8_t pf;
- unsigned int hooknum;
- /* Hooks are ordered in ascending priority. */
- int priority;
-};
struct nf_sockopt_ops {
struct list_head list;
@@ -133,6 +140,8 @@ int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
+int _nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
+void _nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
/* Functions to register get/setsockopt ranges (non-inclusive). You
need to check permissions yourself! */
@@ -161,7 +170,8 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
int (*okfn)(struct net *, struct sock *, struct sk_buff *),
int thresh)
{
- struct list_head *hook_list;
+ struct nf_hook_entry *hook_head;
+ int ret = 1;
#ifdef HAVE_JUMP_LABEL
if (__builtin_constant_p(pf) &&
@@ -170,16 +180,19 @@ static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
return 1;
#endif
- hook_list = &net->nf.hooks[pf][hook];
-
- if (!list_empty(hook_list)) {
+ rcu_read_lock();
+ hook_head = rcu_dereference(net->nf.hooks[pf][hook]);
+ if (hook_head) {
struct nf_hook_state state;
- nf_hook_state_init(&state, hook_list, hook, thresh,
+ nf_hook_state_init(&state, hook_head, hook, thresh,
pf, indev, outdev, sk, net, okfn);
- return nf_hook_slow(skb, &state);
+
+ ret = nf_hook_slow(skb, &state);
}
- return 1;
+ rcu_read_unlock();
+
+ return ret;
}
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
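An illustrative walk of the new RCU-protected singly-linked hook chain - a sketch of the kind of traversal nf_hook_slow() now performs, not the actual implementation:

static unsigned int example_run_hooks(struct nf_hook_entry *entry,
				      struct sk_buff *skb,
				      struct nf_hook_state *state)
{
	unsigned int verdict = NF_ACCEPT;

	while (entry) {
		verdict = entry->ops.hook(entry->ops.priv, skb, state);
		if (verdict != NF_ACCEPT)
			break;
		entry = rcu_dereference(entry->next);
	}
	return verdict;
}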
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 275505792664..1d1ef4e20512 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -4,13 +4,9 @@
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
- unsigned int searched;
unsigned int found;
- unsigned int new;
unsigned int invalid;
unsigned int ignore;
- unsigned int delete;
- unsigned int delete_list;
unsigned int insert;
unsigned int insert_failed;
unsigned int drop;
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h
index df78dc2b5524..dee0acd0dd31 100644
--- a/include/linux/netfilter/nf_conntrack_proto_gre.h
+++ b/include/linux/netfilter/nf_conntrack_proto_gre.h
@@ -1,68 +1,8 @@
#ifndef _CONNTRACK_PROTO_GRE_H
#define _CONNTRACK_PROTO_GRE_H
#include <asm/byteorder.h>
-
-/* GRE PROTOCOL HEADER */
-
-/* GRE Version field */
-#define GRE_VERSION_1701 0x0
-#define GRE_VERSION_PPTP 0x1
-
-/* GRE Protocol field */
-#define GRE_PROTOCOL_PPTP 0x880B
-
-/* GRE Flags */
-#define GRE_FLAG_C 0x80
-#define GRE_FLAG_R 0x40
-#define GRE_FLAG_K 0x20
-#define GRE_FLAG_S 0x10
-#define GRE_FLAG_A 0x80
-
-#define GRE_IS_C(f) ((f)&GRE_FLAG_C)
-#define GRE_IS_R(f) ((f)&GRE_FLAG_R)
-#define GRE_IS_K(f) ((f)&GRE_FLAG_K)
-#define GRE_IS_S(f) ((f)&GRE_FLAG_S)
-#define GRE_IS_A(f) ((f)&GRE_FLAG_A)
-
-/* GRE is a mess: Four different standards */
-struct gre_hdr {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
- __u16 rec:3,
- srr:1,
- seq:1,
- key:1,
- routing:1,
- csum:1,
- version:3,
- reserved:4,
- ack:1;
-#elif defined(__BIG_ENDIAN_BITFIELD)
- __u16 csum:1,
- routing:1,
- key:1,
- seq:1,
- srr:1,
- rec:3,
- ack:1,
- reserved:4,
- version:3;
-#else
-#error "Adjust your <asm/byteorder.h> defines"
-#endif
- __be16 protocol;
-};
-
-/* modified GRE header for PPTP */
-struct gre_hdr_pptp {
- __u8 flags; /* bitfield */
- __u8 version; /* should be GRE_VERSION_PPTP */
- __be16 protocol; /* should be GRE_PROTOCOL_PPTP */
- __be16 payload_len; /* size of ppp payload, not inc. gre header */
- __be16 call_id; /* peer's call_id for this session */
- __be32 seq; /* sequence number. Present if S==1 */
- __be32 ack; /* seq number of highest packet received by */
- /* sender in this session */
-};
+#include <net/gre.h>
+#include <net/pptp.h>
struct nf_ct_gre {
unsigned int stream_timeout;
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 5fcd375ef175..33e37fb41d5d 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -11,22 +11,30 @@ static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
return false;
#endif
- return !list_empty(&skb->dev->nf_hooks_ingress);
+ return rcu_access_pointer(skb->dev->nf_hooks_ingress);
}
+/* caller must hold rcu_read_lock */
static inline int nf_hook_ingress(struct sk_buff *skb)
{
+ struct nf_hook_entry *e = rcu_dereference(skb->dev->nf_hooks_ingress);
struct nf_hook_state state;
- nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
- NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
- skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
+ /* Must recheck the ingress hook head, in the event it became NULL
+ * after the check in nf_hook_ingress_active evaluated to true.
+ */
+ if (unlikely(!e))
+ return 0;
+
+ nf_hook_state_init(&state, e, NF_NETDEV_INGRESS, INT_MIN,
+ NFPROTO_NETDEV, skb->dev, NULL, NULL,
+ dev_net(skb->dev), NULL);
return nf_hook_slow(skb, &state);
}
static inline void nf_hook_ingress_init(struct net_device *dev)
{
- INIT_LIST_HEAD(&dev->nf_hooks_ingress);
+ RCU_INIT_POINTER(dev->nf_hooks_ingress, NULL);
}
#else /* CONFIG_NETFILTER_INGRESS */
static inline int nf_hook_ingress_active(struct sk_buff *skb)
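The NULL recheck above guards the window between the two RCU reads; the assumed caller pattern looks like:

	rcu_read_lock();
	if (nf_hook_ingress_active(skb))
		ret = nf_hook_ingress(skb);	/* may still see NULL: returns 0 */
	rcu_read_unlock();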
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 66a1260b33de..01e84436cddf 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -571,56 +571,57 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
*/
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
- int ret = 0;
char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
/*
* Writing zeroes into userspace here is OK, because we know that if
* the zero gets there, we'll be overwriting it.
*/
- while (uaddr <= end) {
- ret = __put_user(0, uaddr);
- if (ret != 0)
- return ret;
+ do {
+ if (unlikely(__put_user(0, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK))
- ret = __put_user(0, end);
+ return __put_user(0, end);
- return ret;
+ return 0;
}
static inline int fault_in_multipages_readable(const char __user *uaddr,
int size)
{
volatile char c;
- int ret = 0;
const char __user *end = uaddr + size - 1;
if (unlikely(size == 0))
- return ret;
+ return 0;
- while (uaddr <= end) {
- ret = __get_user(c, uaddr);
- if (ret != 0)
- return ret;
+ if (unlikely(uaddr > end))
+ return -EFAULT;
+
+ do {
+ if (unlikely(__get_user(c, uaddr) != 0))
+ return -EFAULT;
uaddr += PAGE_SIZE;
- }
+ } while (uaddr <= end);
/* Check whether the range spilled into the next page. */
if (((unsigned long)uaddr & PAGE_MASK) ==
((unsigned long)end & PAGE_MASK)) {
- ret = __get_user(c, end);
- (void)c;
+ return __get_user(c, end);
}
- return ret;
+ (void)c;
+ return 0;
}
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
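A worked example of the new wraparound guard: on 32-bit, uaddr = 0xfffff000 with size = 0x2000 gives end = uaddr + size - 1 = 0xfff, so end < uaddr. The old `while (uaddr <= end)` loop never ran and the spilled-page check also failed, so the function returned 0 for a bogus range; the code now fails fast with -EFAULT.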
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 2d24b283aa2d..e25f1830fbcf 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -80,6 +80,7 @@ typedef enum {
PHY_INTERFACE_MODE_XGMII,
PHY_INTERFACE_MODE_MOCA,
PHY_INTERFACE_MODE_QSGMII,
+ PHY_INTERFACE_MODE_TRGMII,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -123,6 +124,8 @@ static inline const char *phy_modes(phy_interface_t interface)
return "moca";
case PHY_INTERFACE_MODE_QSGMII:
return "qsgmii";
+ case PHY_INTERFACE_MODE_TRGMII:
+ return "trgmii";
default:
return "unknown";
}
diff --git a/include/linux/property.h b/include/linux/property.h
index 3a2f9ae25c86..856e50b2140c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -190,7 +190,7 @@ struct property_entry {
.length = ARRAY_SIZE(_val_) * sizeof(_type_), \
.is_array = true, \
.is_string = false, \
- { .pointer = { _type_##_data = _val_ } }, \
+ { .pointer = { ._type_##_data = _val_ } }, \
}
#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 6b15e168148a..5ad54fc66cf0 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -127,6 +127,11 @@ struct ptp_clock;
*
* @info: Structure describing the new clock.
* @parent: Pointer to the parent device of the new clock.
+ *
+ * Returns a valid pointer on success or an ERR_PTR() value on failure. If PHC
+ * support is missing at the configuration level, this function
+ * returns NULL, and drivers are expected to gracefully handle that
+ * case separately.
*/
extern struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
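A hedged sketch of the caller pattern the clarified contract implies (the priv fields are hypothetical):

	priv->ptp_clock = ptp_clock_register(&priv->ptp_info, dev);
	if (IS_ERR(priv->ptp_clock)) {
		err = PTR_ERR(priv->ptp_clock);	/* real registration failure */
		priv->ptp_clock = NULL;
	} else if (!priv->ptp_clock) {
		/* PHC support disabled at the configuration level;
		 * continue without hardware timestamping */
	}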
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 19027635df0d..734deb094618 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -674,6 +674,7 @@ union event_ring_data {
struct iscsi_eqe_data iscsi_info;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct regpair roce_handle;
};
/* Event Ring Entry */
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index e4546abcea08..f9ae903bbb84 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -34,6 +34,8 @@ enum dcbx_protocol_type {
DCBX_MAX_PROTOCOL_TYPE
};
+#define QED_ROCE_PROTOCOL_INDEX (3)
+
#ifdef CONFIG_DCB
#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
@@ -260,15 +262,15 @@ struct qed_dev_info {
/* MFW version */
u32 mfw_rev;
- bool rdma_supported;
-
u32 flash_size;
u8 mf_mode;
bool tx_switching;
+ bool rdma_supported;
};
enum qed_sb_type {
QED_SB_TYPE_L2_QUEUE,
+ QED_SB_TYPE_CNQ,
};
enum qed_protocol {
@@ -627,8 +629,9 @@ enum DP_MODULE {
QED_MSG_SP = 0x100000,
QED_MSG_STORAGE = 0x200000,
QED_MSG_CXT = 0x800000,
+ QED_MSG_LL2 = 0x1000000,
QED_MSG_ILT = 0x2000000,
- QED_MSG_ROCE = 0x4000000,
+ QED_MSG_RDMA = 0x4000000,
QED_MSG_DEBUG = 0x8000000,
/* to be added...up to 0x8000000 */
};
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
new file mode 100644
index 000000000000..fd75c265dba3
--- /dev/null
+++ b/include/linux/qed/qed_ll2_if.h
@@ -0,0 +1,139 @@
+/* QLogic qed NIC Driver
+ *
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_LL2_IF_H
+#define _QED_LL2_IF_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+
+struct qed_ll2_stats {
+ u64 gsi_invalid_hdr;
+ u64 gsi_invalid_pkt_length;
+ u64 gsi_unsupported_pkt_typ;
+ u64 gsi_crcchksm_error;
+
+ u64 packet_too_big_discard;
+ u64 no_buff_discard;
+
+ u64 rcv_ucast_bytes;
+ u64 rcv_mcast_bytes;
+ u64 rcv_bcast_bytes;
+ u64 rcv_ucast_pkts;
+ u64 rcv_mcast_pkts;
+ u64 rcv_bcast_pkts;
+
+ u64 sent_ucast_bytes;
+ u64 sent_mcast_bytes;
+ u64 sent_bcast_bytes;
+ u64 sent_ucast_pkts;
+ u64 sent_mcast_pkts;
+ u64 sent_bcast_pkts;
+};
+
+#define QED_LL2_UNUSED_HANDLE (0xff)
+
+struct qed_ll2_cb_ops {
+ int (*rx_cb)(void *, struct sk_buff *, u32, u32);
+ int (*tx_cb)(void *, struct sk_buff *, bool);
+};
+
+struct qed_ll2_params {
+ u16 mtu;
+ bool drop_ttl0_packets;
+ bool rx_vlan_stripping;
+ u8 tx_tc;
+ bool frags_mapped;
+ u8 ll2_mac_address[ETH_ALEN];
+};
+
+struct qed_ll2_ops {
+/**
+ * @brief start - initializes ll2
+ *
+ * @param cdev
+ * @param params - protocol driver configuration for the ll2.
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params);
+
+/**
+ * @brief stop - stops the ll2
+ *
+ * @param cdev
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*stop)(struct qed_dev *cdev);
+
+/**
+ * @brief start_xmit - transmits an skb over the ll2 interface
+ *
+ * @param cdev
+ * @param skb
+ *
+ * @return 0 on success, otherwise error value.
+ */
+ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb);
+
+/**
+ * @brief register_cb_ops - registers the protocol driver's callbacks
+ * for Rx/Tx packets. Should be called before `start'.
+ *
+ * @param cdev
+ * @param cookie - to be passed to the callback functions.
+ * @param ops - the callback functions to register for Rx / Tx.
+ */
+ void (*register_cb_ops)(struct qed_dev *cdev,
+ const struct qed_ll2_cb_ops *ops,
+ void *cookie);
+
+/**
+ * @brief get LL2 related statistics
+ *
+ * @param cdev
+ * @param stats - pointer to struct that would be filled with stats
+ *
+ * @return 0 on success, error otherwise.
+ */
+ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats);
+};
+
+#ifdef CONFIG_QED_LL2
+int qed_ll2_alloc_if(struct qed_dev *);
+void qed_ll2_dealloc_if(struct qed_dev *);
+#else
+static const struct qed_ll2_ops qed_ll2_ops_pass = {
+ .start = NULL,
+ .stop = NULL,
+ .start_xmit = NULL,
+ .register_cb_ops = NULL,
+ .get_stats = NULL,
+};
+
+static inline int qed_ll2_alloc_if(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_ll2_dealloc_if(struct qed_dev *cdev)
+{
+}
+#endif
+#endif
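A hypothetical consumer sketch showing the op ordering the comments above prescribe - register callbacks, then start (all example_* names are assumptions):

static int example_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	kfree_skb(skb);		/* stub: drop everything */
	return 0;
}

static int example_tx(void *cookie, struct sk_buff *skb, bool success)
{
	dev_kfree_skb_any(skb);	/* stub: just release the buffer */
	return 0;
}

static const struct qed_ll2_cb_ops example_cb_ops = {
	.rx_cb = example_rx,
	.tx_cb = example_tx,
};

static int example_ll2_bringup(struct qed_dev *cdev,
			       const struct qed_ll2_ops *ops, void *cookie)
{
	struct qed_ll2_params params = {
		.mtu = 1500,
		.drop_ttl0_packets = true,
		.rx_vlan_stripping = true,
	};

	ops->register_cb_ops(cdev, &example_cb_ops, cookie);
	return ops->start(cdev, &params);
}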
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
new file mode 100644
index 000000000000..53047d3fa678
--- /dev/null
+++ b/include/linux/qed/qed_roce_if.h
@@ -0,0 +1,604 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and /or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef _QED_ROCE_IF_H
+#define _QED_ROCE_IF_H
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_if.h>
+#include <linux/qed/qed_ll2_if.h>
+#include <linux/qed/rdma_common.h>
+
+enum qed_roce_ll2_tx_dest {
+ /* Light L2 TX Destination to the Network */
+ QED_ROCE_LL2_TX_DEST_NW,
+
+ /* Light L2 TX Destination to the Loopback */
+ QED_ROCE_LL2_TX_DEST_LB,
+ QED_ROCE_LL2_TX_DEST_MAX
+};
+
+#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
+
+/* rdma interface */
+
+enum qed_roce_qp_state {
+ QED_ROCE_QP_STATE_RESET,
+ QED_ROCE_QP_STATE_INIT,
+ QED_ROCE_QP_STATE_RTR,
+ QED_ROCE_QP_STATE_RTS,
+ QED_ROCE_QP_STATE_SQD,
+ QED_ROCE_QP_STATE_ERR,
+ QED_ROCE_QP_STATE_SQE
+};
+
+enum qed_rdma_tid_type {
+ QED_RDMA_TID_REGISTERED_MR,
+ QED_RDMA_TID_FMR,
+ QED_RDMA_TID_MW_TYPE1,
+ QED_RDMA_TID_MW_TYPE2A
+};
+
+struct qed_rdma_events {
+ void *context;
+ void (*affiliated_event)(void *context, u8 fw_event_code,
+ void *fw_handle);
+ void (*unaffiliated_event)(void *context, u8 event_code);
+};
+
+struct qed_rdma_device {
+ u32 vendor_id;
+ u32 vendor_part_id;
+ u32 hw_ver;
+ u64 fw_ver;
+
+ u64 node_guid;
+ u64 sys_image_guid;
+
+ u8 max_cnq;
+ u8 max_sge;
+ u8 max_srq_sge;
+ u16 max_inline;
+ u32 max_wqe;
+ u32 max_srq_wqe;
+ u8 max_qp_resp_rd_atomic_resc;
+ u8 max_qp_req_rd_atomic_resc;
+ u64 max_dev_resp_rd_atomic_resc;
+ u32 max_cq;
+ u32 max_qp;
+ u32 max_srq;
+ u32 max_mr;
+ u64 max_mr_size;
+ u32 max_cqe;
+ u32 max_mw;
+ u32 max_fmr;
+ u32 max_mr_mw_fmr_pbl;
+ u64 max_mr_mw_fmr_size;
+ u32 max_pd;
+ u32 max_ah;
+ u8 max_pkey;
+ u16 max_srq_wr;
+ u8 max_stats_queues;
+ u32 dev_caps;
+
+ /* Ability to support RNR-NAK generation */
+
+#define QED_RDMA_DEV_CAP_RNR_NAK_MASK 0x1
+#define QED_RDMA_DEV_CAP_RNR_NAK_SHIFT 0
+ /* Ability to support shutdown port */
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_MASK 0x1
+#define QED_RDMA_DEV_CAP_SHUTDOWN_PORT_SHIFT 1
+ /* Ability to support port active event */
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_MASK 0x1
+#define QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT_SHIFT 2
+ /* Ability to support port change event */
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_MASK 0x1
+#define QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT_SHIFT 3
+ /* Ability to support system image GUID */
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_MASK 0x1
+#define QED_RDMA_DEV_CAP_SYS_IMAGE_SHIFT 4
+ /* Ability to support a bad P_Key counter */
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BAD_PKEY_CNT_SHIFT 5
+ /* Ability to support atomic operations */
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_MASK 0x1
+#define QED_RDMA_DEV_CAP_ATOMIC_OP_SHIFT 6
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_MASK 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_CQ_SHIFT 7
+ /* Ability to support modifying the maximum number of
+ * outstanding work requests per QP
+ */
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_MASK 0x1
+#define QED_RDMA_DEV_CAP_RESIZE_MAX_WR_SHIFT 8
+ /* Ability to support automatic path migration */
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_MASK 0x1
+#define QED_RDMA_DEV_CAP_AUTO_PATH_MIG_SHIFT 9
+ /* Ability to support the base memory management extensions */
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BASE_MEMORY_EXT_SHIFT 10
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_BASE_QUEUE_EXT_SHIFT 11
+ /* Ability to support multiple page sizes per memory region */
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_MASK 0x1
+#define QED_RDMA_DEV_CAP_MULTI_PAGE_PER_MR_EXT_SHIFT 12
+ /* Ability to support block list physical buffer list */
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_MASK 0x1
+#define QED_RDMA_DEV_CAP_BLOCK_MODE_SHIFT 13
+ /* Ability to support zero based virtual addresses */
+#define QED_RDMA_DEV_CAP_ZBVA_MASK 0x1
+#define QED_RDMA_DEV_CAP_ZBVA_SHIFT 14
+ /* Ability to support local invalidate fencing */
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_MASK 0x1
+#define QED_RDMA_DEV_CAP_LOCAL_INV_FENCE_SHIFT 15
+ /* Ability to support Loopback on QP */
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_MASK 0x1
+#define QED_RDMA_DEV_CAP_LB_INDICATOR_SHIFT 16
+ u64 page_size_caps;
+ u8 dev_ack_delay;
+ u32 reserved_lkey;
+ u32 bad_pkey_counter;
+ struct qed_rdma_events events;
+};
+
+enum qed_port_state {
+ QED_RDMA_PORT_UP,
+ QED_RDMA_PORT_DOWN,
+};
+
+enum qed_roce_capability {
+ QED_ROCE_V1 = 1 << 0,
+ QED_ROCE_V2 = 1 << 1,
+};
+
+struct qed_rdma_port {
+ enum qed_port_state port_state;
+ int link_speed;
+ u64 max_msg_size;
+ u8 source_gid_table_len;
+ void *source_gid_table_ptr;
+ u8 pkey_table_len;
+ void *pkey_table_ptr;
+ u32 pkey_bad_counter;
+ enum qed_roce_capability capability;
+};
+
+struct qed_rdma_cnq_params {
+ u8 num_pbl_pages;
+ u64 pbl_ptr;
+};
+
+/* The CQ Mode affects the CQ doorbell transaction size.
+ * 64/32 bit machines should configure to 32/16 bits respectively.
+ */
+enum qed_rdma_cq_mode {
+ QED_RDMA_CQ_MODE_16_BITS,
+ QED_RDMA_CQ_MODE_32_BITS,
+};
+
+struct qed_roce_dcqcn_params {
+ u8 notification_point;
+ u8 reaction_point;
+
+ /* fields for notification point */
+ u32 cnp_send_timeout;
+
+ /* fields for reaction point */
+ u32 rl_bc_rate;
+ u16 rl_max_rate;
+ u16 rl_r_ai;
+ u16 rl_r_hai;
+ u16 dcqcn_g;
+ u32 dcqcn_k_us;
+ u32 dcqcn_timeout_us;
+};
+
+struct qed_rdma_start_in_params {
+ struct qed_rdma_events *events;
+ struct qed_rdma_cnq_params cnq_pbl_list[128];
+ u8 desired_cnq;
+ enum qed_rdma_cq_mode cq_mode;
+ struct qed_roce_dcqcn_params dcqcn_params;
+ u16 max_mtu;
+ u8 mac_addr[ETH_ALEN];
+ u8 iwarp_flags;
+};
+
+struct qed_rdma_add_user_out_params {
+ u16 dpi;
+ u64 dpi_addr;
+ u64 dpi_phys_addr;
+ u32 dpi_size;
+};
+
+enum roce_mode {
+ ROCE_V1,
+ ROCE_V2_IPV4,
+ ROCE_V2_IPV6,
+ MAX_ROCE_MODE
+};
+
+union qed_gid {
+ u8 bytes[16];
+ u16 words[8];
+ u32 dwords[4];
+ u64 qwords[2];
+ u32 ipv4_addr;
+};
+
+struct qed_rdma_register_tid_in_params {
+ u32 itid;
+ enum qed_rdma_tid_type tid_type;
+ u8 key;
+ u16 pd;
+ bool local_read;
+ bool local_write;
+ bool remote_read;
+ bool remote_write;
+ bool remote_atomic;
+ bool mw_bind;
+ u64 pbl_ptr;
+ bool pbl_two_level;
+ u8 pbl_page_size_log;
+ u8 page_size_log;
+ u32 fbo;
+ u64 length;
+ u64 vaddr;
+ bool zbva;
+ bool phy_mr;
+ bool dma_mr;
+
+ bool dif_enabled;
+ u64 dif_error_addr;
+ u64 dif_runt_addr;
+};
+
+struct qed_rdma_create_cq_in_params {
+ u32 cq_handle_lo;
+ u32 cq_handle_hi;
+ u32 cq_size;
+ u16 dpi;
+ bool pbl_two_level;
+ u64 pbl_ptr;
+ u16 pbl_num_pages;
+ u8 pbl_page_size_log;
+ u8 cnq_id;
+ u16 int_timeout;
+};
+
+struct qed_rdma_create_srq_in_params {
+ u64 pbl_base_addr;
+ u64 prod_pair_addr;
+ u16 num_pages;
+ u16 pd_id;
+ u16 page_size;
+};
+
+struct qed_rdma_destroy_cq_in_params {
+ u16 icid;
+};
+
+struct qed_rdma_destroy_cq_out_params {
+ u16 num_cq_notif;
+};
+
+struct qed_rdma_create_qp_in_params {
+ u32 qp_handle_lo;
+ u32 qp_handle_hi;
+ u32 qp_handle_async_lo;
+ u32 qp_handle_async_hi;
+ bool use_srq;
+ bool signal_all;
+ bool fmr_and_reserved_lkey;
+ u16 pd;
+ u16 dpi;
+ u16 sq_cq_id;
+ u16 sq_num_pages;
+ u64 sq_pbl_ptr;
+ u8 max_sq_sges;
+ u16 rq_cq_id;
+ u16 rq_num_pages;
+ u64 rq_pbl_ptr;
+ u16 srq_id;
+ u8 stats_queue;
+};
+
+struct qed_rdma_create_qp_out_params {
+ u32 qp_id;
+ u16 icid;
+ void *rq_pbl_virt;
+ dma_addr_t rq_pbl_phys;
+ void *sq_pbl_virt;
+ dma_addr_t sq_pbl_phys;
+};
+
+struct qed_rdma_modify_qp_in_params {
+ u32 modify_flags;
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_NEW_STATE_SHIFT 0
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_PKEY_SHIFT 1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN_SHIFT 2
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_DEST_QP_SHIFT 3
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR_SHIFT 4
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RQ_PSN_SHIFT 5
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_SQ_PSN_SHIFT 6
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ_SHIFT 7
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_MASK 0x1
+#define QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP_SHIFT 8
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT_SHIFT 9
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RETRY_CNT_SHIFT 10
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT_SHIFT 11
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER_SHIFT 12
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN_SHIFT 13
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_MASK 0x1
+#define QED_ROCE_MODIFY_QP_VALID_ROCE_MODE_SHIFT 14
+
+ enum qed_roce_qp_state new_state;
+ u16 pkey;
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+ u32 dest_qp;
+ bool lb_indication;
+ u16 mtu;
+ u8 traffic_class_tos;
+ u8 hop_limit_ttl;
+ u32 flow_label;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ u16 udp_src_port;
+
+ u16 vlan_id;
+
+ u32 rq_psn;
+ u32 sq_psn;
+ u8 max_rd_atomic_resp;
+ u8 max_rd_atomic_req;
+ u32 ack_timeout;
+ u8 retry_cnt;
+ u8 rnr_retry_cnt;
+ u8 min_rnr_nak_timer;
+ bool sqd_async;
+ u8 remote_mac_addr[6];
+ u8 local_mac_addr[6];
+ bool use_local_mac;
+ enum roce_mode roce_mode;
+};
+
+struct qed_rdma_query_qp_out_params {
+ enum qed_roce_qp_state state;
+ u32 rq_psn;
+ u32 sq_psn;
+ bool draining;
+ u16 mtu;
+ u32 dest_qp;
+ bool incoming_rdma_read_en;
+ bool incoming_rdma_write_en;
+ bool incoming_atomic_en;
+ bool e2e_flow_control_en;
+ union qed_gid sgid;
+ union qed_gid dgid;
+ u32 flow_label;
+ u8 hop_limit_ttl;
+ u8 traffic_class_tos;
+ u32 timeout;
+ u8 rnr_retry;
+ u8 retry_cnt;
+ u8 min_rnr_nak_timer;
+ u16 pkey_index;
+ u8 max_rd_atomic;
+ u8 max_dest_rd_atomic;
+ bool sqd_async;
+};
+
+struct qed_rdma_create_srq_out_params {
+ u16 srq_id;
+};
+
+struct qed_rdma_destroy_srq_in_params {
+ u16 srq_id;
+};
+
+struct qed_rdma_modify_srq_in_params {
+ u32 wqe_limit;
+ u16 srq_id;
+};
+
+struct qed_rdma_stats_out_params {
+ u64 sent_bytes;
+ u64 sent_pkts;
+ u64 rcv_bytes;
+ u64 rcv_pkts;
+};
+
+struct qed_rdma_counters_out_params {
+ u64 pd_count;
+ u64 max_pd;
+ u64 dpi_count;
+ u64 max_dpi;
+ u64 cq_count;
+ u64 max_cq;
+ u64 qp_count;
+ u64 max_qp;
+ u64 tid_count;
+ u64 max_tid;
+};
+
+#define QED_ROCE_TX_HEAD_FAILURE (1)
+#define QED_ROCE_TX_FRAG_FAILURE (2)
+
+struct qed_roce_ll2_header {
+ void *vaddr;
+ dma_addr_t baddr;
+ size_t len;
+};
+
+struct qed_roce_ll2_buffer {
+ dma_addr_t baddr;
+ size_t len;
+};
+
+struct qed_roce_ll2_packet {
+ struct qed_roce_ll2_header header;
+ int n_seg;
+ struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
+ int roce_mode;
+ enum qed_roce_ll2_tx_dest tx_dest;
+};
+
+struct qed_roce_ll2_tx_params {
+ int reserved;
+};
+
+struct qed_roce_ll2_rx_params {
+ u16 vlan_id;
+ u8 smac[ETH_ALEN];
+ int rc;
+};
+
+struct qed_roce_ll2_cbs {
+ void (*tx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt);
+
+ void (*rx_cb)(void *pdev, struct qed_roce_ll2_packet *pkt,
+ struct qed_roce_ll2_rx_params *params);
+};
+
+struct qed_roce_ll2_params {
+ u16 max_rx_buffers;
+ u16 max_tx_buffers;
+ u16 mtu;
+ u8 mac_address[ETH_ALEN];
+ struct qed_roce_ll2_cbs cbs;
+ void *cb_cookie;
+};
+
+struct qed_roce_ll2_info {
+ u8 handle;
+ struct qed_roce_ll2_cbs cbs;
+ u8 mac_address[ETH_ALEN];
+ void *cb_cookie;
+
+ /* Lock to protect ll2 */
+ struct mutex lock;
+};
+
+enum qed_rdma_type {
+ QED_RDMA_TYPE_ROCE,
+};
+
+struct qed_dev_rdma_info {
+ struct qed_dev_info common;
+ enum qed_rdma_type rdma_type;
+};
+
+struct qed_rdma_ops {
+ const struct qed_common_ops *common;
+
+ int (*fill_dev_info)(struct qed_dev *cdev,
+ struct qed_dev_rdma_info *info);
+ void *(*rdma_get_rdma_ctx)(struct qed_dev *cdev);
+
+ int (*rdma_init)(struct qed_dev *dev,
+ struct qed_rdma_start_in_params *iparams);
+
+ int (*rdma_add_user)(void *rdma_cxt,
+ struct qed_rdma_add_user_out_params *oparams);
+
+ void (*rdma_remove_user)(void *rdma_cxt, u16 dpi);
+ int (*rdma_stop)(void *rdma_cxt);
+ struct qed_rdma_device* (*rdma_query_device)(void *rdma_cxt);
+ struct qed_rdma_port* (*rdma_query_port)(void *rdma_cxt);
+ int (*rdma_get_start_sb)(struct qed_dev *cdev);
+ int (*rdma_get_min_cnq_msix)(struct qed_dev *cdev);
+ void (*rdma_cnq_prod_update)(void *rdma_cxt, u8 cnq_index, u16 prod);
+ int (*rdma_get_rdma_int)(struct qed_dev *cdev,
+ struct qed_int_info *info);
+ int (*rdma_set_rdma_int)(struct qed_dev *cdev, u16 cnt);
+ int (*rdma_alloc_pd)(void *rdma_cxt, u16 *pd);
+ void (*rdma_dealloc_pd)(void *rdma_cxt, u16 pd);
+ int (*rdma_create_cq)(void *rdma_cxt,
+ struct qed_rdma_create_cq_in_params *params,
+ u16 *icid);
+ int (*rdma_destroy_cq)(void *rdma_cxt,
+ struct qed_rdma_destroy_cq_in_params *iparams,
+ struct qed_rdma_destroy_cq_out_params *oparams);
+ struct qed_rdma_qp *
+ (*rdma_create_qp)(void *rdma_cxt,
+ struct qed_rdma_create_qp_in_params *iparams,
+ struct qed_rdma_create_qp_out_params *oparams);
+
+ int (*rdma_modify_qp)(void *roce_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_modify_qp_in_params *iparams);
+
+ int (*rdma_query_qp)(void *rdma_cxt, struct qed_rdma_qp *qp,
+ struct qed_rdma_query_qp_out_params *oparams);
+ int (*rdma_destroy_qp)(void *rdma_cxt, struct qed_rdma_qp *qp);
+ int
+ (*rdma_register_tid)(void *rdma_cxt,
+ struct qed_rdma_register_tid_in_params *iparams);
+ int (*rdma_deregister_tid)(void *rdma_cxt, u32 itid);
+ int (*rdma_alloc_tid)(void *rdma_cxt, u32 *itid);
+ void (*rdma_free_tid)(void *rdma_cxt, u32 itid);
+ int (*roce_ll2_start)(struct qed_dev *cdev,
+ struct qed_roce_ll2_params *params);
+ int (*roce_ll2_stop)(struct qed_dev *cdev);
+ int (*roce_ll2_tx)(struct qed_dev *cdev,
+ struct qed_roce_ll2_packet *packet,
+ struct qed_roce_ll2_tx_params *params);
+ int (*roce_ll2_post_rx_buffer)(struct qed_dev *cdev,
+ struct qed_roce_ll2_buffer *buf,
+ u64 cookie, u8 notify_fw);
+ int (*roce_ll2_set_mac_filter)(struct qed_dev *cdev,
+ u8 *old_mac_address,
+ u8 *new_mac_address);
+ int (*roce_ll2_stats)(struct qed_dev *cdev,
+ struct qed_ll2_stats *stats);
+};
+
+const struct qed_rdma_ops *qed_get_rdma_ops(void);
+
+#endif
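
A hypothetical probe-time consumer of this ops table; nothing below is from the patch itself, and the error flow is an assumption:

	static int qedr_probe_sketch(struct qed_dev *cdev)
	{
		const struct qed_rdma_ops *ops = qed_get_rdma_ops();
		struct qed_dev_rdma_info info;
		int rc;

		if (!ops)
			return -ENODEV;

		rc = ops->fill_dev_info(cdev, &info);
		if (rc)
			return rc;
		if (info.rdma_type != QED_RDMA_TYPE_ROCE)
			return -ENODEV;

		/* rdma_init(), rdma_add_user(), CQ/QP setup would follow */
		return 0;
	}
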
diff --git a/include/linux/qed/qede_roce.h b/include/linux/qed/qede_roce.h
new file mode 100644
index 000000000000..99fbe6d55acb
--- /dev/null
+++ b/include/linux/qed/qede_roce.h
@@ -0,0 +1,88 @@
+/* QLogic qedr NIC Driver
+ * Copyright (c) 2015-2016 QLogic Corporation
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef QEDE_ROCE_H
+#define QEDE_ROCE_H
+
+struct qedr_dev;
+struct qed_dev;
+struct qede_dev;
+
+enum qede_roce_event {
+ QEDE_UP,
+ QEDE_DOWN,
+ QEDE_CHANGE_ADDR,
+ QEDE_CLOSE
+};
+
+struct qede_roce_event_work {
+ struct list_head list;
+ struct work_struct work;
+ void *ptr;
+ enum qede_roce_event event;
+};
+
+struct qedr_driver {
+ unsigned char name[32];
+
+ struct qedr_dev* (*add)(struct qed_dev *, struct pci_dev *,
+ struct net_device *);
+
+ void (*remove)(struct qedr_dev *);
+ void (*notify)(struct qedr_dev *, enum qede_roce_event);
+};
+
+/* APIs for the RoCE driver to register callback handlers,
+ * which will be invoked when a device is added or removed,
+ * or brought up or down.
+ */
+int qede_roce_register_driver(struct qedr_driver *drv);
+void qede_roce_unregister_driver(struct qedr_driver *drv);
+
+bool qede_roce_supported(struct qede_dev *dev);
+
+#if IS_ENABLED(CONFIG_INFINIBAND_QEDR)
+int qede_roce_dev_add(struct qede_dev *dev);
+void qede_roce_dev_event_open(struct qede_dev *dev);
+void qede_roce_dev_event_close(struct qede_dev *dev);
+void qede_roce_dev_remove(struct qede_dev *dev);
+void qede_roce_event_changeaddr(struct qede_dev *qedr);
+#else
+static inline int qede_roce_dev_add(struct qede_dev *dev)
+{
+ return 0;
+}
+
+static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
+static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
+static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
+static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
+#endif
+#endif
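
A skeleton registration against the hooks above; the struct layout is from this header, while the stub bodies and names are illustrative only:

	static struct qedr_dev *qedr_add_stub(struct qed_dev *cdev,
					      struct pci_dev *pdev,
					      struct net_device *ndev)
	{
		return NULL; /* a real driver allocates its qedr_dev here */
	}

	static void qedr_remove_stub(struct qedr_dev *qdev)
	{
	}

	static void qedr_notify_stub(struct qedr_dev *qdev,
				     enum qede_roce_event event)
	{
	}

	static struct qedr_driver qedr_sketch_drv = {
		.name   = "qedr_sketch",
		.add    = qedr_add_stub,
		.remove = qedr_remove_stub,
		.notify = qedr_notify_stub,
	};

	/* qede_roce_register_driver(&qedr_sketch_drv) at module init,
	 * qede_roce_unregister_driver(&qedr_sketch_drv) at module exit.
	 */
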
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index 187991c1f439..7663725faa94 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -28,6 +28,7 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index fd82584acd48..5c132d3188be 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,7 +1,7 @@
/*
* Resizable, Scalable, Concurrent Hash Table
*
- * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2015-2016 Herbert Xu <herbert@gondor.apana.org.au>
* Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
* Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
*
@@ -53,6 +53,11 @@ struct rhash_head {
struct rhash_head __rcu *next;
};
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
@@ -137,6 +142,7 @@ struct rhashtable_params {
* @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
@@ -147,12 +153,21 @@ struct rhashtable {
unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p;
+ bool rhlist;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
};
/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhashtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
* struct rhashtable_walker - Hash table walker
* @list: List entry on list of walkers
* @tbl: The table that we were walking over
@@ -163,9 +178,10 @@ struct rhashtable_walker {
};
/**
- * struct rhashtable_iter - Hash table iterator, fits into netlink cb
+ * struct rhashtable_iter - Hash table iterator
* @ht: Table to iterate through
* @p: Current pointer
+ * @list: Current hash list pointer
* @walker: Associated rhashtable walker
* @slot: Current slot
* @skip: Number of entries to skip in slot
@@ -173,6 +189,7 @@ struct rhashtable_walker {
struct rhashtable_iter {
struct rhashtable *ht;
struct rhash_head *p;
+ struct rhlist_head *list;
struct rhashtable_walker walker;
unsigned int slot;
unsigned int skip;
@@ -339,13 +356,11 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
int rhashtable_init(struct rhashtable *ht,
const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
-struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
- const void *key,
- struct rhash_head *obj,
- struct bucket_table *old_tbl,
- void **data);
-int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
+void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
+ struct rhash_head *obj);
void rhashtable_walk_enter(struct rhashtable *ht,
struct rhashtable_iter *iter);
@@ -507,6 +522,31 @@ void rhashtable_destroy(struct rhashtable *ht);
rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
tbl, hash, member)
+/**
+ * rhl_for_each_rcu - iterate over rcu hash table list
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_rcu(pos, list) \
+ for (pos = list; pos; pos = rcu_dereference_raw(pos->next))
+
+/**
+ * rhl_for_each_entry_rcu - iterate over rcu hash table list of given type
+ * @tpos: the type * to use as a loop cursor.
+ * @pos: the &struct rhlist_head to use as a loop cursor.
+ * @list: the head of the list
+ * @member: name of the &struct rhlist_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive should be used on the
+ * list returned by rhltable_lookup.
+ */
+#define rhl_for_each_entry_rcu(tpos, pos, list, member) \
+ for (pos = list; pos && rht_entry(tpos, pos, member); \
+ pos = rcu_dereference_raw(pos->next))
+
static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
const void *obj)
{
@@ -516,18 +556,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
@@ -539,8 +569,6 @@ static inline void *rhashtable_lookup_fast(
struct rhash_head *he;
unsigned int hash;
- rcu_read_lock();
-
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
@@ -549,8 +577,7 @@ restart:
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
continue;
- rcu_read_unlock();
- return rht_obj(ht, he);
+ return he;
}
/* Ensure we see any new tables. */
@@ -559,96 +586,165 @@ restart:
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(tbl))
goto restart;
- rcu_read_unlock();
return NULL;
}
+/**
+ * rhashtable_lookup - search hash table
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+ return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ void *obj;
+
+ rcu_read_lock();
+ obj = rhashtable_lookup(ht, key, params);
+ rcu_read_unlock();
+
+ return obj;
+}
+
+/**
+ * rhltable_lookup - search hash list table
+ * @hlt: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. All matching entries are returned
+ * in a list.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the list of entries that match the given key.
+ */
+static inline struct rhlist_head *rhltable_lookup(
+ struct rhltable *hlt, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(&hlt->ht, key, params);
+
+ return he ? container_of(he, struct rhlist_head, rhead) : NULL;
+}
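
A lookup/iteration sketch with an assumed object type keyed by a u32; all duplicates for one key come back on a single rhlist and are walked under the RCU read lock:

	struct obj {
		u32 key;
		struct rhlist_head node;
	};

	static const struct rhashtable_params obj_params = {
		.key_len     = sizeof(u32),
		.key_offset  = offsetof(struct obj, key),
		.head_offset = offsetof(struct obj, node),
	};

	static void print_matches(struct rhltable *hlt, u32 key)
	{
		struct rhlist_head *list, *pos;
		struct obj *o;

		rcu_read_lock();
		list = rhltable_lookup(hlt, &key, obj_params);
		rhl_for_each_entry_rcu(o, pos, list, node)
			pr_info("found key %u\n", o->key);
		rcu_read_unlock();
	}
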
+
/* Internal function, please use rhashtable_insert_fast() instead. This
 * function returns the existing element already in hashes if there is a clash,
* otherwise it returns an error via ERR_PTR().
*/
static inline void *__rhashtable_insert_fast(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
- const struct rhashtable_params params)
+ const struct rhashtable_params params, bool rhlist)
{
struct rhashtable_compare_arg arg = {
.ht = ht,
.key = key,
};
- struct bucket_table *tbl, *new_tbl;
+ struct rhash_head __rcu **pprev;
+ struct bucket_table *tbl;
struct rhash_head *head;
spinlock_t *lock;
- unsigned int elasticity;
unsigned int hash;
- void *data = NULL;
- int err;
+ int elasticity;
+ void *data;
-restart:
rcu_read_lock();
tbl = rht_dereference_rcu(ht->tbl, ht);
+ hash = rht_head_hashfn(ht, tbl, obj, params);
+ lock = rht_bucket_lock(tbl, hash);
+ spin_lock_bh(lock);
- /* All insertions must grab the oldest table containing
- * the hashed bucket that is yet to be rehashed.
- */
- for (;;) {
- hash = rht_head_hashfn(ht, tbl, obj, params);
- lock = rht_bucket_lock(tbl, hash);
- spin_lock_bh(lock);
-
- if (tbl->rehash <= hash)
- break;
-
+ if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+slow_path:
spin_unlock_bh(lock);
- tbl = rht_dereference_rcu(tbl->future_tbl, ht);
+ rcu_read_unlock();
+ return rhashtable_insert_slow(ht, key, obj);
}
- new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
- if (unlikely(new_tbl)) {
- tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data);
- if (!IS_ERR_OR_NULL(tbl))
- goto slow_path;
+ elasticity = ht->elasticity;
+ pprev = &tbl->buckets[hash];
+ rht_for_each(head, tbl, hash) {
+ struct rhlist_head *plist;
+ struct rhlist_head *list;
+
+ elasticity--;
+ if (!key ||
+ (params.obj_cmpfn ?
+ params.obj_cmpfn(&arg, rht_obj(ht, head)) :
+ rhashtable_compare(&arg, rht_obj(ht, head))))
+ continue;
+
+ data = rht_obj(ht, head);
- err = PTR_ERR(tbl);
- if (err == -EEXIST)
- err = 0;
+ if (!rhlist)
+ goto out;
- goto out;
- }
- err = -E2BIG;
- if (unlikely(rht_grow_above_max(ht, tbl)))
- goto out;
+ list = container_of(obj, struct rhlist_head, rhead);
+ plist = container_of(head, struct rhlist_head, rhead);
- if (unlikely(rht_grow_above_100(ht, tbl))) {
-slow_path:
- spin_unlock_bh(lock);
- err = rhashtable_insert_rehash(ht, tbl);
- rcu_read_unlock();
- if (err)
- return ERR_PTR(err);
+ RCU_INIT_POINTER(list->next, plist);
+ head = rht_dereference_bucket(head->next, tbl, hash);
+ RCU_INIT_POINTER(list->rhead.next, head);
+ rcu_assign_pointer(*pprev, obj);
- goto restart;
+ goto good;
}
- err = 0;
- elasticity = ht->elasticity;
- rht_for_each(head, tbl, hash) {
- if (key &&
- unlikely(!(params.obj_cmpfn ?
- params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head))))) {
- data = rht_obj(ht, head);
- goto out;
- }
- if (!--elasticity)
- goto slow_path;
- }
+ if (elasticity <= 0)
+ goto slow_path;
+
+ data = ERR_PTR(-E2BIG);
+ if (unlikely(rht_grow_above_max(ht, tbl)))
+ goto out;
+
+ if (unlikely(rht_grow_above_100(ht, tbl)))
+ goto slow_path;
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
+ if (rhlist) {
+ struct rhlist_head *list;
+
+ list = container_of(obj, struct rhlist_head, rhead);
+ RCU_INIT_POINTER(list->next, NULL);
+ }
rcu_assign_pointer(tbl->buckets[hash], obj);
@@ -656,11 +752,14 @@ slow_path:
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
+good:
+ data = NULL;
+
out:
spin_unlock_bh(lock);
rcu_read_unlock();
- return err ? ERR_PTR(err) : data;
+ return data;
}
/**
@@ -685,7 +784,7 @@ static inline int rhashtable_insert_fast(
{
void *ret;
- ret = __rhashtable_insert_fast(ht, NULL, obj, params);
+ ret = __rhashtable_insert_fast(ht, NULL, obj, params, false);
if (IS_ERR(ret))
return PTR_ERR(ret);
@@ -693,6 +792,58 @@ static inline int rhashtable_insert_fast(
}
/**
+ * rhltable_insert_key - insert object into hash list table
+ * @hlt: hash list table
+ * @key: the pointer to the key
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per-bucket spinlock to protect against concurrent mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark checked by rht_grow_above_75().
+ */
+static inline int rhltable_insert_key(
+ struct rhltable *hlt, const void *key, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return PTR_ERR(__rhashtable_insert_fast(&hlt->ht, key, &list->rhead,
+ params, true));
+}
+
+/**
+ * rhltable_insert - insert object into hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Will take a per-bucket spinlock to protect against concurrent mutations
+ * on the same bucket. Multiple insertions may occur in parallel unless
+ * they map to the same bucket lock.
+ *
+ * It is safe to call this function from atomic context.
+ *
+ * Will trigger an automatic deferred table resizing if the size grows
+ * beyond the watermark checked by rht_grow_above_75().
+ */
+static inline int rhltable_insert(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(&hlt->ht, &list->rhead);
+
+ key += params.key_offset;
+
+ return rhltable_insert_key(hlt, key, list, params);
+}
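
Continuing the obj/obj_params sketch above: inserting another object with an equal key simply chains it on the existing entry's list.

	static int add_obj(struct rhltable *hlt, struct obj *o)
	{
		return rhltable_insert(hlt, &o->node, obj_params);
	}
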
+
+/**
* rhashtable_lookup_insert_fast - lookup and insert object into hash table
* @ht: hash table
* @obj: pointer to hash head inside object
@@ -722,7 +873,8 @@ static inline int rhashtable_lookup_insert_fast(
BUG_ON(ht->p.obj_hashfn);
- ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params);
+ ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+ false);
if (IS_ERR(ret))
return PTR_ERR(ret);
@@ -759,7 +911,7 @@ static inline int rhashtable_lookup_insert_key(
BUG_ON(!ht->p.obj_hashfn || !key);
- ret = __rhashtable_insert_fast(ht, key, obj, params);
+ ret = __rhashtable_insert_fast(ht, key, obj, params, false);
if (IS_ERR(ret))
return PTR_ERR(ret);
@@ -783,13 +935,14 @@ static inline void *rhashtable_lookup_get_insert_key(
{
BUG_ON(!ht->p.obj_hashfn || !key);
- return __rhashtable_insert_fast(ht, key, obj, params);
+ return __rhashtable_insert_fast(ht, key, obj, params, false);
}
/* Internal function, please use rhashtable_remove_fast() instead */
-static inline int __rhashtable_remove_fast(
+static inline int __rhashtable_remove_fast_one(
struct rhashtable *ht, struct bucket_table *tbl,
- struct rhash_head *obj, const struct rhashtable_params params)
+ struct rhash_head *obj, const struct rhashtable_params params,
+ bool rhlist)
{
struct rhash_head __rcu **pprev;
struct rhash_head *he;
@@ -804,39 +957,66 @@ static inline int __rhashtable_remove_fast(
pprev = &tbl->buckets[hash];
rht_for_each(he, tbl, hash) {
+ struct rhlist_head *list;
+
+ list = container_of(he, struct rhlist_head, rhead);
+
if (he != obj) {
+ struct rhlist_head __rcu **lpprev;
+
pprev = &he->next;
- continue;
+
+ if (!rhlist)
+ continue;
+
+ do {
+ lpprev = &list->next;
+ list = rht_dereference_bucket(list->next,
+ tbl, hash);
+ } while (list && obj != &list->rhead);
+
+ if (!list)
+ continue;
+
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ RCU_INIT_POINTER(*lpprev, list);
+ err = 0;
+ break;
}
- rcu_assign_pointer(*pprev, obj->next);
- err = 0;
+ obj = rht_dereference_bucket(obj->next, tbl, hash);
+ err = 1;
+
+ if (rhlist) {
+ list = rht_dereference_bucket(list->next, tbl, hash);
+ if (list) {
+ RCU_INIT_POINTER(list->rhead.next, obj);
+ obj = &list->rhead;
+ err = 0;
+ }
+ }
+
+ rcu_assign_pointer(*pprev, obj);
break;
}
spin_unlock_bh(lock);
+ if (err > 0) {
+ atomic_dec(&ht->nelems);
+ if (unlikely(ht->p.automatic_shrinking &&
+ rht_shrink_below_30(ht, tbl)))
+ schedule_work(&ht->run_work);
+ err = 0;
+ }
+
return err;
}
-/**
- * rhashtable_remove_fast - remove object from hash table
- * @ht: hash table
- * @obj: pointer to hash head inside object
- * @params: hash table parameters
- *
- * Since the hash chain is single linked, the removal operation needs to
- * walk the bucket chain upon removal. The removal operation is thus
- * considerable slow if the hash table is not correctly sized.
- *
- * Will automatically shrink the table via rhashtable_expand() if the
- * shrink_decision function specified at rhashtable_init() returns true.
- *
- * Returns zero on success, -ENOENT if the entry could not be found.
- */
-static inline int rhashtable_remove_fast(
+/* Internal function, please use rhashtable_remove_fast() instead */
+static inline int __rhashtable_remove_fast(
struct rhashtable *ht, struct rhash_head *obj,
- const struct rhashtable_params params)
+ const struct rhashtable_params params, bool rhlist)
{
struct bucket_table *tbl;
int err;
@@ -850,24 +1030,60 @@ static inline int rhashtable_remove_fast(
* visible then that guarantees the entry to still be in
* the old tbl if it exists.
*/
- while ((err = __rhashtable_remove_fast(ht, tbl, obj, params)) &&
+ while ((err = __rhashtable_remove_fast_one(ht, tbl, obj, params,
+ rhlist)) &&
(tbl = rht_dereference_rcu(tbl->future_tbl, ht)))
;
- if (err)
- goto out;
-
- atomic_dec(&ht->nelems);
- if (unlikely(ht->p.automatic_shrinking &&
- rht_shrink_below_30(ht, tbl)))
- schedule_work(&ht->run_work);
-
-out:
rcu_read_unlock();
return err;
}
+/**
+ * rhashtable_remove_fast - remove object from hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table if the automatic_shrinking parameter
+ * was set and occupancy drops below 30% (see rht_shrink_below_30()).
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhashtable_remove_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(ht, obj, params, false);
+}
+
+/**
+ * rhltable_remove - remove object from hash list table
+ * @hlt: hash list table
+ * @list: pointer to hash list head inside object
+ * @params: hash table parameters
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table if the automatic_shrinking parameter
+ * was set and occupancy drops below 30% (see rht_shrink_below_30()).
+ *
+ * Returns zero on success, -ENOENT if the entry could not be found.
+ */
+static inline int rhltable_remove(
+ struct rhltable *hlt, struct rhlist_head *list,
+ const struct rhashtable_params params)
+{
+ return __rhashtable_remove_fast(&hlt->ht, &list->rhead, params, true);
+}
+
/* Internal function, please use rhashtable_replace_fast() instead */
static inline int __rhashtable_replace_fast(
struct rhashtable *ht, struct bucket_table *tbl,
@@ -958,4 +1174,51 @@ static inline int rhashtable_walk_init(struct rhashtable *ht,
return 0;
}
+/**
+ * rhltable_walk_enter - Initialise an iterator
+ * @hlt: Table to walk over
+ * @iter: Hash table Iterator
+ *
+ * This function prepares a hash table walk.
+ *
+ * Note that if you restart a walk after rhashtable_walk_stop you
+ * may see the same object twice. Also, you may miss objects if
+ * there are removals in between rhashtable_walk_stop and the next
+ * call to rhashtable_walk_start.
+ *
+ * For a completely stable walk you should construct your own data
+ * structure outside the hash table.
+ *
+ * This function may sleep so you must not call it from interrupt
+ * context or with spin locks held.
+ *
+ * You must call rhashtable_walk_exit after this function returns.
+ */
+static inline void rhltable_walk_enter(struct rhltable *hlt,
+ struct rhashtable_iter *iter)
+{
+ return rhashtable_walk_enter(&hlt->ht, iter);
+}
+
+/**
+ * rhltable_free_and_destroy - free elements and destroy hash list table
+ * @hlt: the hash list table to destroy
+ * @free_fn: callback to release resources of element
+ * @arg: pointer passed to free_fn
+ *
+ * See documentation for rhashtable_free_and_destroy.
+ */
+static inline void rhltable_free_and_destroy(struct rhltable *hlt,
+ void (*free_fn)(void *ptr,
+ void *arg),
+ void *arg)
+{
+ return rhashtable_free_and_destroy(&hlt->ht, free_fn, arg);
+}
+
+static inline void rhltable_destroy(struct rhltable *hlt)
+{
+ return rhltable_free_and_destroy(hlt, NULL, NULL);
+}
+
#endif /* _LINUX_RHASHTABLE_H */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4c5662f05bda..9bf60b556bd2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -676,13 +676,23 @@ struct sk_buff {
*/
kmemcheck_bitfield_begin(flags1);
__u16 queue_mapping;
+
+/* If you move the cloned bit around, you must also adapt these constants. */
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CLONED_MASK (1 << 7)
+#else
+#define CLONED_MASK 1
+#endif
+#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+
+ __u8 __cloned_offset[0];
__u8 cloned:1,
nohdr:1,
fclone:2,
peeked:1,
head_frag:1,
- xmit_more:1;
- /* one bit hole */
+ xmit_more:1,
+ __unused:1; /* one bit hole */
kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
@@ -3075,6 +3085,7 @@ bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
+int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b17cc4830fa6..4a529c984a3f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
static inline void workingset_node_pages_dec(struct radix_tree_node *node)
{
+ VM_BUG_ON(!workingset_node_pages(node));
node->count--;
}
@@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
{
+ VM_BUG_ON(!workingset_node_shadows(node));
node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
}
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index c723a465125d..a17ae7b85218 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -19,6 +19,7 @@
#include <linux/skbuff.h>
+#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -212,7 +213,8 @@ struct tcp_sock {
u8 reord; /* reordering detected */
} rack;
u16 advmss; /* Advertised MSS */
- u8 unused;
+ u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
+ unused:7;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
thin_dupack : 1,/* Fast retransmit on first dupack */
@@ -234,9 +236,7 @@ struct tcp_sock {
u32 mdev_max_us; /* maximal mdev for the last rtt period */
u32 rttvar_us; /* smoothed mdev_max */
u32 rtt_seq; /* sequence number to update rttvar */
- struct rtt_meas {
- u32 rtt, ts; /* RTT in usec and sampling time in jiffies. */
- } rtt_min[3];
+ struct minmax rtt_min;
u32 packets_out; /* Packets which are "in flight" */
u32 retrans_out; /* Retransmitted packets out */
@@ -268,6 +268,12 @@ struct tcp_sock {
* receiver in Recovery. */
u32 prr_out; /* Total number of pkts sent during Recovery. */
u32 delivered; /* Total data packets delivered incl. rexmits */
+ u32 lost; /* Total data packets lost incl. rexmits */
+ u32 app_limited; /* limited until "delivered" reaches this val */
+ struct skb_mstamp first_tx_mstamp; /* start of window send phase */
+ struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+ u32 rate_delivered; /* saved rate sample: packets delivered */
+ u32 rate_interval_us; /* saved rate sample: time elapsed */
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 1b5d1cd796e2..75b4aaf31a9d 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -76,7 +76,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
struct iov_iter *i, unsigned long offset, size_t bytes);
void iov_iter_advance(struct iov_iter *i, size_t bytes);
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_multipages_readable(struct iov_iter *i, size_t bytes);
+#define iov_iter_fault_in_multipages_readable iov_iter_fault_in_readable
size_t iov_iter_single_seg_count(const struct iov_iter *i);
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i);
diff --git a/include/linux/win_minmax.h b/include/linux/win_minmax.h
new file mode 100644
index 000000000000..56569604278f
--- /dev/null
+++ b/include/linux/win_minmax.h
@@ -0,0 +1,37 @@
+/* include/linux/win_minmax.h: windowed min/max tracker by Kathleen Nichols.
+ */
+#ifndef MINMAX_H
+#define MINMAX_H
+
+#include <linux/types.h>
+
+/* A single data point for our parameterized min-max tracker */
+struct minmax_sample {
+ u32 t; /* time measurement was taken */
+ u32 v; /* value measured */
+};
+
+/* State for the parameterized min-max tracker */
+struct minmax {
+ struct minmax_sample s[3];
+};
+
+static inline u32 minmax_get(const struct minmax *m)
+{
+ return m->s[0].v;
+}
+
+static inline u32 minmax_reset(struct minmax *m, u32 t, u32 meas)
+{
+ struct minmax_sample val = { .t = t, .v = meas };
+
+ m->s[2] = m->s[1] = m->s[0] = val;
+ return m->s[0].v;
+}
+
+u32 minmax_running_max(struct minmax *m, u32 win, u32 t, u32 meas);
+u32 minmax_running_min(struct minmax *m, u32 win, u32 t, u32 meas);
+
+#endif
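
A usage sketch with a caller-supplied clock and window length (both in the same units); treating a zero stored value as "uninitialized" is an assumption of this sketch, not of the API:

	static u32 track_min_rtt(struct minmax *m, u32 win, u32 now,
				 u32 rtt_us)
	{
		if (!minmax_get(m))
			return minmax_reset(m, now, rtt_us);
		/* lib/win_minmax.c ages out samples older than 'win' */
		return minmax_running_min(m, win, now, rtt_us);
	}
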
diff --git a/include/media/cec.h b/include/media/cec.h
index dc7854b855f3..fdb5d600e4bb 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -57,8 +57,8 @@ struct cec_devnode {
int minor;
bool registered;
bool unregistered;
- struct mutex fhs_lock;
struct list_head fhs;
+ struct mutex lock;
};
struct cec_adapter;
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 9826d3a9464c..f2d072787947 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,9 @@
#ifndef _ADDRCONF_H
#define _ADDRCONF_H
-#define MAX_RTR_SOLICITATIONS 3
+#define MAX_RTR_SOLICITATIONS -1 /* unlimited */
#define RTR_SOLICITATION_INTERVAL (4*HZ)
+#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index bfd1590821d6..0a1e21d7bce1 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -29,7 +29,8 @@
#include <net/sock.h>
#include <linux/seq_file.h>
-#define BT_SUBSYS_VERSION "2.21"
+#define BT_SUBSYS_VERSION 2
+#define BT_SUBSYS_REVISION 22
#ifndef AF_BLUETOOTH
#define AF_BLUETOOTH 31
@@ -371,6 +372,7 @@ void hci_sock_set_flag(struct sock *sk, int nr);
void hci_sock_clear_flag(struct sock *sk, int nr);
int hci_sock_test_flag(struct sock *sk, int nr);
unsigned short hci_sock_get_channel(struct sock *sk);
+u32 hci_sock_get_cookie(struct sock *sk);
int hci_sock_init(void);
void hci_sock_cleanup(void);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 003b25283407..99aa5e5e3100 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -63,6 +63,7 @@
#define HCI_SDIO 6
#define HCI_SPI 7
#define HCI_I2C 8
+#define HCI_SMD 9
/* HCI controller types */
#define HCI_PRIMARY 0x00
@@ -207,7 +208,11 @@ enum {
HCI_MGMT_INDEX_EVENTS,
HCI_MGMT_UNCONF_INDEX_EVENTS,
HCI_MGMT_EXT_INDEX_EVENTS,
- HCI_MGMT_GENERIC_EVENTS,
+ HCI_MGMT_EXT_INFO_EVENTS,
+ HCI_MGMT_OPTION_EVENTS,
+ HCI_MGMT_SETTING_EVENTS,
+ HCI_MGMT_DEV_CLASS_EVENTS,
+ HCI_MGMT_LOCAL_NAME_EVENTS,
HCI_MGMT_OOB_DATA_EVENTS,
};
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index ee7fc47680a1..f00bf667ec33 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -211,6 +211,7 @@ struct hci_dev {
__u8 dev_name[HCI_MAX_NAME_LENGTH];
__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
+ __u16 appearance;
__u8 dev_class[3];
__u8 major_class;
__u8 minor_class;
@@ -399,7 +400,9 @@ struct hci_dev {
struct delayed_work rpa_expired;
bdaddr_t rpa;
+#if IS_ENABLED(CONFIG_BT_LEDS)
struct led_trigger *power_led;
+#endif
int (*open)(struct hci_dev *hdev);
int (*close)(struct hci_dev *hdev);
@@ -1026,8 +1029,8 @@ int hci_resume_dev(struct hci_dev *hdev);
int hci_reset_dev(struct hci_dev *hdev);
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb);
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb);
-void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
-void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...);
+__printf(2, 3) void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...);
int hci_dev_open(__u16 dev);
int hci_dev_close(__u16 dev);
int hci_dev_do_close(struct hci_dev *hdev);
@@ -1404,6 +1407,9 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
int flag, struct sock *skip_sk);
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb);
+void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event,
+ void *data, u16 data_len, ktime_t tstamp,
+ int flag, struct sock *skip_sk);
void hci_sock_dev_event(struct hci_dev *hdev, int event);
@@ -1449,6 +1455,7 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);
#define DISCOV_BREDR_INQUIRY_LEN 0x08
#define DISCOV_LE_RESTART_DELAY msecs_to_jiffies(200) /* msec */
+void mgmt_fill_version_info(void *ver);
int mgmt_new_settings(struct hci_dev *hdev);
void mgmt_index_added(struct hci_dev *hdev);
void mgmt_index_removed(struct hci_dev *hdev);
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 587d0131b349..240786b04a46 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -45,6 +45,10 @@ struct hci_mon_hdr {
#define HCI_MON_VENDOR_DIAG 11
#define HCI_MON_SYSTEM_NOTE 12
#define HCI_MON_USER_LOGGING 13
+#define HCI_MON_CTRL_OPEN 14
+#define HCI_MON_CTRL_CLOSE 15
+#define HCI_MON_CTRL_COMMAND 16
+#define HCI_MON_CTRL_EVENT 17
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 7647964b1efa..72a456bbbcd5 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -586,6 +586,24 @@ struct mgmt_rp_get_adv_size_info {
#define MGMT_OP_START_LIMITED_DISCOVERY 0x0041
+#define MGMT_OP_READ_EXT_INFO 0x0042
+#define MGMT_READ_EXT_INFO_SIZE 0
+struct mgmt_rp_read_ext_info {
+ bdaddr_t bdaddr;
+ __u8 version;
+ __le16 manufacturer;
+ __le32 supported_settings;
+ __le32 current_settings;
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
+
+#define MGMT_OP_SET_APPEARANCE 0x0043
+struct mgmt_cp_set_appearance {
+ __u16 appearance;
+} __packed;
+#define MGMT_SET_APPEARANCE_SIZE 2
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
@@ -800,3 +818,9 @@ struct mgmt_ev_advertising_added {
struct mgmt_ev_advertising_removed {
__u8 instance;
} __packed;
+
+#define MGMT_EV_EXT_INFO_CHANGED 0x0025
+struct mgmt_ev_ext_info_changed {
+ __le16 eir_len;
+ __u8 eir[0];
+} __packed;
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 7556646db2d3..b122196d5a1f 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -143,6 +143,7 @@ struct dsa_port {
struct net_device *netdev;
struct device_node *dn;
unsigned int ageing_time;
+ u8 stp_state;
};
struct dsa_switch {
@@ -339,6 +340,7 @@ struct dsa_switch_ops {
void (*port_bridge_leave)(struct dsa_switch *ds, int port);
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
+ void (*port_fast_age)(struct dsa_switch *ds, int port);
/*
* VLAN support
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 1c8b6820b694..515352c6280a 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -201,6 +201,7 @@ struct inet6_dev {
struct ipv6_devstat stats;
struct timer_list rs_timer;
+ __s32 rs_interval; /* in jiffies */
__u8 rs_probes;
__u8 addr_gen_mode;
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 49dcad4fe99e..197a30d221e9 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -134,8 +134,8 @@ struct inet_connection_sock {
} icsk_mtup;
u32 icsk_user_timeout;
- u64 icsk_ca_priv[64 / sizeof(u64)];
-#define ICSK_CA_PRIV_SIZE (8 * sizeof(u64))
+ u64 icsk_ca_priv[88 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE (11 * sizeof(u64))
};
#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
diff --git a/include/net/ip.h b/include/net/ip.h
index 9742b92dc933..bc43c0fcae12 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -219,6 +219,29 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
}
#endif
+#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff64[i] += snmp_get_cpu_field64( \
+ mib_statistic, \
+ c, stats_list[i].entry, \
+ offset); \
+ } \
+}
+
+#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
+{ \
+ int i, c; \
+ for_each_possible_cpu(c) { \
+ for (i = 0; stats_list[i].name; i++) \
+ buff[i] += snmp_get_cpu_field( \
+ mib_statistic, \
+ c, stats_list[i].entry); \
+ } \
+}
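
A hypothetical caller, modeled on how net/ipv4/proc.c folds MIB counters; the snmp4_ipstats_list table and the net pointer are assumed to be in scope:

	u64 buff64[IPSTATS_MIB_MAX];

	memset(buff64, 0, sizeof(buff64));
	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
				   net->mib.ip_statistics,
				   offsetof(struct ipstats_mib, syncp));
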
+
void inet_get_local_port_range(struct net *net, int *low, int *high);
#ifdef CONFIG_SYSCTL
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index d97305d0e71f..e0cd318d5103 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -64,6 +64,9 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
}
void ip6_route_input(struct sk_buff *skb);
+struct dst_entry *ip6_route_input_lookup(struct net *net,
+ struct net_device *dev,
+ struct flowi6 *fl6, int flags);
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
struct flowi6 *fl6, int flags);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7d4a72e75f33..b9314b48e39f 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -22,6 +22,7 @@
#include <net/fib_rules.h>
#include <net/inetpeer.h>
#include <linux/percpu.h>
+#include <linux/notifier.h>
struct fib_config {
u8 fc_dst_len;
@@ -122,6 +123,7 @@ struct fib_info {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_weight;
#endif
+ unsigned int fib_offload_cnt;
struct rcu_head rcu;
struct fib_nh fib_nh[0];
#define fib_dev fib_nh[0].nh_dev
@@ -173,6 +175,18 @@ struct fib_result_nl {
__be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+static inline void fib_info_offload_inc(struct fib_info *fi)
+{
+ fi->fib_offload_cnt++;
+ fi->fib_flags |= RTNH_F_OFFLOAD;
+}
+
+static inline void fib_info_offload_dec(struct fib_info *fi)
+{
+ if (--fi->fib_offload_cnt == 0)
+ fi->fib_flags &= ~RTNH_F_OFFLOAD;
+}
+
#define FIB_RES_SADDR(net, res) \
((FIB_RES_NH(res).nh_saddr_genid == \
atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
@@ -185,6 +199,33 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
#define FIB_RES_PREFSRC(net, res) ((res).fi->fib_prefsrc ? : \
FIB_RES_SADDR(net, res))
+struct fib_notifier_info {
+ struct net *net;
+};
+
+struct fib_entry_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ u32 dst;
+ int dst_len;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+ u32 tb_id;
+ u32 nlflags;
+};
+
+enum fib_event_type {
+ FIB_EVENT_ENTRY_ADD,
+ FIB_EVENT_ENTRY_DEL,
+ FIB_EVENT_RULE_ADD,
+ FIB_EVENT_RULE_DEL,
+};
+
+int register_fib_notifier(struct notifier_block *nb);
+int unregister_fib_notifier(struct notifier_block *nb);
+int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
+ struct fib_notifier_info *info);
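
An illustrative consumer: the downcast of the info pointer on entry events mirrors what a switchdev driver would do; everything else is a stub:

	static int sketch_fib_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
	{
		struct fib_entry_notifier_info *fen_info = ptr;

		switch (event) {
		case FIB_EVENT_ENTRY_ADD:
			/* program fen_info->dst/dst_len/fi into hardware,
			 * then fib_info_offload_inc(fen_info->fi) */
			break;
		case FIB_EVENT_ENTRY_DEL:
			/* undo the above; fib_info_offload_dec() clears
			 * RTNH_F_OFFLOAD on the last reference */
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block sketch_fib_nb = {
		.notifier_call = sketch_fib_event,
	};

	/* register_fib_notifier(&sketch_fib_nb); */
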
+
struct fib_table {
struct hlist_node tb_hlist;
u32 tb_id;
@@ -196,13 +237,12 @@ struct fib_table {
int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp,
struct fib_result *res, int fib_flags);
-int fib_table_insert(struct fib_table *, struct fib_config *);
-int fib_table_delete(struct fib_table *, struct fib_config *);
+int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
+int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
-int fib_table_flush(struct fib_table *table);
+int fib_table_flush(struct net *net, struct fib_table *table);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
-void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -315,7 +355,6 @@ static inline int fib_num_tclassid_users(struct net *net)
}
#endif
int fib_unmerge(struct net *net);
-void fib_flush_external(struct net *net);
/* Exported by fib_semantics.c */
int ip_fib_check_default(__be32 gw, struct net_device *dev);
diff --git a/include/net/mpls.h b/include/net/mpls.h
index 5b3b5addfb08..1dbc669b770e 100644
--- a/include/net/mpls.h
+++ b/include/net/mpls.h
@@ -19,21 +19,18 @@
#define MPLS_HLEN 4
+struct mpls_shim_hdr {
+ __be32 label_stack_entry;
+};
+
static inline bool eth_p_mpls(__be16 eth_type)
{
return eth_type == htons(ETH_P_MPLS_UC) ||
eth_type == htons(ETH_P_MPLS_MC);
}
-/*
- * For non-MPLS skbs this will correspond to the network header.
- * For MPLS skbs it will be before the network_header as the MPLS
- * label stack lies between the end of the mac header and the network
- * header. That is, for MPLS skbs the end of the mac header
- * is the top of the MPLS label stack.
- */
-static inline unsigned char *skb_mpls_header(struct sk_buff *skb)
+static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
{
- return skb_mac_header(skb) + skb->mac_len;
+ return (struct mpls_shim_hdr *)skb_network_header(skb);
}
#endif
diff --git a/include/net/ncsi.h b/include/net/ncsi.h
index 1dbf42f79750..68680baac0fd 100644
--- a/include/net/ncsi.h
+++ b/include/net/ncsi.h
@@ -31,6 +31,7 @@ struct ncsi_dev {
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
void (*notifier)(struct ncsi_dev *nd));
int ncsi_start_dev(struct ncsi_dev *nd);
+void ncsi_stop_dev(struct ncsi_dev *nd);
void ncsi_unregister_dev(struct ncsi_dev *nd);
#else /* !CONFIG_NET_NCSI */
static inline struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
@@ -44,6 +45,10 @@ static inline int ncsi_start_dev(struct ncsi_dev *nd)
return -ENOTTY;
}
+static inline void ncsi_stop_dev(struct ncsi_dev *nd)
+{
+}
+
static inline void ncsi_unregister_dev(struct ncsi_dev *nd)
{
}
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index e8d1448425a7..0b0c35c37125 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -15,6 +15,12 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
void nf_bridge_update_protocol(struct sk_buff *skb);
+int br_nf_hook_thresh(unsigned int hook, struct net *net, struct sock *sk,
+ struct sk_buff *skb, struct net_device *indev,
+ struct net_device *outdev,
+ int (*okfn)(struct net *, struct sock *,
+ struct sk_buff *));
+
static inline struct nf_bridge_info *
nf_bridge_info_get(const struct sk_buff *skb)
{
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index cdc920b4c4c2..8992e4229da9 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -63,10 +63,6 @@ struct nf_conntrack_l3proto {
size_t nla_size;
-#ifdef CONFIG_SYSCTL
- const char *ctl_table_path;
-#endif /* CONFIG_SYSCTL */
-
/* Init l3proto pernet data */
int (*init_net)(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 6793614e6502..e6937318546c 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -27,6 +27,20 @@ static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
#endif
}
+static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
+ const struct nf_conn *tmpl)
+{
+ if (tmpl && nfct_synproxy(tmpl)) {
+ if (!nfct_seqadj_ext_add(ct))
+ return false;
+
+ if (!nfct_synproxy_ext_add(ct))
+ return false;
+ }
+
+ return true;
+}
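
A hypothetical call site in a conntrack-allocation path (ct and tmpl are assumed to be in scope; the error handling is illustrative):

	if (!nf_ct_add_synproxy(ct, tmpl)) {
		nf_conntrack_free(ct);
		return -ENOMEM;
	}
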
+
struct synproxy_stats {
unsigned int syn_received;
unsigned int cookie_invalid;
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index ee07dc8b0a7b..309cd267be4f 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -2,15 +2,10 @@
#define _NF_LOG_H
#include <linux/netfilter.h>
+#include <linux/netfilter/nf_log.h>
-/* those NF_LOG_* defines and struct nf_loginfo are legacy definitios that will
- * disappear once iptables is replaced with pkttables. Please DO NOT use them
- * for any new code! */
-#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
-#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
-#define NF_LOG_IPOPT 0x04 /* Log IP options */
-#define NF_LOG_UID 0x08 /* Log UID owning local socket */
-#define NF_LOG_MASK 0x0f
+/* Log TCP sequence, TCP options, IP options and UID owning local socket */
+#define NF_LOG_DEFAULT_MASK 0x0f
/* This flag indicates that copy_len field in nf_loginfo is set */
#define NF_LOG_F_COPY_LEN 0x1
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 0dbce55437f2..2280cfe86c56 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -11,7 +11,6 @@ struct nf_queue_entry {
struct sk_buff *skb;
unsigned int id;
- struct nf_hook_ops *elem;
struct nf_hook_state state;
u16 size; /* sizeof(entry) + saved route keys */
@@ -22,10 +21,10 @@ struct nf_queue_entry {
/* Packet queuing */
struct nf_queue_handler {
- int (*outfn)(struct nf_queue_entry *entry,
- unsigned int queuenum);
- void (*nf_hook_drop)(struct net *net,
- struct nf_hook_ops *ops);
+ int (*outfn)(struct nf_queue_entry *entry,
+ unsigned int queuenum);
+ void (*nf_hook_drop)(struct net *net,
+ const struct nf_hook_entry *hooks);
};
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
@@ -41,23 +40,19 @@ static inline void init_hashrandom(u32 *jhash_initval)
*jhash_initval = prandom_u32();
}
-static inline u32 hash_v4(const struct sk_buff *skb, u32 jhash_initval)
+static inline u32 hash_v4(const struct iphdr *iph, u32 initval)
{
- const struct iphdr *iph = ip_hdr(skb);
-
/* packets in either direction go into same queue */
if ((__force u32)iph->saddr < (__force u32)iph->daddr)
return jhash_3words((__force u32)iph->saddr,
- (__force u32)iph->daddr, iph->protocol, jhash_initval);
+ (__force u32)iph->daddr, iph->protocol, initval);
return jhash_3words((__force u32)iph->daddr,
- (__force u32)iph->saddr, iph->protocol, jhash_initval);
+ (__force u32)iph->saddr, iph->protocol, initval);
}
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
+static inline u32 hash_v6(const struct ipv6hdr *ip6h, u32 initval)
{
- const struct ipv6hdr *ip6h = ipv6_hdr(skb);
u32 a, b, c;
if ((__force u32)ip6h->saddr.s6_addr32[3] <
@@ -75,20 +70,50 @@ static inline u32 hash_v6(const struct sk_buff *skb, u32 jhash_initval)
else
c = (__force u32) ip6h->daddr.s6_addr32[1];
- return jhash_3words(a, b, c, jhash_initval);
+ return jhash_3words(a, b, c, initval);
+}
+
+static inline u32 hash_bridge(const struct sk_buff *skb, u32 initval)
+{
+ struct ipv6hdr *ip6h, _ip6h;
+ struct iphdr *iph, _iph;
+
+ switch (eth_hdr(skb)->h_proto) {
+ case htons(ETH_P_IP):
+ iph = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*iph), &_iph);
+ if (iph)
+ return hash_v4(iph, initval);
+ break;
+ case htons(ETH_P_IPV6):
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb),
+ sizeof(*ip6h), &_ip6h);
+ if (ip6h)
+ return hash_v6(ip6h, initval);
+ break;
+ }
+
+ return 0;
}
-#endif
static inline u32
nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
- u32 jhash_initval)
+ u32 initval)
{
- if (family == NFPROTO_IPV4)
- queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32;
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
- else if (family == NFPROTO_IPV6)
- queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32;
-#endif
+ switch (family) {
+ case NFPROTO_IPV4:
+ queue += reciprocal_scale(hash_v4(ip_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_IPV6:
+ queue += reciprocal_scale(hash_v6(ipv6_hdr(skb), initval),
+ queues_total);
+ break;
+ case NFPROTO_BRIDGE:
+ queue += reciprocal_scale(hash_bridge(skb, initval),
+ queues_total);
+ break;
+ }
return queue;
}
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 8972468bc94b..5031e072567b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -19,6 +19,7 @@ struct nft_pktinfo {
const struct net_device *out;
u8 pf;
u8 hook;
+ bool tprot_set;
u8 tprot;
/* for x_tables compatibility */
struct xt_action_param xt;
@@ -36,6 +37,23 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt->pf = pkt->xt.family = state->pf;
}
+static inline void nft_set_pktinfo_proto_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
+{
+ pkt->tprot_set = false;
+ pkt->tprot = 0;
+ pkt->xt.thoff = 0;
+ pkt->xt.fragoff = 0;
+}
+
+static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+}
+
/**
* struct nft_verdict - nf_tables verdict
*
@@ -127,6 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
+unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest);
unsigned int nft_parse_register(const struct nlattr *attr);
int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg);
diff --git a/include/net/netfilter/nf_tables_bridge.h b/include/net/netfilter/nf_tables_bridge.h
deleted file mode 100644
index 511fb79f6dad..000000000000
--- a/include/net/netfilter/nf_tables_bridge.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _NET_NF_TABLES_BRIDGE_H
-#define _NET_NF_TABLES_BRIDGE_H
-
-int nft_bridge_iphdr_validate(struct sk_buff *skb);
-int nft_bridge_ip6hdr_validate(struct sk_buff *skb);
-
-#endif /* _NET_NF_TABLES_BRIDGE_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index a9060dd99db7..00f4f6b1b1ba 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -28,6 +28,9 @@ extern const struct nft_expr_ops nft_cmp_fast_ops;
int nft_cmp_module_init(void);
void nft_cmp_module_exit(void);
+int nft_range_module_init(void);
+void nft_range_module_exit(void);
+
int nft_lookup_module_init(void);
void nft_lookup_module_exit(void);
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ca6ef6bf775e..968f00b82fb5 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -14,11 +14,54 @@ nft_set_pktinfo_ipv4(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, skb, state);
ip = ip_hdr(pkt->skb);
+ pkt->tprot_set = true;
pkt->tprot = ip->protocol;
pkt->xt.thoff = ip_hdrlen(pkt->skb);
pkt->xt.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
}
+static inline int
+__nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct iphdr *iph, _iph;
+ u32 len, thoff;
+
+ iph = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*iph),
+ &_iph);
+ if (!iph)
+ return -1;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ return -1;
+
+ len = ntohs(iph->tot_len);
+ thoff = iph->ihl * 4;
+ if (skb->len < len)
+ return -1;
+ else if (len < thoff)
+ return -1;
+
+ pkt->tprot_set = true;
+ pkt->tprot = iph->protocol;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+}
+
+static inline void
+nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ if (__nft_set_pktinfo_ipv4_validate(pkt, skb, state) < 0)
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+}
+
extern struct nft_af_info nft_af_ipv4;
#endif
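
A sketch of how a bridge-family hook might use the validating variant, falling back to the protocol-unspecific setup for non-IP traffic; the function name and body are assumptions:

	static unsigned int
	nft_do_chain_bridge_sketch(void *priv, struct sk_buff *skb,
				   const struct nf_hook_state *state)
	{
		struct nft_pktinfo pkt;

		switch (eth_hdr(skb)->h_proto) {
		case htons(ETH_P_IP):
			nft_set_pktinfo_ipv4_validate(&pkt, skb, state);
			break;
		default:
			nft_set_pktinfo_unspec(&pkt, skb, state);
			break;
		}

		return nft_do_chain(&pkt, priv);
	}
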
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index 8ad39a6a5fe1..d150b5066201 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -4,7 +4,7 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ipv6.h>
-static inline int
+static inline void
nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
struct sk_buff *skb,
const struct nf_hook_state *state)
@@ -15,15 +15,64 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt,
nft_set_pktinfo(pkt, skb, state);
protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
- /* If malformed, drop it */
+ if (protohdr < 0) {
+ nft_set_pktinfo_proto_unspec(pkt, skb);
+ return;
+ }
+
+ pkt->tprot_set = true;
+ pkt->tprot = protohdr;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+}
+
+static inline int
+__nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h, _ip6h;
+ unsigned int thoff = 0;
+ unsigned short frag_off;
+ int protohdr;
+ u32 pkt_len;
+
+ ip6h = skb_header_pointer(skb, skb_network_offset(skb), sizeof(*ip6h),
+ &_ip6h);
+ if (!ip6h)
+ return -1;
+
+ if (ip6h->version != 6)
+ return -1;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > skb->len)
+ return -1;
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL);
if (protohdr < 0)
return -1;
+ pkt->tprot_set = true;
pkt->tprot = protohdr;
pkt->xt.thoff = thoff;
pkt->xt.fragoff = frag_off;
return 0;
+#else
+ return -1;
+#endif
+}
+
+static inline void
+nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ nft_set_pktinfo(pkt, skb, state);
+ if (__nft_set_pktinfo_ipv6_validate(pkt, skb, state) < 0)
+ nft_set_pktinfo_proto_unspec(pkt, skb);
}
extern struct nft_af_info nft_af_ipv6;
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
index 36d723579af2..58487b1cc99a 100644
--- a/include/net/netns/netfilter.h
+++ b/include/net/netns/netfilter.h
@@ -16,6 +16,6 @@ struct netns_nf {
#ifdef CONFIG_SYSCTL
struct ctl_table_header *nf_log_dir_header;
#endif
- struct list_head hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+ struct nf_hook_entry __rcu *hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
};
#endif
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a459be5fe1c2..767b03a3fe67 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -123,7 +123,7 @@ static inline void tcf_exts_to_list(const struct tcf_exts *exts,
for (i = 0; i < exts->nr_actions; i++) {
struct tc_action *a = exts->actions[i];
- list_add(&a->list, actions);
+ list_add_tail(&a->list, actions);
}
#endif
}
@@ -486,4 +486,20 @@ struct tc_cls_matchall_offload {
unsigned long cookie;
};
+enum tc_clsbpf_command {
+ TC_CLSBPF_ADD,
+ TC_CLSBPF_REPLACE,
+ TC_CLSBPF_DESTROY,
+ TC_CLSBPF_STATS,
+};
+
+struct tc_cls_bpf_offload {
+ enum tc_clsbpf_command command;
+ struct tcf_exts *exts;
+ struct bpf_prog *prog;
+ const char *name;
+ bool exts_integrated;
+ u32 gen_flags;
+};
+
#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 52a2015667b4..e6aa0a249672 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -36,6 +36,14 @@ struct qdisc_size_table {
u16 data[];
};
+/* similar to sk_buff_head, but skb->prev pointer is undefined. */
+struct qdisc_skb_head {
+ struct sk_buff *head;
+ struct sk_buff *tail;
+ __u32 qlen;
+ spinlock_t lock;
+};
+
struct Qdisc {
int (*enqueue)(struct sk_buff *skb,
struct Qdisc *sch,
@@ -76,7 +84,7 @@ struct Qdisc {
* For performance sake on SMP, we put highly modified fields at the end
*/
struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
- struct sk_buff_head q;
+ struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
seqcount_t running;
struct gnet_stats_queue qstats;
@@ -600,10 +608,27 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
sch->qstats.overlimits++;
}
+static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
+{
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
+}
+
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
- struct sk_buff_head *list)
+ struct qdisc_skb_head *qh)
{
- __skb_queue_tail(list, skb);
+ struct sk_buff *last = qh->tail;
+
+ if (last) {
+ skb->next = NULL;
+ last->next = skb;
+ qh->tail = skb;
+ } else {
+ qh->tail = skb;
+ qh->head = skb;
+ }
+ qh->qlen++;
qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS;
@@ -614,14 +639,16 @@ static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
return __qdisc_enqueue_tail(skb, sch, &sch->q);
}
-static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
- struct sk_buff_head *list)
+static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = qh->head;
if (likely(skb != NULL)) {
- qdisc_qstats_backlog_dec(sch, skb);
- qdisc_bstats_update(sch, skb);
+ qh->head = skb->next;
+ qh->qlen--;
+ if (qh->head == NULL)
+ qh->tail = NULL;
+ skb->next = NULL;
}
return skb;
@@ -629,7 +656,14 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
- return __qdisc_dequeue_head(sch, &sch->q);
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+ if (likely(skb != NULL)) {
+ qdisc_qstats_backlog_dec(sch, skb);
+ qdisc_bstats_update(sch, skb);
+ }
+
+ return skb;
}
/* Instead of calling kfree_skb() while root qdisc lock is held,
@@ -642,10 +676,10 @@ static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
}
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff_head *list,
+ struct qdisc_skb_head *qh,
struct sk_buff **to_free)
{
- struct sk_buff *skb = __skb_dequeue(list);
+ struct sk_buff *skb = __qdisc_dequeue_head(qh);
if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb);
@@ -666,7 +700,9 @@ static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
- return skb_peek(&sch->q);
+ const struct qdisc_skb_head *qh = &sch->q;
+
+ return qh->head;
}
/* generic pseudo peek method for non-work-conserving qdisc */
@@ -701,15 +737,19 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
return skb;
}
-static inline void __qdisc_reset_queue(struct sk_buff_head *list)
+static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
/*
* We do not know the backlog in bytes of this list, it
* is up to the caller to correct it
*/
- if (!skb_queue_empty(list)) {
- rtnl_kfree_skbs(list->next, list->prev);
- __skb_queue_head_init(list);
+ ASSERT_RTNL();
+ if (qh->qlen) {
+ rtnl_kfree_skbs(qh->head, qh->tail);
+
+ qh->head = NULL;
+ qh->tail = NULL;
+ qh->qlen = 0;
}
}
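The new qdisc_skb_head replaces the doubly-linked sk_buff_head with a bare head/tail/qlen singly-linked FIFO, so the enqueue and dequeue fast paths no longer maintain skb->prev. A self-contained userspace sketch of the same invariants (the struct names here are illustrative stand-ins, not the kernel types):

#include <assert.h>
#include <stddef.h>

/* Stand-ins for struct sk_buff / struct qdisc_skb_head: a singly-linked
 * FIFO tracked by head, tail and qlen only. */
struct buf { struct buf *next; };
struct fifo { struct buf *head, *tail; unsigned int qlen; };

static void fifo_enqueue_tail(struct fifo *q, struct buf *b)
{
	b->next = NULL;
	if (q->tail)
		q->tail->next = b;	/* append after current tail */
	else
		q->head = b;		/* queue was empty */
	q->tail = b;
	q->qlen++;
}

static struct buf *fifo_dequeue_head(struct fifo *q)
{
	struct buf *b = q->head;

	if (b) {
		q->head = b->next;
		if (!q->head)
			q->tail = NULL;	/* queue drained */
		b->next = NULL;
		q->qlen--;
	}
	return b;
}

int main(void)
{
	struct fifo q = { NULL, NULL, 0 };
	struct buf a, b;

	fifo_enqueue_tail(&q, &a);
	fifo_enqueue_tail(&q, &b);
	assert(fifo_dequeue_head(&q) == &a);	/* FIFO order preserved */
	assert(fifo_dequeue_head(&q) == &b);
	assert(q.qlen == 0 && !q.head && !q.tail);
	return 0;
}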
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 632e205ca54b..87a7f42e7639 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -83,9 +83,9 @@
#endif
/* Round an int up to the next multiple of 4. */
-#define WORD_ROUND(s) (((s)+3)&~3)
+#define SCTP_PAD4(s) (((s)+3)&~3)
/* Truncate to the previous multiple of 4. */
-#define WORD_TRUNC(s) ((s)&~3)
+#define SCTP_TRUNC4(s) ((s)&~3)
/*
* Function declarations.
@@ -433,7 +433,7 @@ static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
if (asoc->user_frag)
frag = min_t(int, frag, asoc->user_frag);
- frag = WORD_TRUNC(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
+ frag = SCTP_TRUNC4(min_t(int, frag, SCTP_MAX_CHUNK_LEN));
return frag;
}
@@ -462,7 +462,7 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)
for (pos.v = chunk->member;\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
- pos.v += WORD_ROUND(ntohs(pos.p->length)))
+ pos.v += SCTP_PAD4(ntohs(pos.p->length)))
#define sctp_walk_errors(err, chunk_hdr)\
_sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
@@ -472,7 +472,7 @@ for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
- err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
+ err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
#define sctp_walk_fwdtsn(pos, chunk)\
_sctp_walk_fwdtsn((pos), (chunk), ntohs((chunk)->chunk_hdr->length) - sizeof(struct sctp_fwdtsn_chunk))
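SCTP_PAD4/SCTP_TRUNC4 are pure renames of WORD_ROUND/WORD_TRUNC; both round a length to a 4-byte boundary, which matters because SCTP chunks and parameters are padded to multiples of four on the wire. A quick check of the arithmetic:

#include <assert.h>

/* Same definitions as the renamed SCTP macros above. */
#define SCTP_PAD4(s)   (((s)+3)&~3)	/* round up to next multiple of 4 */
#define SCTP_TRUNC4(s) ((s)&~3)		/* truncate to previous multiple of 4 */

int main(void)
{
	assert(SCTP_PAD4(5) == 8);
	assert(SCTP_PAD4(8) == 8);	/* already aligned: unchanged */
	assert(SCTP_TRUNC4(7) == 4);
	assert(SCTP_TRUNC4(8) == 8);
	return 0;
}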
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index efc01743b9d6..ca6c971dd74a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -307,85 +307,27 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk)
}
/* Compare two TSNs */
+#define TSN_lt(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) < 0))
-/* RFC 1982 - Serial Number Arithmetic
- *
- * 2. Comparison
- * Then, s1 is said to be equal to s2 if and only if i1 is equal to i2,
- * in all other cases, s1 is not equal to s2.
- *
- * s1 is said to be less than s2 if, and only if, s1 is not equal to s2,
- * and
- *
- * (i1 < i2 and i2 - i1 < 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 > 2^(SERIAL_BITS - 1))
- *
- * s1 is said to be greater than s2 if, and only if, s1 is not equal to
- * s2, and
- *
- * (i1 < i2 and i2 - i1 > 2^(SERIAL_BITS - 1)) or
- * (i1 > i2 and i1 - i2 < 2^(SERIAL_BITS - 1))
- */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on TSNs in this document SHOULD use Serial
- * Number Arithmetic as defined in [RFC1982] where SERIAL_BITS = 32.
- */
-
-enum {
- TSN_SIGN_BIT = (1<<31)
-};
-
-static inline int TSN_lt(__u32 s, __u32 t)
-{
- return ((s) - (t)) & TSN_SIGN_BIT;
-}
-
-static inline int TSN_lte(__u32 s, __u32 t)
-{
- return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT);
-}
+#define TSN_lte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((a) - (b)) <= 0))
/* Compare two SSNs */
-
-/*
- * RFC 2960
- * 1.6 Serial Number Arithmetic
- *
- * Comparisons and arithmetic on Stream Sequence Numbers in this document
- * SHOULD use Serial Number Arithmetic as defined in [RFC1982] where
- * SERIAL_BITS = 16.
- */
-enum {
- SSN_SIGN_BIT = (1<<15)
-};
-
-static inline int SSN_lt(__u16 s, __u16 t)
-{
- return ((s) - (t)) & SSN_SIGN_BIT;
-}
-
-static inline int SSN_lte(__u16 s, __u16 t)
-{
- return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT);
-}
-
-/*
- * ADDIP 3.1.1
- * The valid range of Serial Number is from 0 to 4294967295 (2**32 - 1). Serial
- * Numbers wrap back to 0 after reaching 4294967295.
- */
-enum {
- ADDIP_SERIAL_SIGN_BIT = (1<<31)
-};
-
-static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t)
-{
- return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT);
-}
+#define SSN_lt(a,b) \
+ (typecheck(__u16, a) && \
+ typecheck(__u16, b) && \
+ ((__s16)((a) - (b)) < 0))
+
+/* ADDIP 3.1.1 */
+#define ADDIP_SERIAL_gte(a,b) \
+ (typecheck(__u32, a) && \
+ typecheck(__u32, b) && \
+ ((__s32)((b) - (a)) <= 0))
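The open-coded sign-bit tests are replaced by typechecked macros implementing RFC 1982 serial number arithmetic via a signed subtraction, so comparisons stay correct across the 32-bit (or 16-bit) wraparound. A small demonstration of that wraparound behaviour:

#include <assert.h>
#include <stdint.h>

/* Serial-number comparison as in the TSN_lt()/SSN_lt() macros above,
 * minus the kernel typecheck() wrapper. */
#define tsn_lt(a, b) ((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)
#define ssn_lt(a, b) ((int16_t)((uint16_t)(a) - (uint16_t)(b)) < 0)

int main(void)
{
	/* 0xffffffff was sent just before the counter wrapped to 0,
	 * so it is "less than" 1 in serial arithmetic. */
	assert(tsn_lt(0xffffffffu, 1));
	assert(!tsn_lt(1, 0xffffffffu));
	assert(ssn_lt(0xfffe, 2));	/* same property with 16-bit SSNs */
	return 0;
}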
/* Check VTAG of the packet matches the sender's own tag. */
static inline int
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 8693dc452a7f..11c3bf262a85 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -555,6 +555,9 @@ struct sctp_chunk {
atomic_t refcnt;
+ /* How many times this chunk has been sent, for prsctp RTX policy */
+ int sent_count;
+
/* This is our link to the per-transport transmitted list. */
struct list_head transmitted_list;
@@ -604,16 +607,6 @@ struct sctp_chunk {
/* This needs to be recoverable for SCTP_SEND_FAILED events. */
struct sctp_sndrcvinfo sinfo;
- /* We use this field to record param for prsctp policies,
- * for TTL policy, it is the time_to_drop of this chunk,
- * for RTX policy, it is the max_sent_count of this chunk,
- * for PRIO policy, it is the priority of this chunk.
- */
- unsigned long prsctp_param;
-
- /* How many times this chunk have been sent, for prsctp RTX policy */
- int sent_count;
-
/* Which association does this belong to? */
struct sctp_association *asoc;
diff --git a/include/net/sock.h b/include/net/sock.h
index c797c57f4d9f..ebf75db08e06 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1339,6 +1339,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
if (!sk_has_account(sk))
return;
sk->sk_forward_alloc += size;
+
+ /* Avoid a possible overflow.
+ * TCP send queues can make this happen, if sk_mem_reclaim()
+ * is not called and more than 2 GBytes are released at once.
+ *
+ * If we reach 2 MBytes, reclaim 1 MByte right now; there is
+ * no need to hold that much forward allocation anyway.
+ */
+ if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+ __sk_mem_reclaim(sk, 1 << 20);
}
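The new cap on sk_forward_alloc reclaims 1 MByte as soon as the forward allocation crosses 2 MBytes, keeping the counter far from overflow when a multi-gigabyte send queue is freed in one burst. A toy model of just the thresholding logic, assuming nothing about the real reclaim internals:

#include <assert.h>

#define RECLAIM_THRESHOLD (1 << 21)	/* 2 MBytes, as in sk_mem_uncharge() */
#define RECLAIM_AMOUNT    (1 << 20)	/* give back 1 MByte at a time */

/* Toy stand-in for sk->sk_forward_alloc plus __sk_mem_reclaim():
 * uncharge "size" bytes, then trim if the cap is exceeded. */
static int forward_alloc;

static void mem_uncharge(int size)
{
	forward_alloc += size;
	if (forward_alloc >= RECLAIM_THRESHOLD)
		forward_alloc -= RECLAIM_AMOUNT;
}

int main(void)
{
	int i;

	for (i = 0; i < 1024; i++)
		mem_uncharge(64 * 1024);	/* free 64 KB chunks */
	assert(forward_alloc < RECLAIM_THRESHOLD);	/* stays bounded */
	return 0;
}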
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 729fe1534160..eba80c4fc56f 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -68,7 +68,6 @@ struct switchdev_attr {
enum switchdev_obj_id {
SWITCHDEV_OBJ_ID_UNDEFINED,
SWITCHDEV_OBJ_ID_PORT_VLAN,
- SWITCHDEV_OBJ_ID_IPV4_FIB,
SWITCHDEV_OBJ_ID_PORT_FDB,
SWITCHDEV_OBJ_ID_PORT_MDB,
};
@@ -92,21 +91,6 @@ struct switchdev_obj_port_vlan {
#define SWITCHDEV_OBJ_PORT_VLAN(obj) \
container_of(obj, struct switchdev_obj_port_vlan, obj)
-/* SWITCHDEV_OBJ_ID_IPV4_FIB */
-struct switchdev_obj_ipv4_fib {
- struct switchdev_obj obj;
- u32 dst;
- int dst_len;
- struct fib_info *fi;
- u8 tos;
- u8 type;
- u32 nlflags;
- u32 tb_id;
-};
-
-#define SWITCHDEV_OBJ_IPV4_FIB(obj) \
- container_of(obj, struct switchdev_obj_ipv4_fib, obj)
-
/* SWITCHDEV_OBJ_ID_PORT_FDB */
struct switchdev_obj_port_fdb {
struct switchdev_obj obj;
@@ -209,11 +193,6 @@ int switchdev_port_bridge_setlink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
int switchdev_port_bridge_dellink(struct net_device *dev,
struct nlmsghdr *nlh, u16 flags);
-int switchdev_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 nlflags, u32 tb_id);
-int switchdev_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id);
-void switchdev_fib_ipv4_abort(struct fib_info *fi);
int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr,
u16 vid, u16 nlm_flags);
@@ -304,25 +283,6 @@ static inline int switchdev_port_bridge_dellink(struct net_device *dev,
return -EOPNOTSUPP;
}
-static inline int switchdev_fib_ipv4_add(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type,
- u32 nlflags, u32 tb_id)
-{
- return 0;
-}
-
-static inline int switchdev_fib_ipv4_del(u32 dst, int dst_len,
- struct fib_info *fi,
- u8 tos, u8 type, u32 tb_id)
-{
- return 0;
-}
-
-static inline void switchdev_fib_ipv4_abort(struct fib_info *fi)
-{
-}
-
static inline int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
const unsigned char *addr,
diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
index 5164bd7a38fb..9fd2bea0a6e0 100644
--- a/include/net/tc_act/tc_ife.h
+++ b/include/net/tc_act/tc_ife.h
@@ -50,9 +50,11 @@ int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen,
int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp);
int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi);
+int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi);
int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi);
int ife_validate_meta_u32(void *val, int len);
int ife_validate_meta_u16(void *val, int len);
+int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi);
void ife_release_meta_gen(struct tcf_meta_info *mi);
int register_ife_op(struct tcf_meta_ops *mops);
int unregister_ife_op(struct tcf_meta_ops *mops);
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 6b835889ea30..48cca321ee6c 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,6 +11,7 @@
#define __NET_TC_VLAN_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_vlan.h>
#define VLAN_F_POP 0x1
#define VLAN_F_PUSH 0x2
@@ -24,4 +25,28 @@ struct tcf_vlan {
};
#define to_vlan(a) ((struct tcf_vlan *)a)
+static inline bool is_tcf_vlan(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_VLAN)
+ return true;
+#endif
+ return false;
+}
+
+static inline u32 tcf_vlan_action(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_action;
+}
+
+static inline u16 tcf_vlan_push_vid(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_vid;
+}
+
+static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_proto;
+}
+
#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index fdfbedd61c67..f83b7f220a65 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -533,6 +533,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */
+u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
+ int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
@@ -671,7 +673,7 @@ static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
- return tp->rtt_min[0].rtt;
+ return minmax_get(&tp->rtt_min);
}
/* Compute the actual receive window we are currently advertising.
@@ -763,8 +765,16 @@ struct tcp_skb_cb {
__u32 ack_seq; /* Sequence number ACK'd */
union {
struct {
- /* There is space for up to 20 bytes */
- __u32 in_flight;/* Bytes in flight when packet sent */
+ /* There is space for up to 24 bytes */
+ __u32 in_flight:30,/* Bytes in flight at transmit */
+ is_app_limited:1, /* cwnd not fully used? */
+ unused:1;
+ /* pkts S/ACKed so far upon tx of skb, incl retrans: */
+ __u32 delivered;
+ /* start of send pipeline phase */
+ struct skb_mstamp first_tx_mstamp;
+ /* when we reached the "delivered" count */
+ struct skb_mstamp delivered_mstamp;
} tx; /* only used for outgoing skbs */
union {
struct inet_skb_parm h4;
@@ -860,6 +870,27 @@ struct ack_sample {
u32 in_flight;
};
+/* A rate sample measures the number of (original/retransmitted) data
+ * packets delivered "delivered" over an interval of time "interval_us".
+ * The tcp_rate.c code fills in the rate sample, and congestion
+ * control modules that define a cong_control function to run at the end
+ * of ACK processing can optionally choose to consult this sample when
+ * setting cwnd and pacing rate.
+ * A sample is invalid if "delivered" or "interval_us" is negative.
+ */
+struct rate_sample {
+ struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+ u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
+ s32 delivered; /* number of packets delivered over interval */
+ long interval_us; /* time for tp->delivered to incr "delivered" */
+ long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
+ int losses; /* number of packets marked lost upon ACK */
+ u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
+ u32 prior_in_flight; /* in flight before this ACK */
+ bool is_app_limited; /* is sample from packet with bubble in pipe? */
+ bool is_retrans; /* is sample from retransmission? */
+};
+
struct tcp_congestion_ops {
struct list_head list;
u32 key;
@@ -884,6 +915,14 @@ struct tcp_congestion_ops {
u32 (*undo_cwnd)(struct sock *sk);
/* hook for packet ack accounting (optional) */
void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
+ /* suggest number of segments for each skb to transmit (optional) */
+ u32 (*tso_segs_goal)(struct sock *sk);
+ /* returns the multiplier used in tcp_sndbuf_expand (optional) */
+ u32 (*sndbuf_expand)(struct sock *sk);
+ /* call when packets are delivered to update cwnd and pacing rate,
+ * after all the ca_state processing. (optional)
+ */
+ void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
/* get info for inet_diag (optional) */
size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info);
@@ -946,6 +985,14 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
icsk->icsk_ca_ops->cwnd_event(sk, event);
}
+/* From tcp_rate.c */
+void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
+void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
+ struct rate_sample *rs);
+void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
+ struct skb_mstamp *now, struct rate_sample *rs);
+void tcp_rate_check_app_limited(struct sock *sk);
+
/* These functions determine how the current flow behaves in respect of SACK
* handling. SACK is negotiated with the peer, and therefore it can vary
* between different flows.
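struct rate_sample above is the input a BBR-style cong_control module consumes: the delivery rate is simply the packets delivered over the elapsed interval, and app-limited samples must not pull the bandwidth estimate down. A hedged sketch of that computation (the MSS value and helper are illustrative, not the kernel's tcp_rate.c code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative rate computation over the fields a rate sample carries. */
struct sample {
	int32_t delivered;	/* packets delivered over the interval */
	int64_t interval_us;	/* elapsed time in microseconds */
	bool    is_app_limited;	/* cwnd was not fully used */
};

/* Returns estimated bandwidth in bytes/sec, or -1 for an invalid sample. */
static int64_t sample_bw(const struct sample *rs, uint32_t mss)
{
	if (rs->delivered < 0 || rs->interval_us <= 0)
		return -1;	/* invalid sample, per the comment above */
	return (int64_t)rs->delivered * mss * 1000000 / rs->interval_us;
}

int main(void)
{
	struct sample rs = { .delivered = 100, .interval_us = 10000,
			     .is_app_limited = false };

	/* 100 pkts * 1448 B in 10 ms ~= 14.48 MB/s */
	assert(sample_bw(&rs, 1448) == 14480000);
	return 0;
}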
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d2fdd6d70959..31947b9c21d6 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1540,8 +1540,10 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_extract_header(struct sk_buff *skb);
int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
-int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
+int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
+ struct ip6_tnl *t);
int xfrm6_transport_finish(struct sk_buff *skb, int async);
+int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
int xfrm6_rcv(struct sk_buff *skb);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
xfrm_address_t *saddr, u8 proto);
diff --git a/include/rxrpc/packet.h b/include/rxrpc/packet.h
index fd6eb3a60a8c..703a64b4681a 100644
--- a/include/rxrpc/packet.h
+++ b/include/rxrpc/packet.h
@@ -123,6 +123,7 @@ struct rxrpc_ackpacket {
#define RXRPC_ACK_PING_RESPONSE 7 /* response to RXRPC_ACK_PING */
#define RXRPC_ACK_DELAY 8 /* nothing happened since received packet */
#define RXRPC_ACK_IDLE 9 /* ACK due to fully received ACK window */
+#define RXRPC_ACK__INVALID 10 /* Representation of invalid ACK reason */
uint8_t nAcks; /* number of ACKs */
#define RXRPC_MAXACKS 255
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 0dee7afa93d6..7e4cd53139ed 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -771,12 +771,9 @@ static inline int scsi_host_in_recovery(struct Scsi_Host *shost)
shost->tmf_in_progress;
}
-extern bool scsi_use_blk_mq;
-
static inline bool shost_use_blk_mq(struct Scsi_Host *shost)
{
- return scsi_use_blk_mq;
-
+ return shost->use_blk_mq;
}
extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index ea3b10ed91a8..0383e5e9a0f3 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -16,6 +16,66 @@
#include <linux/tracepoint.h>
+TRACE_EVENT(rxrpc_conn,
+ TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+ int usage, const void *where),
+
+ TP_ARGS(conn, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(int, op )
+ __field(int, usage )
+ __field(const void *, where )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+ ),
+
+ TP_printk("C=%p %s u=%d sp=%pSR",
+ __entry->conn,
+ rxrpc_conn_traces[__entry->op],
+ __entry->usage,
+ __entry->where)
+ );
+
+TRACE_EVENT(rxrpc_client,
+ TP_PROTO(struct rxrpc_connection *conn, int channel,
+ enum rxrpc_client_trace op),
+
+ TP_ARGS(conn, channel, op),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_connection *, conn )
+ __field(u32, cid )
+ __field(int, channel )
+ __field(int, usage )
+ __field(enum rxrpc_client_trace, op )
+ __field(enum rxrpc_conn_cache_state, cs )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn;
+ __entry->channel = channel;
+ __entry->usage = atomic_read(&conn->usage);
+ __entry->op = op;
+ __entry->cid = conn->proto.cid;
+ __entry->cs = conn->cache_state;
+ ),
+
+ TP_printk("C=%p h=%2d %s %s i=%08x u=%d",
+ __entry->conn,
+ __entry->channel,
+ rxrpc_client_traces[__entry->op],
+ rxrpc_conn_cache_states[__entry->cs],
+ __entry->cid,
+ __entry->usage)
+ );
+
TRACE_EVENT(rxrpc_call,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
int usage, const void *where, const void *aux),
@@ -47,14 +107,14 @@ TRACE_EVENT(rxrpc_call,
);
TRACE_EVENT(rxrpc_skb,
- TP_PROTO(struct sk_buff *skb, int op, int usage, int mod_count,
- const void *where),
+ TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
+ int usage, int mod_count, const void *where),
TP_ARGS(skb, op, usage, mod_count, where),
TP_STRUCT__entry(
__field(struct sk_buff *, skb )
- __field(int, op )
+ __field(enum rxrpc_skb_trace, op )
__field(int, usage )
__field(int, mod_count )
__field(const void *, where )
@@ -70,11 +130,7 @@ TRACE_EVENT(rxrpc_skb,
TP_printk("s=%p %s u=%d m=%d p=%pSR",
__entry->skb,
- (__entry->op == 0 ? "NEW" :
- __entry->op == 1 ? "SEE" :
- __entry->op == 2 ? "GET" :
- __entry->op == 3 ? "FRE" :
- "PUR"),
+ rxrpc_skb_traces[__entry->op],
__entry->usage,
__entry->mod_count,
__entry->where)
@@ -93,11 +149,12 @@ TRACE_EVENT(rxrpc_rx_packet,
memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
),
- TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x",
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s",
__entry->hdr.epoch, __entry->hdr.cid,
__entry->hdr.callNumber, __entry->hdr.serviceId,
__entry->hdr.serial, __entry->hdr.seq,
- __entry->hdr.type, __entry->hdr.flags)
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
);
TRACE_EVENT(rxrpc_rx_done,
@@ -147,6 +204,421 @@ TRACE_EVENT(rxrpc_abort,
__entry->abort_code, __entry->error, __entry->why)
);
+TRACE_EVENT(rxrpc_transmit,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
+
+ TP_ARGS(call, why),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_transmit_trace, why )
+ __field(rxrpc_seq_t, tx_hard_ack )
+ __field(rxrpc_seq_t, tx_top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->tx_hard_ack = call->tx_hard_ack;
+ __entry->tx_top = call->tx_top;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_transmit_traces[__entry->why],
+ __entry->tx_hard_ack + 1,
+ __entry->tx_top - __entry->tx_hard_ack)
+ );
+
+TRACE_EVENT(rxrpc_rx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t first, u8 reason, u8 n_acks),
+
+ TP_ARGS(call, first, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, first )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->first = first;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk("c=%p %s f=%08x n=%u",
+ __entry->call,
+ rxrpc_ack_names[__entry->reason],
+ __entry->first,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_tx_data,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
+
+ TP_ARGS(call, seq, serial, flags, retrans, lose),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, flags )
+ __field(bool, retrans )
+ __field(bool, lose )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->serial = serial;
+ __entry->flags = flags;
+ __entry->retrans = retrans;
+ __entry->lose = lose;
+ ),
+
+ TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
+ __entry->call,
+ __entry->serial,
+ __entry->seq,
+ __entry->flags,
+ __entry->retrans ? " *RETRANS*" : "",
+ __entry->lose ? " *LOSE*" : "")
+ );
+
+TRACE_EVENT(rxrpc_tx_ack,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
+ u8 reason, u8 n_acks),
+
+ TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, ack_first )
+ __field(rxrpc_serial_t, ack_serial )
+ __field(u8, reason )
+ __field(u8, n_acks )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->serial = serial;
+ __entry->ack_first = ack_first;
+ __entry->ack_serial = ack_serial;
+ __entry->reason = reason;
+ __entry->n_acks = n_acks;
+ ),
+
+ TP_printk(" c=%p ACK %08x %s f=%08x r=%08x n=%u",
+ __entry->call,
+ __entry->serial,
+ rxrpc_ack_names[__entry->reason],
+ __entry->ack_first,
+ __entry->ack_serial,
+ __entry->n_acks)
+ );
+
+TRACE_EVENT(rxrpc_receive,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why,
+ rxrpc_serial_t serial, rxrpc_seq_t seq),
+
+ TP_ARGS(call, why, serial, seq),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_receive_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(rxrpc_seq_t, seq )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->seq = seq;
+ __entry->hard_ack = call->rx_hard_ack;
+ __entry->top = call->rx_top;
+ ),
+
+ TP_printk("c=%p %s r=%08x q=%08x w=%08x-%08x",
+ __entry->call,
+ rxrpc_receive_traces[__entry->why],
+ __entry->serial,
+ __entry->seq,
+ __entry->hard_ack,
+ __entry->top)
+ );
+
+TRACE_EVENT(rxrpc_recvmsg,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
+ rxrpc_seq_t seq, unsigned int offset, unsigned int len,
+ int ret),
+
+ TP_ARGS(call, why, seq, offset, len, ret),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_recvmsg_trace, why )
+ __field(rxrpc_seq_t, seq )
+ __field(unsigned int, offset )
+ __field(unsigned int, len )
+ __field(int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->seq = seq;
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("c=%p %s q=%08x o=%u l=%u ret=%d",
+ __entry->call,
+ rxrpc_recvmsg_traces[__entry->why],
+ __entry->seq,
+ __entry->offset,
+ __entry->len,
+ __entry->ret)
+ );
+
+TRACE_EVENT(rxrpc_rtt_tx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
+ rxrpc_serial_t send_serial),
+
+ TP_ARGS(call, why, send_serial),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_tx_trace, why )
+ __field(rxrpc_serial_t, send_serial )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ ),
+
+ TP_printk("c=%p %s sr=%08x",
+ __entry->call,
+ rxrpc_rtt_tx_traces[__entry->why],
+ __entry->send_serial)
+ );
+
+TRACE_EVENT(rxrpc_rtt_rx,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
+ rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
+ s64 rtt, u8 nr, s64 avg),
+
+ TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_rtt_rx_trace, why )
+ __field(u8, nr )
+ __field(rxrpc_serial_t, send_serial )
+ __field(rxrpc_serial_t, resp_serial )
+ __field(s64, rtt )
+ __field(u64, avg )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->send_serial = send_serial;
+ __entry->resp_serial = resp_serial;
+ __entry->rtt = rtt;
+ __entry->nr = nr;
+ __entry->avg = avg;
+ ),
+
+ TP_printk("c=%p %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld",
+ __entry->call,
+ rxrpc_rtt_rx_traces[__entry->why],
+ __entry->send_serial,
+ __entry->resp_serial,
+ __entry->rtt,
+ __entry->nr,
+ __entry->avg)
+ );
+
+TRACE_EVENT(rxrpc_timer,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+ ktime_t now, unsigned long now_j),
+
+ TP_ARGS(call, why, now, now_j),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_timer_trace, why )
+ __field_struct(ktime_t, now )
+ __field_struct(ktime_t, expire_at )
+ __field_struct(ktime_t, ack_at )
+ __field_struct(ktime_t, resend_at )
+ __field(unsigned long, now_j )
+ __field(unsigned long, timer )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->now = now;
+ __entry->expire_at = call->expire_at;
+ __entry->ack_at = call->ack_at;
+ __entry->resend_at = call->resend_at;
+ __entry->now_j = now_j;
+ __entry->timer = call->timer.expires;
+ ),
+
+ TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+ __entry->call,
+ rxrpc_timer_traces[__entry->why],
+ ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
+ ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
+ ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
+ __entry->timer - __entry->now_j)
+ );
+
+TRACE_EVENT(rxrpc_rx_lose,
+ TP_PROTO(struct rxrpc_skb_priv *sp),
+
+ TP_ARGS(sp),
+
+ TP_STRUCT__entry(
+ __field_struct(struct rxrpc_host_header, hdr )
+ ),
+
+ TP_fast_assign(
+ memcpy(&__entry->hdr, &sp->hdr, sizeof(__entry->hdr));
+ ),
+
+ TP_printk("%08x:%08x:%08x:%04x %08x %08x %02x %02x %s *LOSE*",
+ __entry->hdr.epoch, __entry->hdr.cid,
+ __entry->hdr.callNumber, __entry->hdr.serviceId,
+ __entry->hdr.serial, __entry->hdr.seq,
+ __entry->hdr.type, __entry->hdr.flags,
+ __entry->hdr.type <= 15 ? rxrpc_pkts[__entry->hdr.type] : "?UNK")
+ );
+
+TRACE_EVENT(rxrpc_propose_ack,
+ TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
+ u8 ack_reason, rxrpc_serial_t serial, bool immediate,
+ bool background, enum rxrpc_propose_ack_outcome outcome),
+
+ TP_ARGS(call, why, ack_reason, serial, immediate, background,
+ outcome),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_propose_ack_trace, why )
+ __field(rxrpc_serial_t, serial )
+ __field(u8, ack_reason )
+ __field(bool, immediate )
+ __field(bool, background )
+ __field(enum rxrpc_propose_ack_outcome, outcome )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->why = why;
+ __entry->serial = serial;
+ __entry->ack_reason = ack_reason;
+ __entry->immediate = immediate;
+ __entry->background = background;
+ __entry->outcome = outcome;
+ ),
+
+ TP_printk("c=%p %s %s r=%08x i=%u b=%u%s",
+ __entry->call,
+ rxrpc_propose_ack_traces[__entry->why],
+ rxrpc_ack_names[__entry->ack_reason],
+ __entry->serial,
+ __entry->immediate,
+ __entry->background,
+ rxrpc_propose_ack_outcomes[__entry->outcome])
+ );
+
+TRACE_EVENT(rxrpc_retransmit,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, u8 annotation,
+ s64 expiry),
+
+ TP_ARGS(call, seq, annotation, expiry),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_seq_t, seq )
+ __field(u8, annotation )
+ __field(s64, expiry )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->seq = seq;
+ __entry->annotation = annotation;
+ __entry->expiry = expiry;
+ ),
+
+ TP_printk("c=%p q=%x a=%02x xp=%lld",
+ __entry->call,
+ __entry->seq,
+ __entry->annotation,
+ __entry->expiry)
+ );
+
+TRACE_EVENT(rxrpc_congest,
+ TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
+ rxrpc_serial_t ack_serial, enum rxrpc_congest_change change),
+
+ TP_ARGS(call, summary, ack_serial, change),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(enum rxrpc_congest_change, change )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, top )
+ __field(rxrpc_seq_t, lowest_nak )
+ __field(rxrpc_serial_t, ack_serial )
+ __field_struct(struct rxrpc_ack_summary, sum )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->change = change;
+ __entry->hard_ack = call->tx_hard_ack;
+ __entry->top = call->tx_top;
+ __entry->lowest_nak = call->acks_lowest_nak;
+ __entry->ack_serial = ack_serial;
+ memcpy(&__entry->sum, summary, sizeof(__entry->sum));
+ ),
+
+ TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ __entry->call,
+ __entry->ack_serial,
+ rxrpc_ack_names[__entry->sum.ack_reason],
+ __entry->hard_ack,
+ rxrpc_congest_modes[__entry->sum.mode],
+ __entry->sum.cwnd,
+ __entry->sum.ssthresh,
+ __entry->sum.nr_acks, __entry->sum.nr_nacks,
+ __entry->sum.nr_new_acks, __entry->sum.nr_new_nacks,
+ __entry->sum.nr_rot_new_acks,
+ __entry->top - __entry->hard_ack,
+ __entry->sum.cumulative_acks,
+ __entry->sum.dup_acks,
+ __entry->lowest_nak, __entry->sum.new_low_nack ? "!" : "",
+ rxrpc_congest_changes[__entry->change],
+ __entry->sum.retrans_timeo ? " rTxTo" : "")
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f896dfac4ac0..f09c70b97eca 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -398,6 +398,34 @@ enum bpf_func_id {
*/
BPF_FUNC_skb_change_tail,
+ /**
+ * bpf_skb_pull_data(skb, len)
+ * The helper will pull in non-linear data in case the
+ * skb is non-linear and not all of len are part of the
+ * linear section. Only needed for read/write with direct
+ * packet access.
+ * @skb: pointer to skb
+ * @len: len to make read/writeable
+ * Return: 0 on success or negative error
+ */
+ BPF_FUNC_skb_pull_data,
+
+ /**
+ * bpf_csum_update(skb, csum)
+ * Adds csum into skb->csum in case of CHECKSUM_COMPLETE.
+ * @skb: pointer to skb
+ * @csum: csum to add
+ * Return: csum on success or negative error
+ */
+ BPF_FUNC_csum_update,
+
+ /**
+ * bpf_set_hash_invalid(skb)
+ * Invalidate current skb->hash.
+ * @skb: pointer to skb
+ */
+ BPF_FUNC_set_hash_invalid,
+
__BPF_FUNC_MAX_ID,
};
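bpf_skb_pull_data exists because direct packet access only covers the linear part of the skb: a program must pull the bytes it wants to touch into the linear area and then re-validate its data pointers. A minimal classifier sketch under that assumption, declaring the helper from the enum value the way BPF sample code does:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

static long (*bpf_skb_pull_data)(struct __sk_buff *skb, __u32 len) =
	(void *) BPF_FUNC_skb_pull_data;

__attribute__((section("classifier"), used))
int cls_pull(struct __sk_buff *skb)
{
	void *data, *data_end;

	/* Make sure the first 64 bytes are linear and writable. */
	if (bpf_skb_pull_data(skb, 64) < 0)
		return TC_ACT_OK;

	/* Pointers must be reloaded after the pull: the helper may
	 * have reallocated the packet data. */
	data     = (void *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;
	if (data + 64 > data_end)
		return TC_ACT_OK;

	/* ... direct read/write within data[0..63] goes here ... */
	return TC_ACT_OK;
}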
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 9bf3aecfe05b..b4fba662cd32 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -464,6 +464,7 @@ enum {
enum ipvlan_mode {
IPVLAN_MODE_L2 = 0,
IPVLAN_MODE_L3,
+ IPVLAN_MODE_L3S,
IPVLAN_MODE_MAX
};
@@ -618,7 +619,7 @@ enum {
enum {
IFLA_VF_UNSPEC,
IFLA_VF_MAC, /* Hardware queue specific attributes */
- IFLA_VF_VLAN,
+ IFLA_VF_VLAN, /* VLAN ID and QoS */
IFLA_VF_TX_RATE, /* Max TX Bandwidth Allocation */
IFLA_VF_SPOOFCHK, /* Spoof Checking on/off switch */
IFLA_VF_LINK_STATE, /* link state enable/disable/auto switch */
@@ -630,6 +631,7 @@ enum {
IFLA_VF_TRUST, /* Trust VF */
IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
+ IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
__IFLA_VF_MAX,
};
@@ -646,6 +648,22 @@ struct ifla_vf_vlan {
__u32 qos;
};
+enum {
+ IFLA_VF_VLAN_INFO_UNSPEC,
+ IFLA_VF_VLAN_INFO, /* VLAN ID, QoS and VLAN protocol */
+ __IFLA_VF_VLAN_INFO_MAX,
+};
+
+#define IFLA_VF_VLAN_INFO_MAX (__IFLA_VF_VLAN_INFO_MAX - 1)
+#define MAX_VLAN_LIST_LEN 1
+
+struct ifla_vf_vlan_info {
+ __u32 vf;
+ __u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
+ __u32 qos;
+ __be16 vlan_proto; /* VLAN protocol either 802.1Q or 802.1ad */
+};
+
struct ifla_vf_tx_rate {
__u32 vf;
__u32 rate; /* Max TX bandwidth in Mbps, 0 disables throttling */
@@ -826,6 +844,7 @@ enum {
IFLA_STATS_LINK_64,
IFLA_STATS_LINK_XSTATS,
IFLA_STATS_LINK_XSTATS_SLAVE,
+ IFLA_STATS_LINK_OFFLOAD_XSTATS,
__IFLA_STATS_MAX,
};
@@ -845,6 +864,14 @@ enum {
};
#define LINK_XSTATS_TYPE_MAX (__LINK_XSTATS_TYPE_MAX - 1)
+/* These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS */
+enum {
+ IFLA_OFFLOAD_XSTATS_UNSPEC,
+ IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */
+ __IFLA_OFFLOAD_XSTATS_MAX
+};
+#define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1)
+
/* XDP section */
enum {
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 18d5dc13985d..92f3c8677523 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -39,6 +39,7 @@
#define GRE_IS_REC(f) ((f) & GRE_REC)
#define GRE_IS_ACK(f) ((f) & GRE_ACK)
+#define GRE_VERSION_0 __cpu_to_be16(0x0000)
#define GRE_VERSION_1 __cpu_to_be16(0x0001)
#define GRE_PROTO_PPP __cpu_to_be16(0x880b)
#define GRE_PPTP_KEY_MASK __cpu_to_be32(0xffff)
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index b5c366f87b3e..509cd961068d 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -124,6 +124,7 @@ enum {
INET_DIAG_PEERS,
INET_DIAG_PAD,
INET_DIAG_MARK,
+ INET_DIAG_BBRINFO,
__INET_DIAG_MAX,
};
@@ -157,8 +158,20 @@ struct tcp_dctcp_info {
__u32 dctcp_ab_tot;
};
+/* INET_DIAG_BBRINFO */
+
+struct tcp_bbr_info {
+ /* u64 bw: max-filtered BW (app throughput) estimate in bytes per second: */
+ __u32 bbr_bw_lo; /* lower 32 bits of bw */
+ __u32 bbr_bw_hi; /* upper 32 bits of bw */
+ __u32 bbr_min_rtt; /* min-filtered RTT in uSec */
+ __u32 bbr_pacing_gain; /* pacing gain shifted left 8 bits */
+ __u32 bbr_cwnd_gain; /* cwnd gain shifted left 8 bits */
+};
+
union tcp_cc_info {
struct tcpvegas_info vegas;
struct tcp_dctcp_info dctcp;
+ struct tcp_bbr_info bbr;
};
#endif /* _UAPI_INET_DIAG_H_ */
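tcp_bbr_info splits the 64-bit bandwidth estimate into two 32-bit halves so the struct has the same layout on 32- and 64-bit ABIs; a diag consumer reassembles it before printing. A small sketch of the reassembly and of the fixed-point gain fields:

#include <assert.h>
#include <stdint.h>

/* Rebuild the u64 bandwidth from bbr_bw_lo / bbr_bw_hi. */
static uint64_t bbr_bw(uint32_t bw_lo, uint32_t bw_hi)
{
	return ((uint64_t)bw_hi << 32) | bw_lo;
}

int main(void)
{
	assert(bbr_bw(0x00000010u, 0x00000002u) == 0x0000000200000010ull);

	/* The gain fields are fixed-point, shifted left 8 bits: a raw
	 * value of 739 is a gain of roughly 739/256 ~= 2.89. */
	assert(739 / 256 == 2);
	return 0;
}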
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 395876060f50..8c2772340c3f 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -177,6 +177,7 @@ enum {
DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
DEVCONF_DROP_UNSOLICITED_NA,
DEVCONF_KEEP_ADDR_ON_DOWN,
+ DEVCONF_RTR_SOLICIT_MAX_INTERVAL,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
new file mode 100644
index 000000000000..8be21e02387d
--- /dev/null
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -0,0 +1,12 @@
+#ifndef _NETFILTER_NF_LOG_H
+#define _NETFILTER_NF_LOG_H
+
+#define NF_LOG_TCPSEQ 0x01 /* Log TCP sequence numbers */
+#define NF_LOG_TCPOPT 0x02 /* Log TCP options */
+#define NF_LOG_IPOPT 0x04 /* Log IP options */
+#define NF_LOG_UID 0x08 /* Log UID owning local socket */
+#define NF_LOG_NFLOG 0x10 /* Unsupported, don't reuse */
+#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
+#define NF_LOG_MASK 0x2f
+
+#endif /* _NETFILTER_NF_LOG_H */
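NF_LOG_MASK (0x2f) covers the five usable flag bits and deliberately skips NF_LOG_NFLOG (0x10), which stays reserved; a validation path would reject any flags outside the mask. A quick check of the bit layout:

#include <assert.h>

#define NF_LOG_TCPSEQ    0x01
#define NF_LOG_TCPOPT    0x02
#define NF_LOG_IPOPT     0x04
#define NF_LOG_UID       0x08
#define NF_LOG_NFLOG     0x10	/* unsupported, excluded from the mask */
#define NF_LOG_MACDECODE 0x20
#define NF_LOG_MASK      0x2f

int main(void)
{
	assert((NF_LOG_TCPSEQ | NF_LOG_TCPOPT | NF_LOG_IPOPT |
		NF_LOG_UID | NF_LOG_MACDECODE) == NF_LOG_MASK);
	assert((NF_LOG_NFLOG & NF_LOG_MASK) == 0);	/* reserved bit rejected */
	return 0;
}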
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 28ce01d79707..c6c4477c136b 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -546,6 +546,35 @@ enum nft_cmp_attributes {
};
#define NFTA_CMP_MAX (__NFTA_CMP_MAX - 1)
+/**
+ * enum nft_range_ops - nf_tables range operator
+ *
+ * @NFT_RANGE_EQ: equal
+ * @NFT_RANGE_NEQ: not equal
+ */
+enum nft_range_ops {
+ NFT_RANGE_EQ,
+ NFT_RANGE_NEQ,
+};
+
+/**
+ * enum nft_range_attributes - nf_tables range expression netlink attributes
+ *
+ * @NFTA_RANGE_SREG: source register of data to compare (NLA_U32: nft_registers)
+ * @NFTA_RANGE_OP: comparison operation (NLA_U32: nft_range_ops)
+ * @NFTA_RANGE_FROM_DATA: data range from (NLA_NESTED: nft_data_attributes)
+ * @NFTA_RANGE_TO_DATA: data range to (NLA_NESTED: nft_data_attributes)
+ */
+enum nft_range_attributes {
+ NFTA_RANGE_UNSPEC,
+ NFTA_RANGE_SREG,
+ NFTA_RANGE_OP,
+ NFTA_RANGE_FROM_DATA,
+ NFTA_RANGE_TO_DATA,
+ __NFTA_RANGE_MAX
+};
+#define NFTA_RANGE_MAX (__NFTA_RANGE_MAX - 1)
+
enum nft_lookup_flags {
NFT_LOOKUP_F_INV = (1 << 0),
};
@@ -575,6 +604,10 @@ enum nft_dynset_ops {
NFT_DYNSET_OP_UPDATE,
};
+enum nft_dynset_flags {
+ NFT_DYNSET_F_INV = (1 << 0),
+};
+
/**
* enum nft_dynset_attributes - dynset expression attributes
*
@@ -585,6 +618,7 @@ enum nft_dynset_ops {
* @NFTA_DYNSET_SREG_DATA: source register of the data (NLA_U32)
* @NFTA_DYNSET_TIMEOUT: timeout value for the new element (NLA_U64)
* @NFTA_DYNSET_EXPR: expression (NLA_NESTED: nft_expr_attributes)
+ * @NFTA_DYNSET_FLAGS: flags (NLA_U32)
*/
enum nft_dynset_attributes {
NFTA_DYNSET_UNSPEC,
@@ -596,6 +630,7 @@ enum nft_dynset_attributes {
NFTA_DYNSET_TIMEOUT,
NFTA_DYNSET_EXPR,
NFTA_DYNSET_PAD,
+ NFTA_DYNSET_FLAGS,
__NFTA_DYNSET_MAX,
};
#define NFTA_DYNSET_MAX (__NFTA_DYNSET_MAX - 1)
@@ -731,6 +766,7 @@ enum nft_meta_keys {
* @NFTA_HASH_LEN: source data length (NLA_U32)
* @NFTA_HASH_MODULUS: modulus value (NLA_U32)
* @NFTA_HASH_SEED: seed value (NLA_U32)
+ * @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
*/
enum nft_hash_attributes {
NFTA_HASH_UNSPEC,
@@ -739,6 +775,7 @@ enum nft_hash_attributes {
NFTA_HASH_LEN,
NFTA_HASH_MODULUS,
NFTA_HASH_SEED,
+ NFTA_HASH_OFFSET,
__NFTA_HASH_MAX,
};
#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
@@ -886,12 +923,14 @@ enum nft_log_attributes {
* @NFTA_QUEUE_NUM: netlink queue to send messages to (NLA_U16)
* @NFTA_QUEUE_TOTAL: number of queues to load balance packets on (NLA_U16)
* @NFTA_QUEUE_FLAGS: various flags (NLA_U16)
+ * @NFTA_QUEUE_SREG_QNUM: source register of queue number (NLA_U32: nft_registers)
*/
enum nft_queue_attributes {
NFTA_QUEUE_UNSPEC,
NFTA_QUEUE_NUM,
NFTA_QUEUE_TOTAL,
NFTA_QUEUE_FLAGS,
+ NFTA_QUEUE_SREG_QNUM,
__NFTA_QUEUE_MAX
};
#define NFTA_QUEUE_MAX (__NFTA_QUEUE_MAX - 1)
@@ -1126,14 +1165,16 @@ enum nft_trace_types {
* enum nft_ng_attributes - nf_tables number generator expression netlink attributes
*
* @NFTA_NG_DREG: destination register (NLA_U32)
- * @NFTA_NG_UNTIL: source value to increment the counter until reset (NLA_U32)
+ * @NFTA_NG_MODULUS: maximum counter value (NLA_U32)
* @NFTA_NG_TYPE: operation type (NLA_U32)
+ * @NFTA_NG_OFFSET: offset to be added to the counter (NLA_U32)
*/
enum nft_ng_attributes {
NFTA_NG_UNSPEC,
NFTA_NG_DREG,
- NFTA_NG_UNTIL,
+ NFTA_NG_MODULUS,
NFTA_NG_TYPE,
+ NFTA_NG_OFFSET,
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 9df789709abe..6deb8867c5fc 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -231,13 +231,13 @@ enum ctattr_secctx {
enum ctattr_stats_cpu {
CTA_STATS_UNSPEC,
- CTA_STATS_SEARCHED,
+ CTA_STATS_SEARCHED, /* no longer used */
CTA_STATS_FOUND,
- CTA_STATS_NEW,
+ CTA_STATS_NEW, /* no longer used */
CTA_STATS_INVALID,
CTA_STATS_IGNORE,
- CTA_STATS_DELETE,
- CTA_STATS_DELETE_LIST,
+ CTA_STATS_DELETE, /* no longer used */
+ CTA_STATS_DELETE_LIST, /* no longer used */
CTA_STATS_INSERT,
CTA_STATS_INSERT_FAILED,
CTA_STATS_DROP,
diff --git a/include/uapi/linux/netfilter/xt_hashlimit.h b/include/uapi/linux/netfilter/xt_hashlimit.h
index 6db90372f09c..3efc0ca18345 100644
--- a/include/uapi/linux/netfilter/xt_hashlimit.h
+++ b/include/uapi/linux/netfilter/xt_hashlimit.h
@@ -6,6 +6,7 @@
/* timings are in milliseconds. */
#define XT_HASHLIMIT_SCALE 10000
+#define XT_HASHLIMIT_SCALE_v2 1000000llu
/* 1/10,000 sec period => max of 10,000/sec. Min rate is then 429490
* seconds, or one packet every 59 hours.
*/
@@ -63,6 +64,20 @@ struct hashlimit_cfg1 {
__u8 srcmask, dstmask;
};
+struct hashlimit_cfg2 {
+ __u64 avg; /* Average secs between packets * scale */
+ __u64 burst; /* Period multiplier for upper limit. */
+ __u32 mode; /* bitmask of XT_HASHLIMIT_HASH_* */
+
+ /* user specified */
+ __u32 size; /* how many buckets */
+ __u32 max; /* max number of entries */
+ __u32 gc_interval; /* gc interval */
+ __u32 expire; /* when do entries expire? */
+
+ __u8 srcmask, dstmask;
+};
+
struct xt_hashlimit_mtinfo1 {
char name[IFNAMSIZ];
struct hashlimit_cfg1 cfg;
@@ -71,4 +86,12 @@ struct xt_hashlimit_mtinfo1 {
struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
};
+struct xt_hashlimit_mtinfo2 {
+ char name[NAME_MAX];
+ struct hashlimit_cfg2 cfg;
+
+ /* Used internally by the kernel */
+ struct xt_hashlimit_htable *hinfo __attribute__((aligned(8)));
+};
+
#endif /* _UAPI_XT_HASHLIMIT_H */
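hashlimit_cfg2 keeps the cfg1 layout but widens avg/burst to 64 bits and pairs with the finer XT_HASHLIMIT_SCALE_v2 (1,000,000), so much lower rates become expressible: avg stores "average seconds between packets" multiplied by the scale. A hedged sketch of the conversion from a packets-per-second rate (the helper is illustrative, not part of the uapi header):

#include <assert.h>
#include <stdint.h>

#define XT_HASHLIMIT_SCALE_V2 1000000ull	/* scale used by cfg2 */

/* avg = (seconds between packets) * scale = scale / rate. */
static uint64_t hashlimit_avg(uint64_t pkts_per_sec)
{
	return XT_HASHLIMIT_SCALE_V2 / pkts_per_sec;
}

int main(void)
{
	assert(hashlimit_avg(10) == 100000);	/* 10 pkt/s -> 0.1 s spacing */
	assert(hashlimit_avg(1000000) == 1);	/* max rate at this scale */
	return 0;
}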
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 8915b61bbf83..8fd715f806a2 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -396,6 +396,7 @@ enum {
TCA_BPF_FD,
TCA_BPF_NAME,
TCA_BPF_FLAGS,
+ TCA_BPF_FLAGS_GEN,
__TCA_BPF_MAX,
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 2382eed50278..df7451d35131 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -792,6 +792,8 @@ enum {
TCA_FQ_ORPHAN_MASK, /* mask applied to orphaned skb hashes */
+ TCA_FQ_LOW_RATE_THRESHOLD, /* per packet delay under this rate */
+
__TCA_FQ_MAX
};
@@ -809,7 +811,7 @@ struct tc_fq_qd_stats {
__u32 flows;
__u32 inactive_flows;
__u32 throttled_flows;
- __u32 pad;
+ __u32 unthrottle_latency_ns;
};
/* Heavy-Hitter Filter */
diff --git a/include/uapi/linux/tc_act/tc_ife.h b/include/uapi/linux/tc_act/tc_ife.h
index 4ece02a77b9a..cd18360eca24 100644
--- a/include/uapi/linux/tc_act/tc_ife.h
+++ b/include/uapi/linux/tc_act/tc_ife.h
@@ -32,8 +32,9 @@ enum {
#define IFE_META_HASHID 2
#define IFE_META_PRIO 3
#define IFE_META_QMAP 4
+#define IFE_META_TCINDEX 5
/*Can be overridden at runtime by module option*/
-#define __IFE_META_MAX 5
+#define __IFE_META_MAX 6
#define IFE_META_MAX (__IFE_META_MAX - 1)
#endif
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index be72b6e3843b..bddb272b843f 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -16,6 +16,7 @@
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
+#define TCA_VLAN_ACT_MODIFY 3
struct tc_vlan {
tc_gen;
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 482898fc433a..73ac0db487f8 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -167,6 +167,7 @@ struct tcp_info {
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
+ __u8 tcpi_delivery_rate_app_limited:1;
__u32 tcpi_rto;
__u32 tcpi_ato;
@@ -211,6 +212,8 @@ struct tcp_info {
__u32 tcpi_min_rtt;
__u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */
__u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */
+
+ __u64 tcpi_delivery_rate;
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 143338978b48..1fc62b239f1b 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -298,7 +298,7 @@ enum xfrm_attr_type_t {
XFRMA_ALG_AUTH_TRUNC, /* struct xfrm_algo_auth */
XFRMA_MARK, /* struct xfrm_mark */
XFRMA_TFCPAD, /* __u32 */
- XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_esn */
+ XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_state_esn */
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */